Subversion Repositories Kolibri OS

Compare Revisions

No changes between revisions

Regard whitespace Rev 4348 → Rev 4349

/contrib/sdk/sources/ffmpeg/libavdevice/Makefile
0,0 → 1,61
# libavdevice: build rules for the FFmpeg device (de)muxer library.
# Uses the common FFmpeg build system; config.mak is generated by configure.
include $(SUBDIR)../config.mak
 
NAME = avdevice
# Libraries this one depends on; avfilter only when the lavfi input device is built.
FFLIBS = avformat avcodec avutil
FFLIBS-$(CONFIG_LAVFI_INDEV) += avfilter
 
# Public headers installed for library users.
HEADERS = avdevice.h \
version.h \
 
# Objects always built into the library.
OBJS = alldevices.o \
avdevice.o \
 
OBJS-$(HAVE_MSVCRT) += file_open.o
 
# input/output devices (each group is enabled by its CONFIG_* switch)
OBJS-$(CONFIG_ALSA_INDEV) += alsa-audio-common.o \
alsa-audio-dec.o timefilter.o
OBJS-$(CONFIG_ALSA_OUTDEV) += alsa-audio-common.o \
alsa-audio-enc.o
OBJS-$(CONFIG_BKTR_INDEV) += bktr.o
OBJS-$(CONFIG_CACA_OUTDEV) += caca.o
OBJS-$(CONFIG_DSHOW_INDEV) += dshow.o dshow_enummediatypes.o \
dshow_enumpins.o dshow_filter.o \
dshow_pin.o dshow_common.o
OBJS-$(CONFIG_DV1394_INDEV) += dv1394.o
OBJS-$(CONFIG_FBDEV_INDEV) += fbdev_dec.o \
fbdev_common.o
OBJS-$(CONFIG_FBDEV_OUTDEV) += fbdev_enc.o \
fbdev_common.o
OBJS-$(CONFIG_IEC61883_INDEV) += iec61883.o
OBJS-$(CONFIG_JACK_INDEV) += jack_audio.o timefilter.o
OBJS-$(CONFIG_LAVFI_INDEV) += lavfi.o
OBJS-$(CONFIG_OPENAL_INDEV) += openal-dec.o
OBJS-$(CONFIG_OSS_INDEV) += oss_audio.o
OBJS-$(CONFIG_OSS_OUTDEV) += oss_audio.o
OBJS-$(CONFIG_PULSE_INDEV) += pulse_audio_dec.o \
pulse_audio_common.o
OBJS-$(CONFIG_PULSE_OUTDEV) += pulse_audio_enc.o \
pulse_audio_common.o
OBJS-$(CONFIG_SDL_OUTDEV) += sdl.o
OBJS-$(CONFIG_SNDIO_INDEV) += sndio_common.o sndio_dec.o
OBJS-$(CONFIG_SNDIO_OUTDEV) += sndio_common.o sndio_enc.o
OBJS-$(CONFIG_V4L2_INDEV) += v4l2.o v4l2-common.o timefilter.o
OBJS-$(CONFIG_V4L2_OUTDEV) += v4l2enc.o v4l2-common.o
OBJS-$(CONFIG_V4L_INDEV) += v4l.o
OBJS-$(CONFIG_VFWCAP_INDEV) += vfwcap.o
OBJS-$(CONFIG_X11GRAB_INDEV) += x11grab.o
OBJS-$(CONFIG_XV_OUTDEV) += xv.o
 
# external libraries
OBJS-$(CONFIG_LIBCDIO_INDEV) += libcdio.o
OBJS-$(CONFIG_LIBDC1394_INDEV) += libdc1394.o
 
# Headers excluded from the "make checkheaders" self-containment test
# when their backing feature is enabled (they need external headers).
SKIPHEADERS-$(CONFIG_DSHOW_INDEV) += dshow_capture.h
SKIPHEADERS-$(CONFIG_LIBPULSE) += pulse_audio_common.h
SKIPHEADERS-$(CONFIG_V4L2_INDEV) += v4l2-common.h
SKIPHEADERS-$(CONFIG_V4L2_OUTDEV) += v4l2-common.h
SKIPHEADERS-$(HAVE_ALSA_ASOUNDLIB_H) += alsa-audio.h
SKIPHEADERS-$(HAVE_SNDIO_H) += sndio_common.h
 
TESTPROGS = timefilter
/contrib/sdk/sources/ffmpeg/libavdevice/alldevices.c
0,0 → 1,72
/*
* Register all the grabbing devices.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "config.h"
#include "avdevice.h"
 
/* Register the muxer for output device x, but only when the corresponding
 * CONFIG_..._OUTDEV switch is 1; the dead branch is removed by the compiler,
 * so the extern reference is never emitted for disabled devices. */
#define REGISTER_OUTDEV(X, x) \
{ \
extern AVOutputFormat ff_##x##_muxer; \
if (CONFIG_##X##_OUTDEV) \
av_register_output_format(&ff_##x##_muxer); \
}
 
/* Same idea for input devices (demuxers), gated on CONFIG_..._INDEV. */
#define REGISTER_INDEV(X, x) \
{ \
extern AVInputFormat ff_##x##_demuxer; \
if (CONFIG_##X##_INDEV) \
av_register_input_format(&ff_##x##_demuxer); \
}
 
/* Register both directions for devices that support capture and playback. */
#define REGISTER_INOUTDEV(X, x) REGISTER_OUTDEV(X, x); REGISTER_INDEV(X, x)
 
/**
 * Register every compiled-in input and output device with libavformat.
 * Idempotent: a static flag makes repeated calls no-ops.
 * NOTE(review): the flag check is not atomic, so concurrent first calls
 * could race — matches the header's "not thread safe" warning.
 */
void avdevice_register_all(void)
{
static int initialized;
 
if (initialized)
return;
initialized = 1;
 
/* devices */
REGISTER_INOUTDEV(ALSA, alsa);
REGISTER_INDEV (BKTR, bktr);
REGISTER_OUTDEV (CACA, caca);
REGISTER_INDEV (DSHOW, dshow);
REGISTER_INDEV (DV1394, dv1394);
REGISTER_INOUTDEV(FBDEV, fbdev);
REGISTER_INDEV (IEC61883, iec61883);
REGISTER_INDEV (JACK, jack);
REGISTER_INDEV (LAVFI, lavfi);
REGISTER_INDEV (OPENAL, openal);
REGISTER_INOUTDEV(OSS, oss);
REGISTER_INOUTDEV(PULSE, pulse);
REGISTER_OUTDEV (SDL, sdl);
REGISTER_INOUTDEV(SNDIO, sndio);
REGISTER_INOUTDEV(V4L2, v4l2);
/* V4L (v4l1) registration intentionally disabled in this tree: */
// REGISTER_INDEV (V4L, v4l
REGISTER_INDEV (VFWCAP, vfwcap);
REGISTER_INDEV (X11GRAB, x11grab);
REGISTER_OUTDEV (XV, xv);
 
/* external libraries */
REGISTER_INDEV (LIBCDIO, libcdio);
REGISTER_INDEV (LIBDC1394, libdc1394);
}
/contrib/sdk/sources/ffmpeg/libavdevice/alsa-audio-common.c
0,0 → 1,345
/*
* ALSA input and output
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* ALSA input and output: common code
* @author Luca Abeni ( lucabe72 email it )
* @author Benoit Fouet ( benoit fouet free fr )
* @author Nicolas George ( nicolas george normalesup org )
*/
 
#include <alsa/asoundlib.h>
#include "avdevice.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
 
#include "alsa-audio.h"
 
/**
 * Map an FFmpeg raw-PCM codec ID onto the matching ALSA sample format.
 * Note that the 24-bit IDs map to the packed 3-byte ALSA variants (…_3LE/_3BE).
 *
 * @param codec_id an AV_CODEC_ID_PCM_* value
 * @return the ALSA format, or SND_PCM_FORMAT_UNKNOWN for unsupported IDs
 */
static av_cold snd_pcm_format_t codec_id_to_pcm_format(int codec_id)
{
switch(codec_id) {
case AV_CODEC_ID_PCM_F64LE: return SND_PCM_FORMAT_FLOAT64_LE;
case AV_CODEC_ID_PCM_F64BE: return SND_PCM_FORMAT_FLOAT64_BE;
case AV_CODEC_ID_PCM_F32LE: return SND_PCM_FORMAT_FLOAT_LE;
case AV_CODEC_ID_PCM_F32BE: return SND_PCM_FORMAT_FLOAT_BE;
case AV_CODEC_ID_PCM_S32LE: return SND_PCM_FORMAT_S32_LE;
case AV_CODEC_ID_PCM_S32BE: return SND_PCM_FORMAT_S32_BE;
case AV_CODEC_ID_PCM_U32LE: return SND_PCM_FORMAT_U32_LE;
case AV_CODEC_ID_PCM_U32BE: return SND_PCM_FORMAT_U32_BE;
case AV_CODEC_ID_PCM_S24LE: return SND_PCM_FORMAT_S24_3LE;
case AV_CODEC_ID_PCM_S24BE: return SND_PCM_FORMAT_S24_3BE;
case AV_CODEC_ID_PCM_U24LE: return SND_PCM_FORMAT_U24_3LE;
case AV_CODEC_ID_PCM_U24BE: return SND_PCM_FORMAT_U24_3BE;
case AV_CODEC_ID_PCM_S16LE: return SND_PCM_FORMAT_S16_LE;
case AV_CODEC_ID_PCM_S16BE: return SND_PCM_FORMAT_S16_BE;
case AV_CODEC_ID_PCM_U16LE: return SND_PCM_FORMAT_U16_LE;
case AV_CODEC_ID_PCM_U16BE: return SND_PCM_FORMAT_U16_BE;
case AV_CODEC_ID_PCM_S8: return SND_PCM_FORMAT_S8;
case AV_CODEC_ID_PCM_U8: return SND_PCM_FORMAT_U8;
case AV_CODEC_ID_PCM_MULAW: return SND_PCM_FORMAT_MU_LAW;
case AV_CODEC_ID_PCM_ALAW: return SND_PCM_FORMAT_A_LAW;
default: return SND_PCM_FORMAT_UNKNOWN;
}
}
 
/* Generate one interleaved channel-reordering function:
 * copies n frames of CHANNELS samples of TYPE from in_v to out_v,
 * permuting channels per the MAP statement list (FFmpeg order -> ALSA order). */
#define MAKE_REORDER_FUNC(NAME, TYPE, CHANNELS, LAYOUT, MAP) \
static void alsa_reorder_ ## NAME ## _ ## LAYOUT(const void *in_v, \
void *out_v, \
int n) \
{ \
const TYPE *in = in_v; \
TYPE *out = out_v; \
\
while (n-- > 0) { \
MAP \
in += CHANNELS; \
out += CHANNELS; \
} \
}
 
/* Instantiate the reorder function for all four sample widths of one layout. */
#define MAKE_REORDER_FUNCS(CHANNELS, LAYOUT, MAP) \
MAKE_REORDER_FUNC(int8, int8_t, CHANNELS, LAYOUT, MAP) \
MAKE_REORDER_FUNC(int16, int16_t, CHANNELS, LAYOUT, MAP) \
MAKE_REORDER_FUNC(int32, int32_t, CHANNELS, LAYOUT, MAP) \
MAKE_REORDER_FUNC(f32, float, CHANNELS, LAYOUT, MAP)
 
/* 5.0: swap the FFmpeg center/back channels into ALSA positions. */
MAKE_REORDER_FUNCS(5, out_50, \
out[0] = in[0]; \
out[1] = in[1]; \
out[2] = in[3]; \
out[3] = in[4]; \
out[4] = in[2]; \
);
 
/* 5.1: front L/R kept, rears moved forward, center/LFE to the end. */
MAKE_REORDER_FUNCS(6, out_51, \
out[0] = in[0]; \
out[1] = in[1]; \
out[2] = in[4]; \
out[3] = in[5]; \
out[4] = in[2]; \
out[5] = in[3]; \
);
 
/* 7.1: like 5.1 with the two side channels passed through. */
MAKE_REORDER_FUNCS(8, out_71, \
out[0] = in[0]; \
out[1] = in[1]; \
out[2] = in[4]; \
out[3] = in[5]; \
out[4] = in[2]; \
out[5] = in[3]; \
out[6] = in[6]; \
out[7] = in[7]; \
);
 
/* Internal sample-width classes used to pick a generated reorder function. */
#define FORMAT_I8 0
#define FORMAT_I16 1
#define FORMAT_I32 2
#define FORMAT_F32 3
 
/* Select the reorder function for `layout` matching the width in `format`. */
#define PICK_REORDER(layout)\
switch(format) {\
case FORMAT_I8: s->reorder_func = alsa_reorder_int8_out_ ##layout; break;\
case FORMAT_I16: s->reorder_func = alsa_reorder_int16_out_ ##layout; break;\
case FORMAT_I32: s->reorder_func = alsa_reorder_int32_out_ ##layout; break;\
case FORMAT_F32: s->reorder_func = alsa_reorder_f32_out_ ##layout; break;\
}
 
/**
 * Select a channel-reordering function for the given codec and layout.
 *
 * @param s        device context; s->reorder_func is set on success
 * @param codec_id raw PCM codec, used only to derive the sample width
 * @param layout   AV_CH_LAYOUT_* channel layout of the stream
 * @param out      nonzero for playback; capture reordering is unsupported
 * @return 0 on success (including "no reordering needed"), AVERROR(ENOSYS) otherwise
 *
 * NOTE(review): the final check relies on s->reorder_func being NULL when no
 * PICK_REORDER branch matched — presumably priv_data is zero-initialized by
 * the caller; confirm against the avformat allocation path.
 */
static av_cold int find_reorder_func(AlsaData *s, int codec_id, uint64_t layout, int out)
{
int format;
 
/* reordering input is not currently supported */
if (!out)
return AVERROR(ENOSYS);
 
/* reordering is not needed for QUAD or 2_2 layout */
if (layout == AV_CH_LAYOUT_QUAD || layout == AV_CH_LAYOUT_2_2)
return 0;
 
/* Collapse the codec ID to a sample width; endianness is irrelevant here. */
switch (codec_id) {
case AV_CODEC_ID_PCM_S8:
case AV_CODEC_ID_PCM_U8:
case AV_CODEC_ID_PCM_ALAW:
case AV_CODEC_ID_PCM_MULAW: format = FORMAT_I8; break;
case AV_CODEC_ID_PCM_S16LE:
case AV_CODEC_ID_PCM_S16BE:
case AV_CODEC_ID_PCM_U16LE:
case AV_CODEC_ID_PCM_U16BE: format = FORMAT_I16; break;
case AV_CODEC_ID_PCM_S32LE:
case AV_CODEC_ID_PCM_S32BE:
case AV_CODEC_ID_PCM_U32LE:
case AV_CODEC_ID_PCM_U32BE: format = FORMAT_I32; break;
case AV_CODEC_ID_PCM_F32LE:
case AV_CODEC_ID_PCM_F32BE: format = FORMAT_F32; break;
default: return AVERROR(ENOSYS);
}
 
if (layout == AV_CH_LAYOUT_5POINT0_BACK || layout == AV_CH_LAYOUT_5POINT0)
PICK_REORDER(50)
else if (layout == AV_CH_LAYOUT_5POINT1_BACK || layout == AV_CH_LAYOUT_5POINT1)
PICK_REORDER(51)
else if (layout == AV_CH_LAYOUT_7POINT1)
PICK_REORDER(71)
 
return s->reorder_func ? 0 : AVERROR(ENOSYS);
}
 
/**
 * Open and configure an ALSA PCM for capture or playback.
 *
 * Negotiates access type, sample format, rate, channel count, buffer and
 * period sizes via the hw_params API, then installs the parameters.  On
 * success s->h, s->frame_size, s->period_size and (for >2 channels with a
 * known layout) s->reorder_func / s->reorder_buf are set up.
 *
 * @param ctx         format context; ctx->filename names the PCM ("" = "default")
 * @param mode        SND_PCM_STREAM_CAPTURE or SND_PCM_STREAM_PLAYBACK
 * @param sample_rate in: requested rate, out: rate actually selected
 * @param channels    number of channels
 * @param codec_id    in: requested codec or AV_CODEC_ID_NONE, out: codec used
 * @return 0 on success, a negative AVERROR code on failure
 */
av_cold int ff_alsa_open(AVFormatContext *ctx, snd_pcm_stream_t mode,
unsigned int *sample_rate,
int channels, enum AVCodecID *codec_id)
{
AlsaData *s = ctx->priv_data;
const char *audio_device;
int res, flags = 0;
snd_pcm_format_t format;
snd_pcm_t *h;
snd_pcm_hw_params_t *hw_params;
snd_pcm_uframes_t buffer_size, period_size;
uint64_t layout = ctx->streams[0]->codec->channel_layout;
 
/* Empty filename selects the ALSA "default" PCM. */
if (ctx->filename[0] == 0) audio_device = "default";
else audio_device = ctx->filename;
 
if (*codec_id == AV_CODEC_ID_NONE)
*codec_id = DEFAULT_CODEC_ID;
format = codec_id_to_pcm_format(*codec_id);
if (format == SND_PCM_FORMAT_UNKNOWN) {
av_log(ctx, AV_LOG_ERROR, "sample format 0x%04x is not supported\n", *codec_id);
return AVERROR(ENOSYS);
}
/* Bytes per interleaved frame = bytes per sample * channel count. */
s->frame_size = av_get_bits_per_sample(*codec_id) / 8 * channels;
 
if (ctx->flags & AVFMT_FLAG_NONBLOCK) {
flags = SND_PCM_NONBLOCK;
}
res = snd_pcm_open(&h, audio_device, mode, flags);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot open audio device %s (%s)\n",
audio_device, snd_strerror(res));
return AVERROR(EIO);
}
 
res = snd_pcm_hw_params_malloc(&hw_params);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot allocate hardware parameter structure (%s)\n",
snd_strerror(res));
goto fail1;
}
 
/* Start from the full configuration space, then narrow it down below. */
res = snd_pcm_hw_params_any(h, hw_params);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot initialize hardware parameter structure (%s)\n",
snd_strerror(res));
goto fail;
}
 
res = snd_pcm_hw_params_set_access(h, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot set access type (%s)\n",
snd_strerror(res));
goto fail;
}
 
res = snd_pcm_hw_params_set_format(h, hw_params, format);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot set sample format 0x%04x %d (%s)\n",
*codec_id, format, snd_strerror(res));
goto fail;
}
 
/* "near" variant: the device may pick the closest supported rate,
 * which is written back through sample_rate for the caller to inspect. */
res = snd_pcm_hw_params_set_rate_near(h, hw_params, sample_rate, 0);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot set sample rate (%s)\n",
snd_strerror(res));
goto fail;
}
 
res = snd_pcm_hw_params_set_channels(h, hw_params, channels);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot set channel count to %d (%s)\n",
channels, snd_strerror(res));
goto fail;
}
 
snd_pcm_hw_params_get_buffer_size_max(hw_params, &buffer_size);
buffer_size = FFMIN(buffer_size, ALSA_BUFFER_SIZE_MAX);
/* TODO: maybe use ctx->max_picture_buffer somehow */
res = snd_pcm_hw_params_set_buffer_size_near(h, hw_params, &buffer_size);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot set ALSA buffer size (%s)\n",
snd_strerror(res));
goto fail;
}
 
/* Smallest available period gives the lowest latency; fall back to a
 * quarter of the buffer if the driver reports 0. */
snd_pcm_hw_params_get_period_size_min(hw_params, &period_size, NULL);
if (!period_size)
period_size = buffer_size / 4;
res = snd_pcm_hw_params_set_period_size_near(h, hw_params, &period_size, NULL);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot set ALSA period size (%s)\n",
snd_strerror(res));
goto fail;
}
s->period_size = period_size;
 
/* Commit the negotiated parameter set to the device. */
res = snd_pcm_hw_params(h, hw_params);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot set parameters (%s)\n",
snd_strerror(res));
goto fail;
}
 
snd_pcm_hw_params_free(hw_params);
 
if (channels > 2 && layout) {
if (find_reorder_func(s, *codec_id, layout, mode == SND_PCM_STREAM_PLAYBACK) < 0) {
char name[128];
av_get_channel_layout_string(name, sizeof(name), channels, layout);
av_log(ctx, AV_LOG_WARNING, "ALSA channel layout unknown or unimplemented for %s %s.\n",
name, mode == SND_PCM_STREAM_PLAYBACK ? "playback" : "capture");
}
if (s->reorder_func) {
s->reorder_buf_size = buffer_size;
s->reorder_buf = av_malloc(s->reorder_buf_size * s->frame_size);
if (!s->reorder_buf)
/* NOTE(review): this reports AVERROR(EIO) for an allocation
 * failure; AVERROR(ENOMEM) would be more accurate. */
goto fail1;
}
}
 
s->h = h;
return 0;
 
fail:
snd_pcm_hw_params_free(hw_params);
fail1:
snd_pcm_close(h);
return AVERROR(EIO);
}
 
/**
 * Tear down an ALSA device context: free the reorder scratch buffer,
 * drop the capture-side timestamp filter, and close the PCM handle.
 *
 * @param s1 media file handle whose priv_data is an AlsaData
 * @return always 0
 */
av_cold int ff_alsa_close(AVFormatContext *s1)
{
    AlsaData *ad = s1->priv_data;

    av_freep(&ad->reorder_buf);
    /* The timefilter is only ever created by the input (capture) device;
     * the branch compiles away entirely when the indev is disabled. */
    if (CONFIG_ALSA_INDEV)
        ff_timefilter_destroy(ad->timefilter);
    snd_pcm_close(ad->h);
    return 0;
}
 
/**
 * Attempt recovery after an ALSA xrun (buffer under/overrun).
 *
 * @param s1  media file handle whose priv_data is an AlsaData
 * @param err the negative errno returned by the failed read/write call
 * @return 0 (or the snd_pcm_prepare result) if recovery succeeded,
 *         AVERROR(EIO) if re-preparing failed, -1 for a suspend event,
 *         or err unchanged for any other error
 */
int ff_alsa_xrun_recover(AVFormatContext *s1, int err)
{
    AlsaData *ad = s1->priv_data;
    snd_pcm_t *pcm = ad->h;

    av_log(s1, AV_LOG_WARNING, "ALSA buffer xrun.\n");
    switch (err) {
    case -EPIPE:
        /* Classic xrun: re-prepare the PCM so I/O can resume. */
        err = snd_pcm_prepare(pcm);
        if (err < 0) {
            av_log(s1, AV_LOG_ERROR, "cannot recover from underrun (snd_pcm_prepare failed: %s)\n", snd_strerror(err));

            return AVERROR(EIO);
        }
        break;
    case -ESTRPIPE:
        /* Stream suspended (power management); resuming is not implemented. */
        av_log(s1, AV_LOG_ERROR, "-ESTRPIPE... Unsupported!\n");

        return -1;
    }
    return err;
}
 
/**
 * Grow the channel-reorder scratch buffer to hold at least min_size frames.
 * The capacity is doubled repeatedly, so repeated calls amortize to O(1)
 * reallocations per frame.
 *
 * @param s        device context with a non-empty reorder buffer
 * @param min_size required capacity, in frames
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
int ff_alsa_extend_reorder_buf(AlsaData *s, int min_size)
{
    int new_size = s->reorder_buf_size;
    void *grown;

    av_assert0(new_size != 0);
    /* Double until the requested frame count fits. */
    for (; new_size < min_size; new_size *= 2)
        ;
    /* Keep the old pointer until the realloc is known to have succeeded. */
    grown = av_realloc(s->reorder_buf, new_size * s->frame_size);
    if (!grown)
        return AVERROR(ENOMEM);
    s->reorder_buf      = grown;
    s->reorder_buf_size = new_size;
    return 0;
}
/contrib/sdk/sources/ffmpeg/libavdevice/alsa-audio-dec.c
0,0 → 1,157
/*
* ALSA input and output
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* ALSA input and output: input
* @author Luca Abeni ( lucabe72 email it )
* @author Benoit Fouet ( benoit fouet free fr )
* @author Nicolas George ( nicolas george normalesup org )
*
* This avdevice decoder allows to capture audio from an ALSA (Advanced
* Linux Sound Architecture) device.
*
* The filename parameter is the name of an ALSA PCM device capable of
* capture, for example "default" or "plughw:1"; see the ALSA documentation
* for naming conventions. The empty string is equivalent to "default".
*
* The capture period is set to the lower value available for the device,
* which gives a low latency suitable for real-time capture.
*
* The PTS are an Unix time in microsecond.
*
* Due to a bug in the ALSA library
* (https://bugtrack.alsa-project.org/alsa-bug/view.php?id=4308), this
* decoder does not work with certain ALSA plugins, especially the dsnoop
* plugin.
*/
 
#include <alsa/asoundlib.h>
#include "libavformat/internal.h"
#include "libavutil/opt.h"
#include "libavutil/mathematics.h"
#include "libavutil/time.h"
 
#include "avdevice.h"
#include "alsa-audio.h"
 
/**
 * Demuxer read_header: create the audio stream, open the ALSA capture PCM
 * with the user-selected rate/channels, publish the real parameters on the
 * stream, and set up the timestamp smoothing filter.
 *
 * @return 0 on success, AVERROR(ENOMEM)/AVERROR(EIO) on failure
 */
static av_cold int audio_read_header(AVFormatContext *s1)
{
AlsaData *s = s1->priv_data;
AVStream *st;
int ret;
enum AVCodecID codec_id;
 
st = avformat_new_stream(s1, NULL);
if (!st) {
av_log(s1, AV_LOG_ERROR, "Cannot add stream\n");
 
return AVERROR(ENOMEM);
}
codec_id = s1->audio_codec_id;
 
/* ff_alsa_open may adjust s->sample_rate and codec_id to what the
 * device actually supports; the adjusted values are used below. */
ret = ff_alsa_open(s1, SND_PCM_STREAM_CAPTURE, &s->sample_rate, s->channels,
&codec_id);
if (ret < 0) {
return AVERROR(EIO);
}
 
/* take real parameters */
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = codec_id;
st->codec->sample_rate = s->sample_rate;
st->codec->channels = s->channels;
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
/* microseconds instead of seconds, MHz instead of Hz */
s->timefilter = ff_timefilter_new(1000000.0 / s->sample_rate,
s->period_size, 1.5E-6);
if (!s->timefilter)
goto fail;
 
return 0;
 
fail:
snd_pcm_close(s->h);
return AVERROR(EIO);
}
 
/**
 * Demuxer read_packet: capture one ALSA period into a new packet and
 * timestamp it with a smoothed wall-clock time.
 *
 * The raw timestamp is "now" minus the time represented by the frames still
 * queued in the device plus the frames just read; the timefilter then
 * smooths jitter across successive periods.
 *
 * @return 0 on success, AVERROR(EAGAIN) in non-blocking mode with no data,
 *         AVERROR(EIO) on unrecoverable device errors
 */
static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
AlsaData *s = s1->priv_data;
int res;
int64_t dts;
snd_pcm_sframes_t delay = 0;
 
if (av_new_packet(pkt, s->period_size * s->frame_size) < 0) {
return AVERROR(EIO);
}
 
/* Retry the read across recoverable xruns; the timefilter is reset after
 * each recovery because the capture clock has a discontinuity. */
while ((res = snd_pcm_readi(s->h, pkt->data, s->period_size)) < 0) {
if (res == -EAGAIN) {
av_free_packet(pkt);
 
return AVERROR(EAGAIN);
}
if (ff_alsa_xrun_recover(s1, res) < 0) {
av_log(s1, AV_LOG_ERROR, "ALSA read error: %s\n",
snd_strerror(res));
av_free_packet(pkt);
 
return AVERROR(EIO);
}
ff_timefilter_reset(s->timefilter);
}
 
dts = av_gettime();
snd_pcm_delay(s->h, &delay);
/* Back-date to when the first captured frame actually hit the device. */
dts -= av_rescale(delay + res, 1000000, s->sample_rate);
pkt->pts = ff_timefilter_update(s->timefilter, dts, s->last_period);
s->last_period = res;
 
/* res is the number of frames actually read; shrink the packet to fit. */
pkt->size = res * s->frame_size;
 
return 0;
}
 
/* User-settable private options for the capture device; both map straight
 * onto AlsaData fields and are read by audio_read_header(). */
static const AVOption options[] = {
{ "sample_rate", "", offsetof(AlsaData, sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ "channels", "", offsetof(AlsaData, channels), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ NULL },
};
 
/* AVClass exposing the options above through the AVOptions API. */
static const AVClass alsa_demuxer_class = {
.class_name = "ALSA demuxer",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
 
/* Input-device description registered by avdevice_register_all();
 * AVFMT_NOFILE: the "filename" is an ALSA PCM name, not a real file. */
AVInputFormat ff_alsa_demuxer = {
.name = "alsa",
.long_name = NULL_IF_CONFIG_SMALL("ALSA audio input"),
.priv_data_size = sizeof(AlsaData),
.read_header = audio_read_header,
.read_packet = audio_read_packet,
.read_close = ff_alsa_close,
.flags = AVFMT_NOFILE,
.priv_class = &alsa_demuxer_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/alsa-audio-enc.c
0,0 → 1,129
/*
* ALSA input and output
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* ALSA input and output: output
* @author Luca Abeni ( lucabe72 email it )
* @author Benoit Fouet ( benoit fouet free fr )
*
* This avdevice encoder allows to play audio to an ALSA (Advanced Linux
* Sound Architecture) device.
*
* The filename parameter is the name of an ALSA PCM device capable of
* capture, for example "default" or "plughw:1"; see the ALSA documentation
* for naming conventions. The empty string is equivalent to "default".
*
* The playback period is set to the lower value available for the device,
* which gives a low latency suitable for real-time playback.
*/
 
#include <alsa/asoundlib.h>
 
#include "libavutil/time.h"
#include "libavformat/internal.h"
#include "avdevice.h"
#include "alsa-audio.h"
 
/**
 * Muxer write_header: open the ALSA playback PCM with the stream's codec,
 * rate and channel count, and reject the device if it cannot provide the
 * exact sample rate requested (no resampling is done here).
 *
 * @param s1 format context; stream 0 carries the audio parameters
 * @return 0 on success, a negative AVERROR code on failure
 */
static av_cold int audio_write_header(AVFormatContext *s1)
{
    AlsaData *s = s1->priv_data;
    AVStream *st;
    unsigned int sample_rate;
    enum AVCodecID codec_id;
    int res;

    st = s1->streams[0];
    sample_rate = st->codec->sample_rate;
    codec_id = st->codec->codec_id;
    res = ff_alsa_open(s1, SND_PCM_STREAM_PLAYBACK, &sample_rate,
                       st->codec->channels, &codec_id);
    /* Bug fix: the open result was previously never checked, so a failed
     * open fell through to the sample-rate comparison and the fail path
     * could call snd_pcm_close() on a handle that was never opened. */
    if (res < 0)
        return res;
    /* ff_alsa_open may have negotiated a nearby rate; since this muxer does
     * not resample, anything but an exact match is an error. */
    if (sample_rate != st->codec->sample_rate) {
        av_log(s1, AV_LOG_ERROR,
               "sample rate %d not available, nearest is %d\n",
               st->codec->sample_rate, sample_rate);
        goto fail;
    }
    avpriv_set_pts_info(st, 64, 1, sample_rate);

    return res;

fail:
    snd_pcm_close(s->h);
    return AVERROR(EIO);
}
 
/**
 * Muxer write_packet: optionally reorder channels into ALSA layout, then
 * write the packet's frames to the device, retrying across recoverable
 * xruns.
 *
 * @return 0 on success, AVERROR(EAGAIN) in non-blocking mode when the
 *         device buffer is full, AVERROR(EIO) on unrecoverable errors
 */
static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
{
AlsaData *s = s1->priv_data;
int res;
int size = pkt->size;
uint8_t *buf = pkt->data;
 
/* Convert byte count to frame count, the unit snd_pcm_writei expects. */
size /= s->frame_size;
if (s->reorder_func) {
if (size > s->reorder_buf_size)
if (ff_alsa_extend_reorder_buf(s, size))
return AVERROR(ENOMEM);
/* Write from the reordered copy, leaving the packet data untouched. */
s->reorder_func(buf, s->reorder_buf, size);
buf = s->reorder_buf;
}
while ((res = snd_pcm_writei(s->h, buf, size)) < 0) {
if (res == -EAGAIN) {
 
return AVERROR(EAGAIN);
}
 
if (ff_alsa_xrun_recover(s1, res) < 0) {
av_log(s1, AV_LOG_ERROR, "ALSA write error: %s\n",
snd_strerror(res));
 
return AVERROR(EIO);
}
}
 
return 0;
}
 
/**
 * Report the device-side output position: the wall-clock time of the query
 * and the DTS of the sample currently leaving the device (stream DTS minus
 * the frames still queued in the ALSA buffer).
 */
static void
audio_get_output_timestamp(AVFormatContext *s1, int stream,
                           int64_t *dts, int64_t *wall)
{
    AlsaData *ad = s1->priv_data;
    snd_pcm_sframes_t queued_frames = 0;

    *wall = av_gettime();
    snd_pcm_delay(ad->h, &queued_frames);
    *dts = s1->streams[0]->cur_dts - queued_frames;
}
 
/* Output-device description registered by avdevice_register_all();
 * AVFMT_NOFILE: the "filename" is an ALSA PCM name, not a real file. */
AVOutputFormat ff_alsa_muxer = {
.name = "alsa",
.long_name = NULL_IF_CONFIG_SMALL("ALSA audio output"),
.priv_data_size = sizeof(AlsaData),
.audio_codec = DEFAULT_CODEC_ID,
.video_codec = AV_CODEC_ID_NONE,
.write_header = audio_write_header,
.write_packet = audio_write_packet,
.write_trailer = ff_alsa_close,
.get_output_timestamp = audio_get_output_timestamp,
.flags = AVFMT_NOFILE,
};
/contrib/sdk/sources/ffmpeg/libavdevice/alsa-audio.h
0,0 → 1,101
/*
* ALSA input and output
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* ALSA input and output: definitions and structures
* @author Luca Abeni ( lucabe72 email it )
* @author Benoit Fouet ( benoit fouet free fr )
*/
 
#ifndef AVDEVICE_ALSA_AUDIO_H
#define AVDEVICE_ALSA_AUDIO_H
 
#include <alsa/asoundlib.h>
#include "config.h"
#include "libavutil/log.h"
#include "timefilter.h"
#include "avdevice.h"
 
/* XXX: we make the assumption that the soundcard accepts this format */
/* XXX: find better solution with "preinit" method, needed also in
other formats */
#define DEFAULT_CODEC_ID AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE)
 
typedef void (*ff_reorder_func)(const void *, void *, int);
 
#define ALSA_BUFFER_SIZE_MAX 65536
 
/* Shared per-device context for both the ALSA demuxer and muxer. */
typedef struct AlsaData {
AVClass *class; ///< AVOptions class (referenced by the option tables)
snd_pcm_t *h; ///< open ALSA PCM handle
int frame_size; ///< bytes per sample * channels
int period_size; ///< preferred size for reads and writes, in frames
int sample_rate; ///< sample rate set by user
int channels; ///< number of channels set by user
int last_period; ///< frame count of the previous read, fed to the timefilter
TimeFilter *timefilter; ///< timestamp smoothing filter (capture side only)
void (*reorder_func)(const void *, void *, int); ///< channel reordering callback, or NULL if none needed
void *reorder_buf; ///< scratch buffer the reorder callback writes into
int reorder_buf_size; ///< in frames
} AlsaData;
 
/**
* Open an ALSA PCM.
*
* @param s media file handle
* @param mode either SND_PCM_STREAM_CAPTURE or SND_PCM_STREAM_PLAYBACK
* @param sample_rate in: requested sample rate;
* out: actually selected sample rate
* @param channels number of channels
* @param codec_id in: requested AVCodecID or AV_CODEC_ID_NONE;
* out: actually selected AVCodecID, changed only if
* AV_CODEC_ID_NONE was requested
*
* @return 0 if OK, AVERROR_xxx on error
*/
int ff_alsa_open(AVFormatContext *s, snd_pcm_stream_t mode,
unsigned int *sample_rate,
int channels, enum AVCodecID *codec_id);
 
/**
* Close the ALSA PCM.
*
* @param s1 media file handle
*
* @return 0
*/
int ff_alsa_close(AVFormatContext *s1);
 
/**
* Try to recover from ALSA buffer underrun.
*
* @param s1 media file handle
* @param err error code reported by the previous ALSA call
*
* @return 0 if OK, AVERROR_xxx on error
*/
int ff_alsa_xrun_recover(AVFormatContext *s1, int err);
 
int ff_alsa_extend_reorder_buf(AlsaData *s, int size);
 
#endif /* AVDEVICE_ALSA_AUDIO_H */
/contrib/sdk/sources/ffmpeg/libavdevice/avdevice-55.def
0,0 → 1,6
EXPORTS
DllStartup
avdevice_configuration
avdevice_license
avdevice_register_all
avdevice_version
/contrib/sdk/sources/ffmpeg/libavdevice/avdevice-55.orig.def
0,0 → 1,6
EXPORTS
DllStartup @1
avdevice_configuration @2
avdevice_license @3
avdevice_register_all @4
avdevice_version @5
/contrib/sdk/sources/ffmpeg/libavdevice/avdevice.c
0,0 → 1,38
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/avassert.h"
#include "avdevice.h"
#include "config.h"
 
/* Return LIBAVDEVICE_VERSION_INT; the assert documents that this tree is an
 * FFmpeg build (micro version >= 100 distinguishes FFmpeg from Libav). */
unsigned avdevice_version(void)
{
av_assert0(LIBAVDEVICE_VERSION_MICRO >= 100);
return LIBAVDEVICE_VERSION_INT;
}
 
/* Return the build-time ./configure command line baked in by config.h. */
const char * avdevice_configuration(void)
{
return FFMPEG_CONFIGURATION;
}
 
const char * avdevice_license(void)
{
#define LICENSE_PREFIX "libavdevice license: "
/* The two literals concatenate into one string; adding sizeof(prefix)-1
 * advances the pointer past the prefix, so callers get just the license
 * name while the prefixed form stays available in the binary. */
return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
}
/contrib/sdk/sources/ffmpeg/libavdevice/avdevice.h
0,0 → 1,69
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVDEVICE_AVDEVICE_H
#define AVDEVICE_AVDEVICE_H
 
#include "version.h"
 
/**
* @file
* @ingroup lavd
* Main libavdevice API header
*/
 
/**
* @defgroup lavd Special devices muxing/demuxing library
* @{
* Libavdevice is a complementary library to @ref libavf "libavformat". It
* provides various "special" platform-specific muxers and demuxers, e.g. for
* grabbing devices, audio capture and playback etc. As a consequence, the
* (de)muxers in libavdevice are of the AVFMT_NOFILE type (they use their own
* I/O functions). The filename passed to avformat_open_input() often does not
* refer to an actually existing file, but has some special device-specific
* meaning - e.g. for x11grab it is the display name.
*
* To use libavdevice, simply call avdevice_register_all() to register all
* compiled muxers and demuxers. They all use standard libavformat API.
* @}
*/
 
#include "libavformat/avformat.h"
 
/**
* Return the LIBAVDEVICE_VERSION_INT constant.
*/
unsigned avdevice_version(void);
 
/**
* Return the libavdevice build-time configuration.
*/
const char *avdevice_configuration(void);
 
/**
* Return the libavdevice license.
*/
const char *avdevice_license(void);
 
/**
* Initialize libavdevice and register all the input and output devices.
* @warning This function is not thread safe.
*/
void avdevice_register_all(void);
 
#endif /* AVDEVICE_AVDEVICE_H */
/contrib/sdk/sources/ffmpeg/libavdevice/avdevice.lib
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/contrib/sdk/sources/ffmpeg/libavdevice/bktr.c
0,0 → 1,348
/*
* *BSD video grab interface
* Copyright (c) 2002 Steve O'Hara-Smith
* based on
* Linux video grab interface
* Copyright (c) 2000,2001 Gerard Lantau
* and
* simple_grab.c Copyright (c) 1999 Roger Hardiman
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavformat/internal.h"
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/time.h"
#if HAVE_DEV_BKTR_IOCTL_METEOR_H && HAVE_DEV_BKTR_IOCTL_BT848_H
# include <dev/bktr/ioctl_meteor.h>
# include <dev/bktr/ioctl_bt848.h>
#elif HAVE_MACHINE_IOCTL_METEOR_H && HAVE_MACHINE_IOCTL_BT848_H
# include <machine/ioctl_meteor.h>
# include <machine/ioctl_bt848.h>
#elif HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H && HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H
# include <dev/video/meteor/ioctl_meteor.h>
# include <dev/video/bktr/ioctl_bt848.h>
#elif HAVE_DEV_IC_BT8XX_H
# include <dev/ic/bt8xx.h>
#endif
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <signal.h>
#include <stdint.h>
#include "avdevice.h"
 
/* Private context for the *BSD bktr video grab device. */
typedef struct {
AVClass *class; ///< AVOptions class (first member, required by av_opt)
int video_fd; ///< fd of the /dev/bktr capture device
int tuner_fd; ///< fd of /dev/tuner0, or negative if unavailable
int width, height; ///< capture geometry in pixels
uint64_t per_frame; ///< frame duration derived from the requested framerate
int standard; ///< TV standard (PAL/NTSC/SECAM... constants below)
char *framerate; /**< Set by a private option. */
} VideoData;
 
 
#define PAL 1
#define PALBDGHI 1
#define NTSC 2
#define NTSCM 2
#define SECAM 3
#define PALN 4
#define PALM 5
#define NTSCJ 6
 
/* PAL is 768 x 576. NTSC is 640 x 480 */
#define PAL_HEIGHT 576
#define SECAM_HEIGHT 576
#define NTSC_HEIGHT 480
 
#ifndef VIDEO_FORMAT
#define VIDEO_FORMAT NTSC
#endif
 
/* Map the numeric device index (0-4) onto the METEOR input selectors. */
static int bktr_dev[] = { METEOR_DEV0, METEOR_DEV1, METEOR_DEV2,
METEOR_DEV3, METEOR_DEV_SVIDEO };
 
/* NOTE(review): these are file-scope but not static, so they land in the
 * global namespace; presumably intentional for signal-handler access, but
 * worth confirming they do not clash with other translation units. */
uint8_t *video_buf; ///< mmap'ed capture buffer
size_t video_buf_size; ///< size of the capture buffer in bytes
uint64_t last_frame_time; ///< timestamp of the previously captured frame
volatile sig_atomic_t nsignals; ///< SIGUSR1 count, bumped by catchsignal()
 
 
/* SIGUSR1 handler: count notifications from the capture driver.
 * nsignals is sig_atomic_t, so the increment is async-signal-safe. */
static void catchsignal(int signum)
{
    nsignals++;
}
 
/**
 * Open and configure the bktr capture device and tuner.
 *
 * Out-of-range idev/format/frequency arguments fall back to the
 * BKTR_DEV / BKTR_FORMAT / BKTR_FREQUENCY environment variables, then to
 * defaults. Installs a SIGUSR1 handler, mmaps the frame buffer into the
 * file-scope video_buf, and starts continuous capture.
 *
 * @param video_device path of the bktr device node
 * @param width        requested frame width
 * @param height       requested frame height
 * @param format       TV standard (PAL..NTSCJ constants)
 * @param video_fd     out: descriptor of the opened video device
 * @param tuner_fd     out: descriptor of /dev/tuner0 (<0 if it failed to open)
 * @param idev         input index into bktr_dev[], or <0 to use env/default
 * @param frequency    tuner frequency (scaled by 16 for the ioctl), <=0 to skip
 * @return 0 on success, -1 on fatal error
 */
static av_cold int bktr_init(const char *video_device, int width, int height,
                             int format, int *video_fd, int *tuner_fd, int idev, double frequency)
{
    struct meteor_geomet geo;
    int h_max;
    long ioctl_frequency;
    char *arg;
    int c;
    struct sigaction act = { {0} }, old;

    /* Device index: argument, then BKTR_DEV environment, then default 1. */
    if (idev < 0 || idev > 4)
    {
        arg = getenv ("BKTR_DEV");
        if (arg)
            idev = atoi (arg);
        if (idev < 0 || idev > 4)
            idev = 1;
    }

    /* TV standard: argument, then BKTR_FORMAT environment, then default. */
    if (format < 1 || format > 6)
    {
        arg = getenv ("BKTR_FORMAT");
        if (arg)
            format = atoi (arg);
        if (format < 1 || format > 6)
            format = VIDEO_FORMAT;
    }

    /* Tuner frequency: argument, then BKTR_FREQUENCY environment. */
    if (frequency <= 0)
    {
        arg = getenv ("BKTR_FREQUENCY");
        if (arg)
            frequency = atof (arg);
        if (frequency <= 0)
            frequency = 0.0;
    }

    /* The handler counts SIGUSR1 notifications requested via METEORSSIGNAL
     * below; bktr_getframe() inspects the count for pacing diagnostics. */
    sigemptyset(&act.sa_mask);
    act.sa_handler = catchsignal;
    sigaction(SIGUSR1, &act, &old);

    /* A missing tuner is not fatal; capture can still work without it. */
    *tuner_fd = avpriv_open("/dev/tuner0", O_RDONLY);
    if (*tuner_fd < 0)
        av_log(NULL, AV_LOG_ERROR, "Warning. Tuner not opened, continuing: %s\n", strerror(errno));

    *video_fd = avpriv_open(video_device, O_RDONLY);
    if (*video_fd < 0) {
        av_log(NULL, AV_LOG_ERROR, "%s: %s\n", video_device, strerror(errno));
        return -1;
    }

    geo.rows = height;
    geo.columns = width;
    geo.frames = 1;
    geo.oformat = METEOR_GEO_YUV_422 | METEOR_GEO_YUV_12;

    /* Select the height limit and BT848 input format for the standard. */
    switch (format) {
    case PAL:   h_max = PAL_HEIGHT;   c = BT848_IFORM_F_PALBDGHI; break;
    case PALN:  h_max = PAL_HEIGHT;   c = BT848_IFORM_F_PALN;     break;
    case PALM:  h_max = PAL_HEIGHT;   c = BT848_IFORM_F_PALM;     break;
    case SECAM: h_max = SECAM_HEIGHT; c = BT848_IFORM_F_SECAM;    break;
    case NTSC:  h_max = NTSC_HEIGHT;  c = BT848_IFORM_F_NTSCM;    break;
    case NTSCJ: h_max = NTSC_HEIGHT;  c = BT848_IFORM_F_NTSCJ;    break;
    default:    h_max = PAL_HEIGHT;   c = BT848_IFORM_F_PALBDGHI; break;
    }

    /* At half height or less, capture a single field only. */
    if (height <= h_max / 2)
        geo.oformat |= METEOR_GEO_EVEN_ONLY;

    if (ioctl(*video_fd, METEORSETGEO, &geo) < 0) {
        av_log(NULL, AV_LOG_ERROR, "METEORSETGEO: %s\n", strerror(errno));
        return -1;
    }

    if (ioctl(*video_fd, BT848SFMT, &c) < 0) {
        av_log(NULL, AV_LOG_ERROR, "BT848SFMT: %s\n", strerror(errno));
        return -1;
    }

    c = bktr_dev[idev];
    if (ioctl(*video_fd, METEORSINPUT, &c) < 0) {
        av_log(NULL, AV_LOG_ERROR, "METEORSINPUT: %s\n", strerror(errno));
        return -1;
    }

    /* 12 bits per pixel for the planar YUV output selected above. */
    video_buf_size = width * height * 12 / 8;

    video_buf = (uint8_t *)mmap((caddr_t)0, video_buf_size,
                                PROT_READ, MAP_SHARED, *video_fd, (off_t)0);
    if (video_buf == MAP_FAILED) {
        av_log(NULL, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
        return -1;
    }

    if (frequency != 0.0) {
        /* The tuner ioctl takes the frequency scaled by 16. */
        ioctl_frequency = (unsigned long)(frequency*16);
        if (ioctl(*tuner_fd, TVTUNER_SETFREQ, &ioctl_frequency) < 0)
            av_log(NULL, AV_LOG_ERROR, "TVTUNER_SETFREQ: %s\n", strerror(errno));
    }

    c = AUDIO_UNMUTE;
    if (ioctl(*tuner_fd, BT848_SAUDIO, &c) < 0)
        av_log(NULL, AV_LOG_ERROR, "TVTUNER_SAUDIO: %s\n", strerror(errno));

    /* Start continuous capture; request SIGUSR1 notifications. */
    c = METEOR_CAP_CONTINOUS;
    ioctl(*video_fd, METEORCAPTUR, &c);

    c = SIGUSR1;
    ioctl(*video_fd, METEORSSIGNAL, &c);

    return 0;
}
 
/* Pace capture to the requested frame period: sleep until the next frame
 * is due, then reset the SIGUSR1 counter and remember the grab time. */
static void bktr_getframe(uint64_t per_frame)
{
    uint64_t curtime;

    curtime = av_gettime();
    if (!last_frame_time
        || ((last_frame_time + per_frame) > curtime)) {
        /* NOTE(review): on the very first call last_frame_time is 0, so the
         * unsigned subtraction below underflows and the value is truncated
         * into usleep's argument type — presumably usleep then fails and the
         * call is harmless, but confirm before relying on first-frame timing. */
        if (!usleep(last_frame_time + per_frame + per_frame / 8 - curtime)) {
            /* Slept the full period without a single driver notification:
             * report how late the frame is. */
            if (!nsignals)
                av_log(NULL, AV_LOG_INFO,
                       "SLEPT NO signals - %d microseconds late\n",
                       (int)(av_gettime() - last_frame_time - per_frame));
        }
    }
    nsignals = 0;
    last_frame_time = curtime;
}
 
 
/* Read one captured frame. Only a single picture is in flight at any
 * time, so the shared mmap'ed buffer can be copied straight into pkt. */
static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoData *vd = s1->priv_data;

    if (av_new_packet(pkt, video_buf_size) < 0)
        return AVERROR(EIO);

    /* Wait until the next frame is due before grabbing it. */
    bktr_getframe(vd->per_frame);

    pkt->pts = av_gettime();
    memcpy(pkt->data, video_buf, video_buf_size);

    return video_buf_size;
}
 
/**
 * Demuxer init: derive a frame rate from the selected TV standard (unless
 * the user forced one), create the video stream and open/configure the
 * capture device named by s1->filename.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int grab_read_header(AVFormatContext *s1)
{
    VideoData *s = s1->priv_data;
    AVStream *st;
    AVRational framerate;
    int ret = 0;

    if (!s->framerate) {
        /* Default frame rate follows the TV standard. */
        switch (s->standard) {
        case PAL:   s->framerate = av_strdup("pal");  break;
        case NTSC:  s->framerate = av_strdup("ntsc"); break;
        case SECAM: s->framerate = av_strdup("25");   break;
        default:
            av_log(s1, AV_LOG_ERROR, "Unknown standard.\n");
            ret = AVERROR(EINVAL);
            goto out;
        }
        /* av_strdup() may fail; previously a NULL string would have been
         * passed on to av_parse_video_rate(). */
        if (!s->framerate) {
            ret = AVERROR(ENOMEM);
            goto out;
        }
    }
    if ((ret = av_parse_video_rate(&framerate, s->framerate)) < 0) {
        av_log(s1, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", s->framerate);
        goto out;
    }

    st = avformat_new_stream(s1, NULL);
    if (!st) {
        ret = AVERROR(ENOMEM);
        goto out;
    }
    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in use */

    /* Frame period in microseconds, used by bktr_getframe() for pacing. */
    s->per_frame = ((uint64_t)1000000 * framerate.den) / framerate.num;

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->pix_fmt = AV_PIX_FMT_YUV420P;
    st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
    st->codec->width = s->width;
    st->codec->height = s->height;
    st->codec->time_base.den = framerate.num;
    st->codec->time_base.num = framerate.den;

    if (bktr_init(s1->filename, s->width, s->height, s->standard,
                  &s->video_fd, &s->tuner_fd, -1, 0.0) < 0) {
        ret = AVERROR(EIO);
        goto out;
    }

    nsignals = 0;
    last_frame_time = 0;

out:
    return ret;
}
 
/* Stop capture, mute the tuner, close both descriptors and release the
 * mmap'ed frame buffer. */
static int grab_read_close(AVFormatContext *s1)
{
    VideoData *vd = s1->priv_data;
    int arg;

    /* Halt continuous capture before closing the video device. */
    arg = METEOR_CAP_STOP_CONT;
    ioctl(vd->video_fd, METEORCAPTUR, &arg);
    close(vd->video_fd);

    /* Silence tuner audio before closing it. */
    arg = AUDIO_MUTE;
    ioctl(vd->tuner_fd, BT848_SAUDIO, &arg);
    close(vd->tuner_fd);

    munmap((caddr_t)video_buf, video_buf_size);

    return 0;
}
 
#define OFFSET(x) offsetof(VideoData, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
/* Private demuxer options; "standard" selects the TV norm, with one named
 * constant per supported value. The first entry now uses the OFFSET/DEC
 * macros like the rest of the table instead of spelling them out. */
static const AVOption options[] = {
    { "standard", "", OFFSET(standard), AV_OPT_TYPE_INT, {.i64 = VIDEO_FORMAT}, PAL, NTSCJ, DEC, "standard" },
    { "PAL", "", 0, AV_OPT_TYPE_CONST, {.i64 = PAL}, 0, 0, DEC, "standard" },
    { "NTSC", "", 0, AV_OPT_TYPE_CONST, {.i64 = NTSC}, 0, 0, DEC, "standard" },
    { "SECAM", "", 0, AV_OPT_TYPE_CONST, {.i64 = SECAM}, 0, 0, DEC, "standard" },
    { "PALN", "", 0, AV_OPT_TYPE_CONST, {.i64 = PALN}, 0, 0, DEC, "standard" },
    { "PALM", "", 0, AV_OPT_TYPE_CONST, {.i64 = PALM}, 0, 0, DEC, "standard" },
    { "NTSCJ", "", 0, AV_OPT_TYPE_CONST, {.i64 = NTSCJ}, 0, 0, DEC, "standard" },
    { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = "vga"}, 0, 0, DEC },
    { "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
    { NULL },
};
 
/* AVClass wiring that exposes the private options above to av_opt. */
static const AVClass bktr_class = {
    .class_name = "BKTR grab interface",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* bktr grab demuxer; AVFMT_NOFILE because the "filename" is the device
 * node opened directly by bktr_init(), not an AVIOContext. */
AVInputFormat ff_bktr_demuxer = {
    .name           = "bktr",
    .long_name      = NULL_IF_CONFIG_SMALL("video grab"),
    .priv_data_size = sizeof(VideoData),
    .read_header    = grab_read_header,
    .read_packet    = grab_read_packet,
    .read_close     = grab_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &bktr_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/caca.c
0,0 → 1,240
/*
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <caca.h>
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avdevice.h"
 
/* Muxer private state; all option fields are mapped by options[] below. */
typedef struct CACAContext {
    AVClass *class;         /* class for private options */
    AVFormatContext *ctx;   /* back pointer used by the list_* helpers */
    char *window_title;     /* display title; defaults to the output filename */
    int window_width, window_height; /* forced window size; 0 = use canvas size */

    caca_canvas_t *canvas;  /* drawing surface */
    caca_display_t *display;/* output display */
    caca_dither_t *dither;  /* rgb24 -> character-art dither context */

    char *algorithm, *antialias; /* dither settings, "default" by default */
    char *charset, *color;
    char *driver;           /* display driver name, NULL = auto */

    char *list_dither;      /* which dither option list to print, if any */
    int list_drivers;       /* nonzero: print driver list and exit */
} CACAContext;
 
/* Release every libcaca resource; pointers are cleared afterwards so a
 * second invocation (e.g. from the write_header failure path) is safe. */
static int caca_write_trailer(AVFormatContext *s)
{
    CACAContext *c = s->priv_data;

    av_freep(&c->window_title);

    if (c->display)
        caca_free_display(c->display);
    if (c->dither)
        caca_free_dither(c->dither);
    if (c->canvas)
        caca_free_canvas(c->canvas);

    c->display = NULL;
    c->dither  = NULL;
    c->canvas  = NULL;

    return 0;
}
 
/* Print the name/description pairs of every display driver libcaca knows. */
static void list_drivers(CACAContext *c)
{
    const char *const *entry = caca_get_display_driver_list();

    av_log(c->ctx, AV_LOG_INFO, "Available drivers:\n");
    while (entry[0]) {
        av_log(c->ctx, AV_LOG_INFO, "%s : %s\n", entry[0], entry[1]);
        entry += 2;
    }
}
 
/* Generate a list_dither_<thing>() helper that prints the name/description
 * pairs returned by the matching caca_get_dither_<thing>_list(). */
#define DEFINE_LIST_DITHER(thing, thing_str) \
static void list_dither_## thing(CACAContext *c) \
{ \
    const char *const *thing = caca_get_dither_## thing ##_list(c->dither); \
    int i; \
 \
    av_log(c->ctx, AV_LOG_INFO, "Available %s:\n", thing_str); \
    for (i = 0; thing[i]; i += 2) \
        av_log(c->ctx, AV_LOG_INFO, "%s : %s\n", thing[i], thing[i + 1]); \
}

/* One helper per dither property supported by the list_dither option. */
DEFINE_LIST_DITHER(color, "colors");
DEFINE_LIST_DITHER(charset, "charsets");
DEFINE_LIST_DITHER(algorithm, "algorithms");
DEFINE_LIST_DITHER(antialias, "antialias");
 
/**
 * Muxer init: handle the list_* introspection options, validate that the
 * single input stream is rgb24 rawvideo, then create the libcaca canvas,
 * dither and display.
 *
 * @return 0 on success, AVERROR_EXIT after a list_* option, or a negative
 *         AVERROR code on failure
 */
static int caca_write_header(AVFormatContext *s)
{
    CACAContext *c = s->priv_data;
    AVStream *st = s->streams[0];
    AVCodecContext *encctx = st->codec;
    int ret, bpp;

    c->ctx = s;
    if (c->list_drivers) {
        list_drivers(c);
        return AVERROR_EXIT;
    }
    if (c->list_dither) {
        if (!strcmp(c->list_dither, "colors")) {
            list_dither_color(c);
        } else if (!strcmp(c->list_dither, "charsets")) {
            list_dither_charset(c);
        } else if (!strcmp(c->list_dither, "algorithms")) {
            list_dither_algorithm(c);
        } else if (!strcmp(c->list_dither, "antialiases")) {
            list_dither_antialias(c);
        } else {
            /* fixed: the original message was missing the closing quote
             * after 'algorithms' */
            av_log(s, AV_LOG_ERROR,
                   "Invalid argument '%s', for 'list_dither' option\n"
                   "Argument must be one of 'algorithms', 'antialiases', 'charsets', 'colors'\n",
                   c->list_dither);
            return AVERROR(EINVAL);
        }
        return AVERROR_EXIT;
    }

    if (   s->nb_streams > 1
        || encctx->codec_type != AVMEDIA_TYPE_VIDEO
        || encctx->codec_id   != AV_CODEC_ID_RAWVIDEO) {
        av_log(s, AV_LOG_ERROR, "Only supports one rawvideo stream\n");
        return AVERROR(EINVAL);
    }

    if (encctx->pix_fmt != AV_PIX_FMT_RGB24) {
        av_log(s, AV_LOG_ERROR,
               "Unsupported pixel format '%s', choose rgb24\n",
               av_get_pix_fmt_name(encctx->pix_fmt));
        return AVERROR(EINVAL);
    }

    c->canvas = caca_create_canvas(c->window_width, c->window_height);
    if (!c->canvas) {
        av_log(s, AV_LOG_ERROR, "Failed to create canvas\n");
        /* libcaca does not guarantee errno is set; AVERROR(0) would be 0
         * and this failure would be reported as success. */
        ret = errno ? AVERROR(errno) : AVERROR(EINVAL);
        goto fail;
    }

    bpp = av_get_bits_per_pixel(av_pix_fmt_desc_get(encctx->pix_fmt));
    c->dither = caca_create_dither(bpp, encctx->width, encctx->height,
                                   bpp / 8 * encctx->width,
                                   0x0000ff, 0x00ff00, 0xff0000, 0);
    if (!c->dither) {
        av_log(s, AV_LOG_ERROR, "Failed to create dither\n");
        ret = errno ? AVERROR(errno) : AVERROR(EINVAL);
        goto fail;
    }

#define CHECK_DITHER_OPT(opt) \
    if (caca_set_dither_##opt(c->dither, c->opt) < 0) { \
        ret = errno ? AVERROR(errno) : AVERROR(EINVAL); \
        av_log(s, AV_LOG_ERROR, "Failed to set value '%s' for option '%s'\n", \
               c->opt, #opt); \
        goto fail; \
    }
    CHECK_DITHER_OPT(algorithm);
    CHECK_DITHER_OPT(antialias);
    CHECK_DITHER_OPT(charset);
    CHECK_DITHER_OPT(color);

    c->display = caca_create_display_with_driver(c->canvas, c->driver);
    if (!c->display) {
        av_log(s, AV_LOG_ERROR, "Failed to create display\n");
        list_drivers(c);
        ret = errno ? AVERROR(errno) : AVERROR(EINVAL);
        goto fail;
    }

    if (!c->window_width || !c->window_height) {
        c->window_width  = caca_get_canvas_width(c->canvas);
        c->window_height = caca_get_canvas_height(c->canvas);
    }

    if (!c->window_title)
        c->window_title = av_strdup(s->filename);
    caca_set_display_title(c->display, c->window_title);
    /* Refresh period of one frame, converted to AV_TIME_BASE units. */
    caca_set_display_time(c->display, av_rescale_q(1, st->codec->time_base, AV_TIME_BASE_Q));

    return 0;

fail:
    caca_write_trailer(s);
    return ret;
}
 
/* Render one rgb24 frame: dither it onto the canvas, then refresh. */
static int caca_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    CACAContext *cc = s->priv_data;

    caca_dither_bitmap(cc->canvas, 0, 0, cc->window_width, cc->window_height,
                       cc->dither, pkt->data);
    caca_refresh_display(cc->display);

    return 0;
}
 
#define OFFSET(x) offsetof(CACAContext,x)
#define ENC AV_OPT_FLAG_ENCODING_PARAM

/* Muxer private options. Note: "window_size" is an IMAGE_SIZE option that
 * writes both window_width and window_height starting at OFFSET(window_width). */
static const AVOption options[] = {
    { "window_size", "set window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL }, 0, 0, ENC},
    { "window_title", "set window title", OFFSET(window_title), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, ENC },
    { "driver", "set display driver", OFFSET(driver), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, ENC },
    { "algorithm", "set dithering algorithm", OFFSET(algorithm), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
    { "antialias", "set antialias method", OFFSET(antialias), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
    { "charset", "set charset used to render output", OFFSET(charset), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
    { "color", "set color used to render output", OFFSET(color), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
    { "list_drivers", "list available drivers", OFFSET(list_drivers), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, ENC, "list_drivers" },
    { "true", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, 0, 0, ENC, "list_drivers" },
    { "false", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0}, 0, 0, ENC, "list_drivers" },
    /* fixed: a string option's default must initialize the .str union
     * member; the original {.dbl=0} set the wrong field. */
    { "list_dither", "list available dither options", OFFSET(list_dither), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 1, ENC, "list_dither" },
    { "algorithms", NULL, 0, AV_OPT_TYPE_CONST, {.str = "algorithms"}, 0, 0, ENC, "list_dither" },
    { "antialiases", NULL, 0, AV_OPT_TYPE_CONST, {.str = "antialiases"},0, 0, ENC, "list_dither" },
    { "charsets", NULL, 0, AV_OPT_TYPE_CONST, {.str = "charsets"}, 0, 0, ENC, "list_dither" },
    { "colors", NULL, 0, AV_OPT_TYPE_CONST, {.str = "colors"}, 0, 0, ENC, "list_dither" },
    { NULL },
};
 
/* AVClass wiring that exposes the private options above to av_opt. */
static const AVClass caca_class = {
    .class_name = "caca_outdev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* libcaca output device; AVFMT_NOFILE because frames go to the caca
 * display, not to an AVIOContext. */
AVOutputFormat ff_caca_muxer = {
    .name           = "caca",
    .long_name      = NULL_IF_CONFIG_SMALL("caca (color ASCII art) output device"),
    .priv_data_size = sizeof(CACAContext),
    .audio_codec    = AV_CODEC_ID_NONE,
    .video_codec    = AV_CODEC_ID_RAWVIDEO,
    .write_header   = caca_write_header,
    .write_packet   = caca_write_packet,
    .write_trailer  = caca_write_trailer,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &caca_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/dshow.c
0,0 → 1,1095
/*
* Directshow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "libavformat/internal.h"
#include "libavformat/riff.h"
#include "avdevice.h"
#include "dshow_capture.h"
#include "libavcodec/raw.h"
 
/* Demuxer private state; index [2] arrays are indexed by dshowDeviceType
 * (video/audio device slots). */
struct dshow_ctx {
    const AVClass *class;            /* class for private options */

    IGraphBuilder *graph;            /* the DirectShow filter graph */

    char *device_name[2];            /* requested device names per type */
    int video_device_number;         /* skip count among same-named devices */
    int audio_device_number;

    int list_options;                /* list capabilities instead of capturing */
    int list_devices;                /* list device names instead of capturing */
    int audio_buffer_size;           /* requested audio buffer, milliseconds */

    IBaseFilter *device_filter[2];   /* capture source filters */
    IPin *device_pin[2];             /* their selected output pins */
    libAVFilter *capture_filter[2];  /* our grabber filters */
    libAVPin *capture_pin[2];        /* grabber input pins */

    HANDLE mutex;                    /* protects pktl/curbufsize */
    HANDLE event[2]; /* event[0] is set by DirectShow
                      * event[1] is set by callback() */
    AVPacketList *pktl;              /* queue filled by callback() */

    int eof;

    int64_t curbufsize;              /* bytes currently queued in pktl */
    unsigned int video_frame_num;    /* counter used by shall_we_drop() */

    IMediaControl *control;          /* graph run/stop interface */
    IMediaEvent *media_event;        /* graph event interface */

    enum AVPixelFormat pixel_format; /* requested capture pixel format */
    enum AVCodecID video_codec_id;   /* requested capture codec */
    char *framerate;                 /* requested frame rate (string option) */

    int requested_width;
    int requested_height;
    AVRational requested_framerate;  /* parsed from framerate */

    int sample_rate;                 /* requested audio parameters */
    int sample_size;
    int channels;
};
 
/* Map a BITMAPINFOHEADER compression/bit-count pair to a pixel format.
 * Uncompressed RGB modes are resolved by bit depth; every other fourcc is
 * looked up in the shared raw-video tag table. */
static enum AVPixelFormat dshow_pixfmt(DWORD biCompression, WORD biBitCount)
{
    if (biCompression == BI_BITFIELDS || biCompression == BI_RGB) {
        switch (biBitCount) { /* 1-8 are untested */
        case 1:  return AV_PIX_FMT_MONOWHITE;
        case 4:  return AV_PIX_FMT_RGB4;
        case 8:  return AV_PIX_FMT_RGB8;
        case 16: return AV_PIX_FMT_RGB555;
        case 24: return AV_PIX_FMT_BGR24;
        case 32: return AV_PIX_FMT_RGB32;
        }
    }
    return avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, biCompression); // all others
}
 
/* Tear down the capture session: stop the graph, remove and release every
 * filter and pin, free queued packets and uninitialize COM. */
static int
dshow_read_close(AVFormatContext *s)
{
    struct dshow_ctx *ctx = s->priv_data;
    AVPacketList *pktl;

    if (ctx->control) {
        IMediaControl_Stop(ctx->control);
        IMediaControl_Release(ctx->control);
    }

    if (ctx->media_event)
        IMediaEvent_Release(ctx->media_event);

    if (ctx->graph) {
        IEnumFilters *fenum;
        int r;
        r = IGraphBuilder_EnumFilters(ctx->graph, &fenum);
        if (r == S_OK) {
            IBaseFilter *f;
            IEnumFilters_Reset(fenum);
            while (IEnumFilters_Next(fenum, 1, &f, NULL) == S_OK) {
                if (IGraphBuilder_RemoveFilter(ctx->graph, f) == S_OK)
                    IEnumFilters_Reset(fenum); /* When a filter is removed,
                                                * the list must be reset. */
                IBaseFilter_Release(f);
            }
            IEnumFilters_Release(fenum);
        }
        IGraphBuilder_Release(ctx->graph);
    }

    /* Release our grabber pins/filters before the device objects. */
    if (ctx->capture_pin[VideoDevice])
        libAVPin_Release(ctx->capture_pin[VideoDevice]);
    if (ctx->capture_pin[AudioDevice])
        libAVPin_Release(ctx->capture_pin[AudioDevice]);
    if (ctx->capture_filter[VideoDevice])
        libAVFilter_Release(ctx->capture_filter[VideoDevice]);
    if (ctx->capture_filter[AudioDevice])
        libAVFilter_Release(ctx->capture_filter[AudioDevice]);

    if (ctx->device_pin[VideoDevice])
        IPin_Release(ctx->device_pin[VideoDevice]);
    if (ctx->device_pin[AudioDevice])
        IPin_Release(ctx->device_pin[AudioDevice]);
    if (ctx->device_filter[VideoDevice])
        IBaseFilter_Release(ctx->device_filter[VideoDevice]);
    if (ctx->device_filter[AudioDevice])
        IBaseFilter_Release(ctx->device_filter[AudioDevice]);

    if (ctx->device_name[0])
        av_free(ctx->device_name[0]);
    if (ctx->device_name[1])
        av_free(ctx->device_name[1]);

    if(ctx->mutex)
        CloseHandle(ctx->mutex);
    if(ctx->event[0])
        CloseHandle(ctx->event[0]);
    if(ctx->event[1])
        CloseHandle(ctx->event[1]);

    /* Drain any packets the callback queued but the reader never consumed. */
    pktl = ctx->pktl;
    while (pktl) {
        AVPacketList *next = pktl->next;
        av_destruct_packet(&pktl->pkt);
        av_free(pktl);
        pktl = next;
    }

    CoUninitialize();

    return 0;
}
 
/* Duplicate a wide (UTF-16) string as a freshly allocated UTF-8 string.
 * Returns NULL on allocation failure; the caller owns the result. */
static char *dup_wchar_to_utf8(wchar_t *w)
{
    int len = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
    char *utf8 = av_malloc(len);

    if (utf8)
        WideCharToMultiByte(CP_UTF8, 0, w, -1, utf8, len, 0, 0);
    return utf8;
}
 
/* Decide whether the current frame must be dropped. The fullness threshold
 * cycles through dropscore[] with the frame counter, so occasional frames
 * survive even under sustained buffer pressure. Returns 1 to drop. */
static int shall_we_drop(AVFormatContext *s)
{
    struct dshow_ctx *ctx = s->priv_data;
    static const uint8_t dropscore[] = {62, 75, 87, 100};
    const int ndropscores = FF_ARRAY_ELEMS(dropscore);
    unsigned int buffer_fullness = (ctx->curbufsize * 100) / s->max_picture_buffer;
    unsigned int phase = ++ctx->video_frame_num % ndropscores;

    if (dropscore[phase] > buffer_fullness)
        return 0;

    av_log(s, AV_LOG_ERROR,
           "real-time buffer %d%% full! frame dropped!\n", buffer_fullness);
    return 1;
}
 
/* Grabber callback invoked by the capture filter with one media sample.
 * Under ctx->mutex: optionally drops the frame on buffer pressure,
 * otherwise appends a copy to the packet list and signals event[1] so the
 * reader wakes up. Allocation failures silently drop the sample. */
static void
callback(void *priv_data, int index, uint8_t *buf, int buf_size, int64_t time)
{
    AVFormatContext *s = priv_data;
    struct dshow_ctx *ctx = s->priv_data;
    AVPacketList **ppktl, *pktl_next;

//    dump_videohdr(s, vdhdr);

    WaitForSingleObject(ctx->mutex, INFINITE);

    if(shall_we_drop(s))
        goto fail;

    pktl_next = av_mallocz(sizeof(AVPacketList));
    if(!pktl_next)
        goto fail;

    if(av_new_packet(&pktl_next->pkt, buf_size) < 0) {
        av_free(pktl_next);
        goto fail;
    }

    pktl_next->pkt.stream_index = index;
    pktl_next->pkt.pts = time;
    memcpy(pktl_next->pkt.data, buf, buf_size);

    /* Append at the tail of the singly linked packet list. */
    for(ppktl = &ctx->pktl ; *ppktl ; ppktl = &(*ppktl)->next);
    *ppktl = pktl_next;

    ctx->curbufsize += buf_size;

    SetEvent(ctx->event[1]);
    ReleaseMutex(ctx->mutex);

    return;
fail:
    ReleaseMutex(ctx->mutex);
    return;
}
 
/**
 * Cycle through available devices using the device enumerator devenum,
 * retrieve the device with type specified by devtype and return the
 * pointer to the object found in *pfilter.
 * If pfilter is NULL, list all device names.
 *
 * The ctx->{video,audio}_device_number option selects among several
 * devices sharing the same friendly name.
 */
static int
dshow_cycle_devices(AVFormatContext *avctx, ICreateDevEnum *devenum,
                    enum dshowDeviceType devtype, IBaseFilter **pfilter)
{
    struct dshow_ctx *ctx = avctx->priv_data;
    IBaseFilter *device_filter = NULL;
    IEnumMoniker *classenum = NULL;
    IMoniker *m = NULL;
    const char *device_name = ctx->device_name[devtype];
    int skip = (devtype == VideoDevice) ? ctx->video_device_number
                                        : ctx->audio_device_number;
    int r;

    const GUID *device_guid[2] = { &CLSID_VideoInputDeviceCategory,
                                   &CLSID_AudioInputDeviceCategory };
    const char *devtypename = (devtype == VideoDevice) ? "video" : "audio";

    r = ICreateDevEnum_CreateClassEnumerator(devenum, device_guid[devtype],
                                             (IEnumMoniker **) &classenum, 0);
    if (r != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not enumerate %s devices.\n",
               devtypename);
        return AVERROR(EIO);
    }

    while (!device_filter && IEnumMoniker_Next(classenum, 1, &m, NULL) == S_OK) {
        IPropertyBag *bag = NULL;
        char *buf = NULL;
        VARIANT var;

        r = IMoniker_BindToStorage(m, 0, 0, &IID_IPropertyBag, (void *) &bag);
        if (r != S_OK)
            goto fail1;

        var.vt = VT_BSTR;
        r = IPropertyBag_Read(bag, L"FriendlyName", &var, NULL);
        if (r != S_OK)
            goto fail1;

        /* NOTE(review): var.bstrVal does not appear to be freed anywhere in
         * this loop (SysFreeString) — looks like a BSTR leak per iteration;
         * confirm against COM ownership rules before changing. */
        buf = dup_wchar_to_utf8(var.bstrVal);

        if (pfilter) {
            if (strcmp(device_name, buf))
                goto fail1;

            /* Bind only once the requested device number is reached. */
            if (!skip--)
                IMoniker_BindToObject(m, 0, 0, &IID_IBaseFilter, (void *) &device_filter);
        } else {
            av_log(avctx, AV_LOG_INFO, " \"%s\"\n", buf);
        }

fail1:
        if (buf)
            av_free(buf);
        if (bag)
            IPropertyBag_Release(bag);
        IMoniker_Release(m);
    }

    IEnumMoniker_Release(classenum);

    if (pfilter) {
        if (!device_filter) {
            av_log(avctx, AV_LOG_ERROR, "Could not find %s device.\n",
                   devtypename);
            return AVERROR(EIO);
        }
        *pfilter = device_filter;
    }

    return 0;
}
 
/**
 * Cycle through available formats using the specified pin,
 * try to set parameters specified through AVOptions and if successful
 * return 1 in *pformat_set.
 * If pformat_set is NULL, list all pin capabilities.
 */
static void
dshow_cycle_formats(AVFormatContext *avctx, enum dshowDeviceType devtype,
                    IPin *pin, int *pformat_set)
{
    struct dshow_ctx *ctx = avctx->priv_data;
    IAMStreamConfig *config = NULL;
    AM_MEDIA_TYPE *type = NULL;
    int format_set = 0;
    void *caps = NULL;
    int i, n, size;

    if (IPin_QueryInterface(pin, &IID_IAMStreamConfig, (void **) &config) != S_OK)
        return;
    if (IAMStreamConfig_GetNumberOfCapabilities(config, &n, &size) != S_OK)
        goto end;

    caps = av_malloc(size);
    if (!caps)
        goto end;

    /* Walk every advertised capability until one accepts all requested
     * options (or, in listing mode, print each one). */
    for (i = 0; i < n && !format_set; i++) {
        IAMStreamConfig_GetStreamCaps(config, i, &type, (void *) caps);

#if DSHOWDEBUG
        ff_print_AM_MEDIA_TYPE(type);
#endif

        if (devtype == VideoDevice) {
            VIDEO_STREAM_CONFIG_CAPS *vcaps = caps;
            BITMAPINFOHEADER *bih;
            int64_t *fr;
#if DSHOWDEBUG
            ff_print_VIDEO_STREAM_CONFIG_CAPS(vcaps);
#endif
            /* Both VIDEOINFOHEADER variants carry the frame interval and
             * bitmap header we need; anything else is unusable. */
            if (IsEqualGUID(&type->formattype, &FORMAT_VideoInfo)) {
                VIDEOINFOHEADER *v = (void *) type->pbFormat;
                fr = &v->AvgTimePerFrame;
                bih = &v->bmiHeader;
            } else if (IsEqualGUID(&type->formattype, &FORMAT_VideoInfo2)) {
                VIDEOINFOHEADER2 *v = (void *) type->pbFormat;
                fr = &v->AvgTimePerFrame;
                bih = &v->bmiHeader;
            } else {
                goto next;
            }
            if (!pformat_set) {
                /* Listing mode: describe this capability and move on. */
                enum AVPixelFormat pix_fmt = dshow_pixfmt(bih->biCompression, bih->biBitCount);
                if (pix_fmt == AV_PIX_FMT_NONE) {
                    enum AVCodecID codec_id = ff_codec_get_id(avformat_get_riff_video_tags(), bih->biCompression);
                    AVCodec *codec = avcodec_find_decoder(codec_id);
                    if (codec_id == AV_CODEC_ID_NONE || !codec) {
                        av_log(avctx, AV_LOG_INFO, " unknown compression type 0x%X", (int) bih->biCompression);
                    } else {
                        av_log(avctx, AV_LOG_INFO, " vcodec=%s", codec->name);
                    }
                } else {
                    av_log(avctx, AV_LOG_INFO, " pixel_format=%s", av_get_pix_fmt_name(pix_fmt));
                }
                av_log(avctx, AV_LOG_INFO, " min s=%ldx%ld fps=%g max s=%ldx%ld fps=%g\n",
                       vcaps->MinOutputSize.cx, vcaps->MinOutputSize.cy,
                       1e7 / vcaps->MaxFrameInterval,
                       vcaps->MaxOutputSize.cx, vcaps->MaxOutputSize.cy,
                       1e7 / vcaps->MinFrameInterval);
                continue;
            }
            /* Selection mode: reject capabilities that cannot satisfy the
             * requested codec / pixel format / frame rate / size; otherwise
             * patch the media type with the requested values. */
            if (ctx->video_codec_id != AV_CODEC_ID_RAWVIDEO) {
                if (ctx->video_codec_id != ff_codec_get_id(avformat_get_riff_video_tags(), bih->biCompression))
                    goto next;
            }
            if (ctx->pixel_format != AV_PIX_FMT_NONE &&
                ctx->pixel_format != dshow_pixfmt(bih->biCompression, bih->biBitCount)) {
                goto next;
            }
            if (ctx->framerate) {
                /* fps -> frame interval in 100 ns units. */
                int64_t framerate = ((int64_t) ctx->requested_framerate.den*10000000)
                                    / ctx->requested_framerate.num;
                if (framerate > vcaps->MaxFrameInterval ||
                    framerate < vcaps->MinFrameInterval)
                    goto next;
                *fr = framerate;
            }
            if (ctx->requested_width && ctx->requested_height) {
                if (ctx->requested_width > vcaps->MaxOutputSize.cx ||
                    ctx->requested_width < vcaps->MinOutputSize.cx ||
                    ctx->requested_height > vcaps->MaxOutputSize.cy ||
                    ctx->requested_height < vcaps->MinOutputSize.cy)
                    goto next;
                bih->biWidth = ctx->requested_width;
                bih->biHeight = ctx->requested_height;
            }
        } else {
            AUDIO_STREAM_CONFIG_CAPS *acaps = caps;
            WAVEFORMATEX *fx;
#if DSHOWDEBUG
            ff_print_AUDIO_STREAM_CONFIG_CAPS(acaps);
#endif
            if (IsEqualGUID(&type->formattype, &FORMAT_WaveFormatEx)) {
                fx = (void *) type->pbFormat;
            } else {
                goto next;
            }
            if (!pformat_set) {
                av_log(avctx, AV_LOG_INFO, " min ch=%lu bits=%lu rate=%6lu max ch=%lu bits=%lu rate=%6lu\n",
                       acaps->MinimumChannels, acaps->MinimumBitsPerSample, acaps->MinimumSampleFrequency,
                       acaps->MaximumChannels, acaps->MaximumBitsPerSample, acaps->MaximumSampleFrequency);
                continue;
            }
            if (ctx->sample_rate) {
                if (ctx->sample_rate > acaps->MaximumSampleFrequency ||
                    ctx->sample_rate < acaps->MinimumSampleFrequency)
                    goto next;
                fx->nSamplesPerSec = ctx->sample_rate;
            }
            if (ctx->sample_size) {
                if (ctx->sample_size > acaps->MaximumBitsPerSample ||
                    ctx->sample_size < acaps->MinimumBitsPerSample)
                    goto next;
                fx->wBitsPerSample = ctx->sample_size;
            }
            if (ctx->channels) {
                if (ctx->channels > acaps->MaximumChannels ||
                    ctx->channels < acaps->MinimumChannels)
                    goto next;
                fx->nChannels = ctx->channels;
            }
        }
        /* Try to apply the (possibly patched) media type to the pin. */
        if (IAMStreamConfig_SetFormat(config, type) != S_OK)
            goto next;
        format_set = 1;
next:
        if (type->pbFormat)
            CoTaskMemFree(type->pbFormat);
        CoTaskMemFree(type);
    }
end:
    IAMStreamConfig_Release(config);
    if (caps)
        av_free(caps);
    if (pformat_set)
        *pformat_set = format_set;
}
 
/**
 * Set audio device buffer size in milliseconds (which can directly impact
 * latency, depending on the device).
 *
 * The byte size is derived from the pin's current WAVEFORMATEX average
 * byte rate; the request is only a suggestion to the allocator.
 * Returns 0 on success, AVERROR(EIO) on any failure.
 */
static int
dshow_set_audio_buffer_size(AVFormatContext *avctx, IPin *pin)
{
    struct dshow_ctx *ctx = avctx->priv_data;
    IAMBufferNegotiation *buffer_negotiation = NULL;
    ALLOCATOR_PROPERTIES props = { -1, -1, -1, -1 };
    IAMStreamConfig *config = NULL;
    AM_MEDIA_TYPE *type = NULL;
    int ret = AVERROR(EIO);

    if (IPin_QueryInterface(pin, &IID_IAMStreamConfig, (void **) &config) != S_OK)
        goto end;
    if (IAMStreamConfig_GetFormat(config, &type) != S_OK)
        goto end;
    if (!IsEqualGUID(&type->formattype, &FORMAT_WaveFormatEx))
        goto end;

    /* bytes = bytes-per-second * milliseconds / 1000 */
    props.cbBuffer = (((WAVEFORMATEX *) type->pbFormat)->nAvgBytesPerSec)
                   * ctx->audio_buffer_size / 1000;

    if (IPin_QueryInterface(pin, &IID_IAMBufferNegotiation, (void **) &buffer_negotiation) != S_OK)
        goto end;
    if (IAMBufferNegotiation_SuggestAllocatorProperties(buffer_negotiation, &props) != S_OK)
        goto end;

    ret = 0;

end:
    /* goto-cleanup: release whatever was acquired, in reverse order. */
    if (buffer_negotiation)
        IAMBufferNegotiation_Release(buffer_negotiation);
    if (type) {
        if (type->pbFormat)
            CoTaskMemFree(type->pbFormat);
        CoTaskMemFree(type);
    }
    if (config)
        IAMStreamConfig_Release(config);

    return ret;
}
 
/**
 * Cycle through available pins using the device_filter device, of type
 * devtype, retrieve the first output pin and return the pointer to the
 * object found in *ppin.
 * If ppin is NULL, cycle through all pins listing audio/video capabilities.
 */
static int
dshow_cycle_pins(AVFormatContext *avctx, enum dshowDeviceType devtype,
                 IBaseFilter *device_filter, IPin **ppin)
{
    struct dshow_ctx *ctx = avctx->priv_data;
    IEnumPins *pins = 0;
    IPin *device_pin = NULL;
    IPin *pin;
    int r;

    const GUID *mediatype[2] = { &MEDIATYPE_Video, &MEDIATYPE_Audio };
    const char *devtypename = (devtype == VideoDevice) ? "video" : "audio";

    /* Whether any user option forces us to negotiate a specific format. */
    int set_format = (devtype == VideoDevice && (ctx->framerate ||
                                                 (ctx->requested_width && ctx->requested_height) ||
                                                 ctx->pixel_format != AV_PIX_FMT_NONE ||
                                                 ctx->video_codec_id != AV_CODEC_ID_RAWVIDEO))
                  || (devtype == AudioDevice && (ctx->channels || ctx->sample_rate));
    int format_set = 0;

    r = IBaseFilter_EnumPins(device_filter, &pins);
    if (r != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not enumerate pins.\n");
        return AVERROR(EIO);
    }

    if (!ppin) {
        av_log(avctx, AV_LOG_INFO, "DirectShow %s device options\n",
               devtypename);
    }
    while (!device_pin && IEnumPins_Next(pins, 1, &pin, NULL) == S_OK) {
        IKsPropertySet *p = NULL;
        IEnumMediaTypes *types = NULL;
        PIN_INFO info = {0};
        AM_MEDIA_TYPE *type;
        GUID category;
        DWORD r2;

        IPin_QueryPinInfo(pin, &info);
        IBaseFilter_Release(info.pFilter);

        /* Only output pins in the capture category are of interest. */
        if (info.dir != PINDIR_OUTPUT)
            goto next;
        if (IPin_QueryInterface(pin, &IID_IKsPropertySet, (void **) &p) != S_OK)
            goto next;
        if (IKsPropertySet_Get(p, &AMPROPSETID_Pin, AMPROPERTY_PIN_CATEGORY,
                               NULL, 0, &category, sizeof(GUID), &r2) != S_OK)
            goto next;
        if (!IsEqualGUID(&category, &PIN_CATEGORY_CAPTURE))
            goto next;

        if (!ppin) {
            /* Listing mode: print the pin name and its formats. */
            char *buf = dup_wchar_to_utf8(info.achName);
            av_log(avctx, AV_LOG_INFO, " Pin \"%s\"\n", buf);
            av_free(buf);
            dshow_cycle_formats(avctx, devtype, pin, NULL);
            goto next;
        }
        if (set_format) {
            dshow_cycle_formats(avctx, devtype, pin, &format_set);
            if (!format_set) {
                goto next;
            }
        }
        if (devtype == AudioDevice && ctx->audio_buffer_size) {
            if (dshow_set_audio_buffer_size(avctx, pin) < 0)
                goto next;
        }

        if (IPin_EnumMediaTypes(pin, &types) != S_OK)
            goto next;

        /* Accept the pin only if it offers the expected major media type.
         * NOTE(review): when a match is found, the matched `type` appears
         * never to be freed (the goto skips CoTaskMemFree) — possible leak;
         * confirm ownership before changing. */
        IEnumMediaTypes_Reset(types);
        while (!device_pin && IEnumMediaTypes_Next(types, 1, &type, NULL) == S_OK) {
            if (IsEqualGUID(&type->majortype, mediatype[devtype])) {
                device_pin = pin;
                goto next;
            }
            CoTaskMemFree(type);
        }

next:
        if (types)
            IEnumMediaTypes_Release(types);
        if (p)
            IKsPropertySet_Release(p);
        if (device_pin != pin)
            IPin_Release(pin);
    }

    IEnumPins_Release(pins);

    if (ppin) {
        if (set_format && !format_set) {
            av_log(avctx, AV_LOG_ERROR, "Could not set %s options\n", devtypename);
            return AVERROR(EIO);
        }
        if (!device_pin) {
            av_log(avctx, AV_LOG_ERROR,
                   "Could not find output pin from %s capture device.\n", devtypename);
            return AVERROR(EIO);
        }
        *ppin = device_pin;
    }

    return 0;
}
 
/**
 * List options for device with type devtype.
 *
 * @param devenum device enumerator used for accessing the device
 * @return 0 on success, a negative AVERROR code otherwise
 */
static int
dshow_list_device_options(AVFormatContext *avctx, ICreateDevEnum *devenum,
                          enum dshowDeviceType devtype)
{
    struct dshow_ctx *ctx = avctx->priv_data;
    IBaseFilter *device_filter = NULL;
    int ret;

    ret = dshow_cycle_devices(avctx, devenum, devtype, &device_filter);
    if (ret < 0)
        return ret;
    ctx->device_filter[devtype] = device_filter;

    /* NULL pin pointer puts dshow_cycle_pins() into listing mode. */
    return dshow_cycle_pins(avctx, devtype, device_filter, NULL);
}
 
/* Open one capture device and wire it into the graph: find the device
 * filter, add it, pick its capture pin, create our grabber filter and
 * connect the two pins directly. State is stored in ctx so that
 * dshow_read_close() can release it even on partial failure. */
static int
dshow_open_device(AVFormatContext *avctx, ICreateDevEnum *devenum,
                  enum dshowDeviceType devtype)
{
    struct dshow_ctx *ctx = avctx->priv_data;
    IBaseFilter *device_filter = NULL;
    IGraphBuilder *graph = ctx->graph;
    IPin *device_pin = NULL;
    libAVPin *capture_pin = NULL;
    libAVFilter *capture_filter = NULL;
    int ret = AVERROR(EIO);
    int r;

    /* NOTE(review): this array lists the audio name first — verify it lines
     * up with the dshowDeviceType enum order (the video/audio labels may be
     * swapped); it is only used as the display name passed to AddFilter. */
    const wchar_t *filter_name[2] = { L"Audio capture filter", L"Video capture filter" };

    if ((r = dshow_cycle_devices(avctx, devenum, devtype, &device_filter)) < 0) {
        ret = r;
        goto error;
    }

    ctx->device_filter [devtype] = device_filter;

    r = IGraphBuilder_AddFilter(graph, device_filter, NULL);
    if (r != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not add device filter to graph.\n");
        goto error;
    }

    if ((r = dshow_cycle_pins(avctx, devtype, device_filter, &device_pin)) < 0) {
        ret = r;
        goto error;
    }
    ctx->device_pin[devtype] = device_pin;

    capture_filter = libAVFilter_Create(avctx, callback, devtype);
    if (!capture_filter) {
        av_log(avctx, AV_LOG_ERROR, "Could not create grabber filter.\n");
        goto error;
    }
    ctx->capture_filter[devtype] = capture_filter;

    r = IGraphBuilder_AddFilter(graph, (IBaseFilter *) capture_filter,
                                filter_name[devtype]);
    if (r != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not add capture filter to graph\n");
        goto error;
    }

    /* Keep our own reference to the grabber pin before connecting. */
    libAVPin_AddRef(capture_filter->pin);
    capture_pin = capture_filter->pin;
    ctx->capture_pin[devtype] = capture_pin;

    r = IGraphBuilder_ConnectDirect(graph, device_pin, (IPin *) capture_pin, NULL);
    if (r != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not connect pins\n");
        goto error;
    }

    ret = 0;

error:
    return ret;
}
 
/**
 * Map a raw PCM sample format to the matching little-endian PCM codec id.
 * Returns AV_CODEC_ID_NONE for any other format (not expected to occur).
 */
static enum AVCodecID waveform_codec_id(enum AVSampleFormat sample_fmt)
{
    if (sample_fmt == AV_SAMPLE_FMT_U8)
        return AV_CODEC_ID_PCM_U8;
    if (sample_fmt == AV_SAMPLE_FMT_S16)
        return AV_CODEC_ID_PCM_S16LE;
    if (sample_fmt == AV_SAMPLE_FMT_S32)
        return AV_CODEC_ID_PCM_S32LE;
    return AV_CODEC_ID_NONE; /* Should never happen. */
}
 
/**
 * Map a WAVEFORMATEX bits-per-sample count to the libav sample format.
 * Returns AV_SAMPLE_FMT_NONE for unsupported widths (not expected to occur).
 */
static enum AVSampleFormat sample_fmt_bits_per_sample(int bits)
{
    if (bits == 8)
        return AV_SAMPLE_FMT_U8;
    if (bits == 16)
        return AV_SAMPLE_FMT_S16;
    if (bits == 32)
        return AV_SAMPLE_FMT_S32;
    return AV_SAMPLE_FMT_NONE; /* Should never happen. */
}
 
/**
 * Create an AVStream for an already-opened DirectShow device and fill its
 * codec context from the media type negotiated on the capture pin.
 *
 * For video devices the dimensions, pixel format / codec id and time base
 * are taken from the (VIDEOINFOHEADER or VIDEOINFOHEADER2) format block;
 * for audio devices sample format, rate and channel count come from the
 * WAVEFORMATEX block.
 *
 * Returns 0 on success, a negative AVERROR code on failure.
 *
 * Fixes vs. the previous revision:
 *  - the unknown-compression case now uses the function's single exit path
 *    (it used to "return" directly, bypassing the error label);
 *  - extradata is allocated with av_mallocz() so its
 *    FF_INPUT_BUFFER_PADDING_SIZE tail is zeroed, as the extradata API
 *    requires.
 */
static int
dshow_add_device(AVFormatContext *avctx,
                 enum dshowDeviceType devtype)
{
    struct dshow_ctx *ctx = avctx->priv_data;
    AM_MEDIA_TYPE type;
    AVCodecContext *codec;
    AVStream *st;
    int ret = AVERROR(EIO);

    st = avformat_new_stream(avctx, NULL);
    if (!st) {
        ret = AVERROR(ENOMEM);
        goto error;
    }
    st->id = devtype;

    /* Tell the grabber filter which stream its packets belong to. */
    ctx->capture_filter[devtype]->stream_index = st->index;

    libAVPin_ConnectionMediaType(ctx->capture_pin[devtype], &type);

    codec = st->codec;
    if (devtype == VideoDevice) {
        BITMAPINFOHEADER *bih = NULL;
        AVRational time_base;

        if (IsEqualGUID(&type.formattype, &FORMAT_VideoInfo)) {
            VIDEOINFOHEADER *v = (void *) type.pbFormat;
            /* AvgTimePerFrame is in 100ns units, hence the 1/10000000 base. */
            time_base = (AVRational) { v->AvgTimePerFrame, 10000000 };
            bih = &v->bmiHeader;
        } else if (IsEqualGUID(&type.formattype, &FORMAT_VideoInfo2)) {
            VIDEOINFOHEADER2 *v = (void *) type.pbFormat;
            time_base = (AVRational) { v->AvgTimePerFrame, 10000000 };
            bih = &v->bmiHeader;
        }
        if (!bih) {
            av_log(avctx, AV_LOG_ERROR, "Could not get media type.\n");
            goto error;
        }

        codec->time_base  = time_base;
        codec->codec_type = AVMEDIA_TYPE_VIDEO;
        codec->width      = bih->biWidth;
        codec->height     = bih->biHeight;
        codec->pix_fmt    = dshow_pixfmt(bih->biCompression, bih->biBitCount);
        if (bih->biCompression == MKTAG('H', 'D', 'Y', 'C')) {
            av_log(avctx, AV_LOG_DEBUG, "attempt to use full range for HDYC...\n");
            codec->color_range = AVCOL_RANGE_MPEG; // just in case it needs this...
        }
        if (codec->pix_fmt == AV_PIX_FMT_NONE) {
            /* Not a raw format we know: look the FOURCC up in the RIFF table. */
            codec->codec_id = ff_codec_get_id(avformat_get_riff_video_tags(), bih->biCompression);
            if (codec->codec_id == AV_CODEC_ID_NONE) {
                av_log(avctx, AV_LOG_ERROR, "Unknown compression type. "
                       "Please report type 0x%X.\n", (int) bih->biCompression);
                /* Use the single exit path so cleanup added at "error"
                 * later cannot be skipped. */
                ret = AVERROR_PATCHWELCOME;
                goto error;
            }
            codec->bits_per_coded_sample = bih->biBitCount;
        } else {
            codec->codec_id = AV_CODEC_ID_RAWVIDEO;
            if (bih->biCompression == BI_RGB || bih->biCompression == BI_BITFIELDS) {
                codec->bits_per_coded_sample = bih->biBitCount;
                /* DIBs are stored bottom-up; signal this to the rawvideo
                 * decoder.  av_mallocz() keeps the required padding zeroed. */
                codec->extradata = av_mallocz(9 + FF_INPUT_BUFFER_PADDING_SIZE);
                if (codec->extradata) {
                    codec->extradata_size = 9;
                    memcpy(codec->extradata, "BottomUp", 9);
                }
            }
        }
    } else {
        WAVEFORMATEX *fx = NULL;

        if (IsEqualGUID(&type.formattype, &FORMAT_WaveFormatEx)) {
            fx = (void *) type.pbFormat;
        }
        if (!fx) {
            av_log(avctx, AV_LOG_ERROR, "Could not get media type.\n");
            goto error;
        }

        codec->codec_type  = AVMEDIA_TYPE_AUDIO;
        codec->sample_fmt  = sample_fmt_bits_per_sample(fx->wBitsPerSample);
        codec->codec_id    = waveform_codec_id(codec->sample_fmt);
        codec->sample_rate = fx->nSamplesPerSec;
        codec->channels    = fx->nChannels;
    }

    /* Timestamps from DirectShow are in 100ns units. */
    avpriv_set_pts_info(st, 64, 1, 10000000);

    ret = 0;

error:
    return ret;
}
 
/**
 * Parse avctx->filename of the form "video=NAME:audio=NAME" (either part
 * optional, in either order) into ctx->device_name[0] (video) and
 * ctx->device_name[1] (audio).
 *
 * The stored names are heap copies owned by ctx.
 *
 * @return 1 when at least one device name was found,
 *         0 on malformed input or allocation failure.
 *
 * NOTE(review): uses strtok(), which is not reentrant; fine as long as
 * read_header is not entered concurrently -- verify if that ever changes.
 */
static int parse_device_name(AVFormatContext *avctx)
{
    struct dshow_ctx *ctx = avctx->priv_data;
    char **device_name = ctx->device_name;
    char *name = av_strdup(avctx->filename);
    char *tmp = name;
    int ret = 1;
    char *type;

    /* Fix: on allocation failure the old code fell through into
     * strtok(NULL, ...), which resumes an unrelated earlier tokenization
     * (undefined behavior). Treat OOM like a malformed input string. */
    if (!name)
        return 0;

    while ((type = strtok(tmp, "="))) {
        char *token = strtok(NULL, ":");
        tmp = NULL;

        if (!strcmp(type, "video")) {
            device_name[0] = token;
        } else if (!strcmp(type, "audio")) {
            device_name[1] = token;
        } else {
            /* Unknown key: reject the whole string. */
            device_name[0] = NULL;
            device_name[1] = NULL;
            break;
        }
    }

    if (!device_name[0] && !device_name[1]) {
        ret = 0;
    } else {
        /* The tokens point into "name"; duplicate them before freeing it. */
        if (device_name[0])
            device_name[0] = av_strdup(device_name[0]);
        if (device_name[1])
            device_name[1] = av_strdup(device_name[1]);
    }

    av_free(name);
    return ret;
}
 
/*
 * Demuxer read_header callback: build the DirectShow filter graph, open
 * the requested video/audio devices, create the synchronization objects
 * used by the packet queue, and start the graph running.
 *
 * Also implements the list_devices / list_options modes, which only print
 * to the log and return AVERROR_EXIT.
 *
 * Returns 0 on success or a negative AVERROR code; on failure all partial
 * state is torn down via dshow_read_close().
 */
static int dshow_read_header(AVFormatContext *avctx)
{
struct dshow_ctx *ctx = avctx->priv_data;
IGraphBuilder *graph = NULL;
ICreateDevEnum *devenum = NULL;
IMediaControl *control = NULL;
IMediaEvent *media_event = NULL;
HANDLE media_event_handle;
HANDLE proc;
int ret = AVERROR(EIO);
int r;

/* COM must be initialized on this thread before any DirectShow call. */
CoInitialize(0);

if (!ctx->list_devices && !parse_device_name(avctx)) {
av_log(avctx, AV_LOG_ERROR, "Malformed dshow input string.\n");
goto error;
}

ctx->video_codec_id = avctx->video_codec_id ? avctx->video_codec_id
: AV_CODEC_ID_RAWVIDEO;
/* A forced pixel format only makes sense for raw video. */
if (ctx->pixel_format != AV_PIX_FMT_NONE) {
if (ctx->video_codec_id != AV_CODEC_ID_RAWVIDEO) {
av_log(avctx, AV_LOG_ERROR, "Pixel format may only be set when "
"video codec is not set or set to rawvideo\n");
ret = AVERROR(EINVAL);
goto error;
}
}
if (ctx->framerate) {
r = av_parse_video_rate(&ctx->requested_framerate, ctx->framerate);
if (r < 0) {
av_log(avctx, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", ctx->framerate);
goto error;
}
}

r = CoCreateInstance(&CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER,
&IID_IGraphBuilder, (void **) &graph);
if (r != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not create capture graph.\n");
goto error;
}
ctx->graph = graph;

r = CoCreateInstance(&CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER,
&IID_ICreateDevEnum, (void **) &devenum);
if (r != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not enumerate system devices.\n");
goto error;
}

/* -list_devices: print device names only, never open anything. */
if (ctx->list_devices) {
av_log(avctx, AV_LOG_INFO, "DirectShow video devices\n");
dshow_cycle_devices(avctx, devenum, VideoDevice, NULL);
av_log(avctx, AV_LOG_INFO, "DirectShow audio devices\n");
dshow_cycle_devices(avctx, devenum, AudioDevice, NULL);
ret = AVERROR_EXIT;
goto error;
}
/* -list_options: print supported formats of the named devices. */
if (ctx->list_options) {
if (ctx->device_name[VideoDevice])
dshow_list_device_options(avctx, devenum, VideoDevice);
if (ctx->device_name[AudioDevice])
dshow_list_device_options(avctx, devenum, AudioDevice);
ret = AVERROR_EXIT;
goto error;
}

/* Open the requested devices and create one AVStream for each. */
if (ctx->device_name[VideoDevice]) {
if ((r = dshow_open_device(avctx, devenum, VideoDevice)) < 0 ||
(r = dshow_add_device(avctx, VideoDevice)) < 0) {
ret = r;
goto error;
}
}
if (ctx->device_name[AudioDevice]) {
if ((r = dshow_open_device(avctx, devenum, AudioDevice)) < 0 ||
(r = dshow_add_device(avctx, AudioDevice)) < 0) {
ret = r;
goto error;
}
}

/* mutex guards the packet list shared with the streaming thread;
 * event[1] is signalled when new packets arrive. */
ctx->mutex = CreateMutex(NULL, 0, NULL);
if (!ctx->mutex) {
av_log(avctx, AV_LOG_ERROR, "Could not create Mutex\n");
goto error;
}
ctx->event[1] = CreateEvent(NULL, 1, 0, NULL);
if (!ctx->event[1]) {
av_log(avctx, AV_LOG_ERROR, "Could not create Event\n");
goto error;
}

r = IGraphBuilder_QueryInterface(graph, &IID_IMediaControl, (void **) &control);
if (r != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not get media control.\n");
goto error;
}
ctx->control = control;

r = IGraphBuilder_QueryInterface(graph, &IID_IMediaEvent, (void **) &media_event);
if (r != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not get media event.\n");
goto error;
}
ctx->media_event = media_event;

/* Duplicate the graph's event handle into event[0] so read_packet can
 * wait on it together with the new-packet event. */
r = IMediaEvent_GetEventHandle(media_event, (void *) &media_event_handle);
if (r != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not get media event handle.\n");
goto error;
}
proc = GetCurrentProcess();
r = DuplicateHandle(proc, media_event_handle, proc, &ctx->event[0],
0, 0, DUPLICATE_SAME_ACCESS);
/* NB: DuplicateHandle returns a BOOL, not an HRESULT like the calls above. */
if (!r) {
av_log(avctx, AV_LOG_ERROR, "Could not duplicate media event handle.\n");
goto error;
}

/* Start streaming; S_FALSE means "still transitioning", so wait for the
 * graph to settle via GetState. */
r = IMediaControl_Run(control);
if (r == S_FALSE) {
OAFilterState pfs;
r = IMediaControl_GetState(control, 0, &pfs);
}
if (r != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not run filter\n");
goto error;
}

ret = 0;

error:

if (devenum)
ICreateDevEnum_Release(devenum);

if (ret < 0)
dshow_read_close(avctx);

return ret;
}
 
/**
 * Drain the DirectShow event queue, freeing the parameters of every event
 * retrieved so notification does not re-trigger.
 *
 * @return -1 if a terminal event (completion, device loss or abort error)
 *         was seen, 0 otherwise.
 */
static int dshow_check_event_queue(IMediaEvent *media_event)
{
    LONG_PTR param1, param2;
    long event_code;
    int status = 0;

    for (;;) {
        if (IMediaEvent_GetEvent(media_event, &event_code, &param1, &param2, 0) == E_ABORT)
            break; /* queue empty (zero timeout) */
        switch (event_code) {
        case EC_COMPLETE:
        case EC_DEVICE_LOST:
        case EC_ERRORABORT:
            status = -1;
            break;
        default:
            break;
        }
        IMediaEvent_FreeEventParams(media_event, event_code, param1, param2);
    }

    return status;
}
 
/*
 * Demuxer read_packet callback: pop the next packet from ctx->pktl.
 *
 * The list is presumably appended to by the capture callback registered in
 * dshow_open_device() from the DirectShow streaming thread -- confirm
 * against that callback (not visible in this chunk).  Access is serialized
 * with ctx->mutex.  event[0] is the duplicated media-event handle,
 * event[1] the new-packet event; waiting on either wakes us up.
 *
 * Blocks until a packet or EOF unless AVFMT_FLAG_NONBLOCK is set, in which
 * case AVERROR(EAGAIN) is returned when the queue is empty.
 */
static int dshow_read_packet(AVFormatContext *s, AVPacket *pkt)
{
struct dshow_ctx *ctx = s->priv_data;
AVPacketList *pktl = NULL;

while (!ctx->eof && !pktl) {
WaitForSingleObject(ctx->mutex, INFINITE);
pktl = ctx->pktl;
if (pktl) {
/* Transfer ownership of the queued packet to the caller. */
*pkt = pktl->pkt;
ctx->pktl = ctx->pktl->next;
av_free(pktl);
ctx->curbufsize -= pkt->size;
}
/* Manual-reset event: clear it while we hold the mutex so a packet
 * arriving after this point signals it again. */
ResetEvent(ctx->event[1]);
ReleaseMutex(ctx->mutex);
if (!pktl) {
if (dshow_check_event_queue(ctx->media_event) < 0) {
ctx->eof = 1;
} else if (s->flags & AVFMT_FLAG_NONBLOCK) {
return AVERROR(EAGAIN);
} else {
/* bWaitAll=0: wake on either graph event or new packet. */
WaitForMultipleObjects(2, ctx->event, 0, INFINITE);
}
}
}

return ctx->eof ? AVERROR(EIO) : pkt->size;
}
 
#define OFFSET(x) offsetof(struct dshow_ctx, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
/* Private options of the dshow demuxer; all are decoding parameters stored
 * in struct dshow_ctx.  "list_devices" and "list_options" are booleans
 * expressed as 0/1 INT options with true/false named constants. */
static const AVOption options[] = {
{ "video_size", "set video size given a string such as 640x480 or hd720.", OFFSET(requested_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, DEC },
{ "pixel_format", "set video pixel format", OFFSET(pixel_format), AV_OPT_TYPE_PIXEL_FMT, {.i64 = AV_PIX_FMT_NONE}, -1, AV_PIX_FMT_NB-1, DEC },
{ "framerate", "set video frame rate", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
{ "sample_rate", "set audio sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC },
{ "sample_size", "set audio sample size", OFFSET(sample_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 16, DEC },
{ "channels", "set number of audio channels, such as 1 or 2", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC },
{ "list_devices", "list available devices", OFFSET(list_devices), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, DEC, "list_devices" },
{ "true", "", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, DEC, "list_devices" },
{ "false", "", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, DEC, "list_devices" },
{ "list_options", "list available options for specified device", OFFSET(list_options), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, DEC, "list_options" },
{ "true", "", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, DEC, "list_options" },
{ "false", "", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, DEC, "list_options" },
{ "video_device_number", "set video device number for devices with same name (starts at 0)", OFFSET(video_device_number), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC },
{ "audio_device_number", "set audio device number for devices with same name (starts at 0)", OFFSET(audio_device_number), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC },
{ "audio_buffer_size", "set audio device buffer latency size in milliseconds (default is the device's default)", OFFSET(audio_buffer_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC },
{ NULL },
};
 
/* AVClass exposing the options table above through the AVOption API. */
static const AVClass dshow_class = {
.class_name = "dshow indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
 
/* Registration of the DirectShow capture input device.  AVFMT_NOFILE: the
 * "filename" is a device spec, not a file to be opened by lavf. */
AVInputFormat ff_dshow_demuxer = {
.name = "dshow",
.long_name = NULL_IF_CONFIG_SMALL("DirectShow capture"),
.priv_data_size = sizeof(struct dshow_ctx),
.read_header = dshow_read_header,
.read_packet = dshow_read_packet,
.read_close = dshow_read_close,
.flags = AVFMT_NOFILE,
.priv_class = &dshow_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/dshow_capture.h
0,0 → 1,279
/*
* DirectShow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVDEVICE_DSHOW_H
#define AVDEVICE_DSHOW_H
 
#define DSHOWDEBUG 0
 
#include "avdevice.h"
 
#define COBJMACROS
#include <windows.h>
#define NO_DSHOW_STRSAFE
#include <dshow.h>
#include <dvdmedia.h>
 
/* EC_DEVICE_LOST is not defined in MinGW dshow headers. */
#ifndef EC_DEVICE_LOST
#define EC_DEVICE_LOST 0x1f
#endif
 
long ff_copy_dshow_media_type(AM_MEDIA_TYPE *dst, const AM_MEDIA_TYPE *src);
void ff_print_VIDEO_STREAM_CONFIG_CAPS(const VIDEO_STREAM_CONFIG_CAPS *caps);
void ff_print_AUDIO_STREAM_CONFIG_CAPS(const AUDIO_STREAM_CONFIG_CAPS *caps);
void ff_print_AM_MEDIA_TYPE(const AM_MEDIA_TYPE *type);
void ff_printGUID(const GUID *g);
 
#if DSHOWDEBUG
extern const AVClass *ff_dshow_context_class_ptr;
#define dshowdebug(...) av_log(&ff_dshow_context_class_ptr, AV_LOG_DEBUG, __VA_ARGS__)
#else
#define dshowdebug(...)
#endif
 
/* No-op destructor callback for DECLARE_DESTROY() users that need no
 * per-object cleanup (see e.g. libAVEnumMediaTypes). */
static inline void nothing(void *foo)
{
}
 
/* Maps an interface id to the byte offset of its vtable pointer inside an
 * object; DECLARE_QUERYINTERFACE walks an array of these to answer
 * QueryInterface calls. */
struct GUIDoffset {
const GUID *iid;
int offset;
};

/* Device kind; the values double as array indices into the per-device
 * arrays of struct dshow_ctx (0 = video, 1 = audio). */
enum dshowDeviceType {
VideoDevice = 0,
AudioDevice = 1,
};
 
/* Boilerplate generators for the COM objects implemented in this library.
 * Note: "this" is a perfectly legal identifier in C (these files are never
 * compiled as C++).  Comments cannot appear inside the macro bodies
 * because every line ends in a continuation backslash. */

/* Generate class_QueryInterface(): looks riid up in the GUIDoffset table
 * given as __VA_ARGS__ and returns the sub-object at the stored offset,
 * adding a reference on success. */
#define DECLARE_QUERYINTERFACE(class, ...) \
long WINAPI \
class##_QueryInterface(class *this, const GUID *riid, void **ppvObject) \
{ \
struct GUIDoffset ifaces[] = __VA_ARGS__; \
int i; \
dshowdebug(AV_STRINGIFY(class)"_QueryInterface(%p, %p, %p)\n", this, riid, ppvObject); \
ff_printGUID(riid); \
if (!ppvObject) \
return E_POINTER; \
for (i = 0; i < sizeof(ifaces)/sizeof(ifaces[0]); i++) { \
if (IsEqualGUID(riid, ifaces[i].iid)) { \
void *obj = (void *) ((uint8_t *) this + ifaces[i].offset); \
class##_AddRef(this); \
dshowdebug("\tfound %d with offset %d\n", i, ifaces[i].offset); \
*ppvObject = (void *) obj; \
return S_OK; \
} \
} \
dshowdebug("\tE_NOINTERFACE\n"); \
*ppvObject = NULL; \
return E_NOINTERFACE; \
}
/* Generate class_AddRef(): atomic increment of the reference count. */
#define DECLARE_ADDREF(class) \
unsigned long WINAPI \
class##_AddRef(class *this) \
{ \
dshowdebug(AV_STRINGIFY(class)"_AddRef(%p)\t%ld\n", this, this->ref+1); \
return InterlockedIncrement(&this->ref); \
}
/* Generate class_Release(): atomic decrement; destroys the object when the
 * count reaches zero. */
#define DECLARE_RELEASE(class) \
unsigned long WINAPI \
class##_Release(class *this) \
{ \
long ref = InterlockedDecrement(&this->ref); \
dshowdebug(AV_STRINGIFY(class)"_Release(%p)\t%ld\n", this, ref); \
if (!ref) \
class##_Destroy(this); \
return ref; \
}

/* Generate class_Destroy(): runs the per-class cleanup "func", then frees
 * the vtable and the object (both CoTaskMemAlloc'ed by DECLARE_CREATE).
 * NB: func(this) is called before the NULL check, so it must tolerate a
 * NULL argument. */
#define DECLARE_DESTROY(class, func) \
void class##_Destroy(class *this) \
{ \
dshowdebug(AV_STRINGIFY(class)"_Destroy(%p)\n", this); \
func(this); \
if (this) { \
if (this->vtbl) \
CoTaskMemFree(this->vtbl); \
CoTaskMemFree(this); \
} \
}
/* Generate class_Create(): allocates and zeroes the object and its vtable,
 * sets ref = 1, then runs the "setup" expression to populate the vtable;
 * on any failure everything is torn down via class_Destroy(). */
#define DECLARE_CREATE(class, setup, ...) \
class *class##_Create(__VA_ARGS__) \
{ \
class *this = CoTaskMemAlloc(sizeof(class)); \
void *vtbl = CoTaskMemAlloc(sizeof(*this->vtbl)); \
dshowdebug(AV_STRINGIFY(class)"_Create(%p)\n", this); \
if (!this || !vtbl) \
goto fail; \
ZeroMemory(this, sizeof(class)); \
ZeroMemory(vtbl, sizeof(*this->vtbl)); \
this->ref = 1; \
this->vtbl = vtbl; \
if (!setup) \
goto fail; \
dshowdebug("created "AV_STRINGIFY(class)" %p\n", this); \
return this; \
fail: \
class##_Destroy(this); \
dshowdebug("could not create "AV_STRINGIFY(class)"\n"); \
return NULL; \
}

/* Store the class's implementation of method "fn" into the vtable slot. */
#define SETVTBL(vtbl, class, fn) \
do { (vtbl)->fn = (void *) class##_##fn; } while(0)
 
/*****************************************************************************
* Forward Declarations
****************************************************************************/
typedef struct libAVPin libAVPin;
typedef struct libAVMemInputPin libAVMemInputPin;
typedef struct libAVEnumPins libAVEnumPins;
typedef struct libAVEnumMediaTypes libAVEnumMediaTypes;
typedef struct libAVFilter libAVFilter;
 
/*****************************************************************************
* libAVPin
****************************************************************************/
/* Our IPin implementation (the grabber filter's single input pin).
 * NOTE(review): imemvtbl presumably serves IMemInputPin queries through the
 * GUIDoffset mechanism of DECLARE_QUERYINTERFACE (the field's offset being
 * handed out as the sub-object) -- confirm in dshow_pin.c. */
struct libAVPin {
IPinVtbl *vtbl;
long ref;
libAVFilter *filter;
IPin *connectedto;
AM_MEDIA_TYPE type;
IMemInputPinVtbl *imemvtbl;
};
 
long WINAPI libAVPin_QueryInterface (libAVPin *, const GUID *, void **);
unsigned long WINAPI libAVPin_AddRef (libAVPin *);
unsigned long WINAPI libAVPin_Release (libAVPin *);
long WINAPI libAVPin_Connect (libAVPin *, IPin *, const AM_MEDIA_TYPE *);
long WINAPI libAVPin_ReceiveConnection (libAVPin *, IPin *, const AM_MEDIA_TYPE *);
long WINAPI libAVPin_Disconnect (libAVPin *);
long WINAPI libAVPin_ConnectedTo (libAVPin *, IPin **);
long WINAPI libAVPin_ConnectionMediaType (libAVPin *, AM_MEDIA_TYPE *);
long WINAPI libAVPin_QueryPinInfo (libAVPin *, PIN_INFO *);
long WINAPI libAVPin_QueryDirection (libAVPin *, PIN_DIRECTION *);
long WINAPI libAVPin_QueryId (libAVPin *, wchar_t **);
long WINAPI libAVPin_QueryAccept (libAVPin *, const AM_MEDIA_TYPE *);
long WINAPI libAVPin_EnumMediaTypes (libAVPin *, IEnumMediaTypes **);
long WINAPI libAVPin_QueryInternalConnections(libAVPin *, IPin **, unsigned long *);
long WINAPI libAVPin_EndOfStream (libAVPin *);
long WINAPI libAVPin_BeginFlush (libAVPin *);
long WINAPI libAVPin_EndFlush (libAVPin *);
long WINAPI libAVPin_NewSegment (libAVPin *, REFERENCE_TIME, REFERENCE_TIME, double);
 
long WINAPI libAVMemInputPin_QueryInterface (libAVMemInputPin *, const GUID *, void **);
unsigned long WINAPI libAVMemInputPin_AddRef (libAVMemInputPin *);
unsigned long WINAPI libAVMemInputPin_Release (libAVMemInputPin *);
long WINAPI libAVMemInputPin_GetAllocator (libAVMemInputPin *, IMemAllocator **);
long WINAPI libAVMemInputPin_NotifyAllocator (libAVMemInputPin *, IMemAllocator *, BOOL);
long WINAPI libAVMemInputPin_GetAllocatorRequirements(libAVMemInputPin *, ALLOCATOR_PROPERTIES *);
long WINAPI libAVMemInputPin_Receive (libAVMemInputPin *, IMediaSample *);
long WINAPI libAVMemInputPin_ReceiveMultiple (libAVMemInputPin *, IMediaSample **, long, long *);
long WINAPI libAVMemInputPin_ReceiveCanBlock (libAVMemInputPin *);
 
void libAVPin_Destroy(libAVPin *);
libAVPin *libAVPin_Create (libAVFilter *filter);
 
void libAVMemInputPin_Destroy(libAVMemInputPin *);
 
/*****************************************************************************
* libAVEnumPins
****************************************************************************/
/* IEnumPins implementation; enumerates the filter's single pin, with "pos"
 * tracking whether it has been returned yet. */
struct libAVEnumPins {
IEnumPinsVtbl *vtbl;
long ref;
int pos;
libAVPin *pin;
libAVFilter *filter;
};
 
long WINAPI libAVEnumPins_QueryInterface(libAVEnumPins *, const GUID *, void **);
unsigned long WINAPI libAVEnumPins_AddRef (libAVEnumPins *);
unsigned long WINAPI libAVEnumPins_Release (libAVEnumPins *);
long WINAPI libAVEnumPins_Next (libAVEnumPins *, unsigned long, IPin **, unsigned long *);
long WINAPI libAVEnumPins_Skip (libAVEnumPins *, unsigned long);
long WINAPI libAVEnumPins_Reset (libAVEnumPins *);
long WINAPI libAVEnumPins_Clone (libAVEnumPins *, libAVEnumPins **);
 
void libAVEnumPins_Destroy(libAVEnumPins *);
libAVEnumPins *libAVEnumPins_Create (libAVPin *pin, libAVFilter *filter);
 
/*****************************************************************************
* libAVEnumMediaTypes
****************************************************************************/
/* IEnumMediaTypes implementation; enumerates at most one media type
 * (majortype GUID_NULL marks "none").
 * NOTE(review): vtbl is declared as IEnumPinsVtbl, apparently reusing the
 * pin enumerator's vtable type; IEnumMediaTypesVtbl would be the matching
 * type -- verify the two layouts really coincide before relying on this. */
struct libAVEnumMediaTypes {
IEnumPinsVtbl *vtbl;
long ref;
int pos;
AM_MEDIA_TYPE type;
};
 
long WINAPI libAVEnumMediaTypes_QueryInterface(libAVEnumMediaTypes *, const GUID *, void **);
unsigned long WINAPI libAVEnumMediaTypes_AddRef (libAVEnumMediaTypes *);
unsigned long WINAPI libAVEnumMediaTypes_Release (libAVEnumMediaTypes *);
long WINAPI libAVEnumMediaTypes_Next (libAVEnumMediaTypes *, unsigned long, AM_MEDIA_TYPE **, unsigned long *);
long WINAPI libAVEnumMediaTypes_Skip (libAVEnumMediaTypes *, unsigned long);
long WINAPI libAVEnumMediaTypes_Reset (libAVEnumMediaTypes *);
long WINAPI libAVEnumMediaTypes_Clone (libAVEnumMediaTypes *, libAVEnumMediaTypes **);
 
void libAVEnumMediaTypes_Destroy(libAVEnumMediaTypes *);
libAVEnumMediaTypes *libAVEnumMediaTypes_Create(const AM_MEDIA_TYPE *type);
 
/*****************************************************************************
* libAVFilter
****************************************************************************/
/* Our IBaseFilter implementation: the "grabber" that receives samples from
 * the device and forwards them through "callback".  stream_index is set by
 * dshow_add_device() so the callback can tag packets with the right
 * AVStream.  NOTE(review): start_time presumably anchors the timestamps
 * passed to the callback -- confirm in dshow_filter.c / dshow_pin.c. */
struct libAVFilter {
IBaseFilterVtbl *vtbl;
long ref;
const wchar_t *name;
libAVPin *pin;
FILTER_INFO info;
FILTER_STATE state;
IReferenceClock *clock;
enum dshowDeviceType type;
void *priv_data;
int stream_index;
int64_t start_time;
void (*callback)(void *priv_data, int index, uint8_t *buf, int buf_size, int64_t time);
};
 
long WINAPI libAVFilter_QueryInterface (libAVFilter *, const GUID *, void **);
unsigned long WINAPI libAVFilter_AddRef (libAVFilter *);
unsigned long WINAPI libAVFilter_Release (libAVFilter *);
long WINAPI libAVFilter_GetClassID (libAVFilter *, CLSID *);
long WINAPI libAVFilter_Stop (libAVFilter *);
long WINAPI libAVFilter_Pause (libAVFilter *);
long WINAPI libAVFilter_Run (libAVFilter *, REFERENCE_TIME);
long WINAPI libAVFilter_GetState (libAVFilter *, DWORD, FILTER_STATE *);
long WINAPI libAVFilter_SetSyncSource (libAVFilter *, IReferenceClock *);
long WINAPI libAVFilter_GetSyncSource (libAVFilter *, IReferenceClock **);
long WINAPI libAVFilter_EnumPins (libAVFilter *, IEnumPins **);
long WINAPI libAVFilter_FindPin (libAVFilter *, const wchar_t *, IPin **);
long WINAPI libAVFilter_QueryFilterInfo(libAVFilter *, FILTER_INFO *);
long WINAPI libAVFilter_JoinFilterGraph(libAVFilter *, IFilterGraph *, const wchar_t *);
long WINAPI libAVFilter_QueryVendorInfo(libAVFilter *, wchar_t **);
 
void libAVFilter_Destroy(libAVFilter *);
libAVFilter *libAVFilter_Create (void *, void *, enum dshowDeviceType);
 
#endif /* AVDEVICE_DSHOW_H */
/contrib/sdk/sources/ffmpeg/libavdevice/dshow_common.c
0,0 → 1,190
/*
* Directshow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "dshow_capture.h"
 
/**
 * Copy an AM_MEDIA_TYPE, deep-copying the variable-size format block
 * (pbFormat) and deliberately dropping the pUnk reference.
 *
 * @return S_OK on success, E_OUTOFMEMORY if the format block could not be
 *         allocated (dst is left untouched in that case).
 */
long ff_copy_dshow_media_type(AM_MEDIA_TYPE *dst, const AM_MEDIA_TYPE *src)
{
    uint8_t *format_copy = NULL;

    if (src->cbFormat) {
        format_copy = CoTaskMemAlloc(src->cbFormat);
        if (!format_copy)
            return E_OUTOFMEMORY;
        memcpy(format_copy, src->pbFormat, src->cbFormat);
    }

    *dst = *src;            /* shallow copy of all fixed fields */
    dst->pUnk = NULL;       /* no reference taken on the source's pUnk */
    dst->pbFormat = format_copy;

    return S_OK;
}
 
/* Dump a GUID's raw fields via dshowdebug(); compiles to an empty function
 * unless DSHOWDEBUG is enabled. */
void ff_printGUID(const GUID *g)
{
#if DSHOWDEBUG
const uint32_t *d = (const uint32_t *) &g->Data1;
const uint16_t *w = (const uint16_t *) &g->Data2;
const uint8_t *c = (const uint8_t *) &g->Data4;

dshowdebug("0x%08x 0x%04x 0x%04x %02x%02x%02x%02x%02x%02x%02x%02x",
d[0], w[0], w[1],
c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
#endif
}
 
/** AVClass item_name callback: every dshow context logs under one name. */
static const char *dshow_context_to_name(void *ptr)
{
    (void) ptr; /* the name does not depend on the context instance */
    return "dshow";
}
/* Minimal AVClass (class_name + item_name only, positional init) exported
 * through ff_dshow_context_class_ptr so the dshowdebug() macro in
 * dshow_capture.h can call av_log() without an AVFormatContext at hand. */
static const AVClass ff_dshow_context_class = { "DirectShow", dshow_context_to_name };
const AVClass *ff_dshow_context_class_ptr = &ff_dshow_context_class;
 
/* Print one struct member as "<name>:\t<value>" with printf length/format
 * suffix "type"; pctx is accepted for symmetry but unused. */
#define dstruct(pctx, sname, var, type) \
dshowdebug(" "#var":\t%"type"\n", sname->var)
 
#if DSHOWDEBUG
/* Dump every field of a BITMAPINFOHEADER, printing biCompression both
 * numerically and as its FOURCC characters. */
static void dump_bih(void *s, BITMAPINFOHEADER *bih)
{
dshowdebug(" BITMAPINFOHEADER\n");
dstruct(s, bih, biSize, "lu");
dstruct(s, bih, biWidth, "ld");
dstruct(s, bih, biHeight, "ld");
dstruct(s, bih, biPlanes, "d");
dstruct(s, bih, biBitCount, "d");
dstruct(s, bih, biCompression, "lu");
dshowdebug(" biCompression:\t\"%.4s\"\n",
(char*) &bih->biCompression);
dstruct(s, bih, biSizeImage, "lu");
dstruct(s, bih, biXPelsPerMeter, "lu");
dstruct(s, bih, biYPelsPerMeter, "lu");
dstruct(s, bih, biClrUsed, "lu");
dstruct(s, bih, biClrImportant, "lu");
}
#endif
 
/* Dump a VIDEO_STREAM_CONFIG_CAPS structure field by field; empty function
 * unless DSHOWDEBUG is enabled. */
void ff_print_VIDEO_STREAM_CONFIG_CAPS(const VIDEO_STREAM_CONFIG_CAPS *caps)
{
#if DSHOWDEBUG
dshowdebug(" VIDEO_STREAM_CONFIG_CAPS\n");
dshowdebug(" guid\t");
ff_printGUID(&caps->guid);
dshowdebug("\n");
dshowdebug(" VideoStandard\t%lu\n", caps->VideoStandard);
dshowdebug(" InputSize %ld\t%ld\n", caps->InputSize.cx, caps->InputSize.cy);
dshowdebug(" MinCroppingSize %ld\t%ld\n", caps->MinCroppingSize.cx, caps->MinCroppingSize.cy);
dshowdebug(" MaxCroppingSize %ld\t%ld\n", caps->MaxCroppingSize.cx, caps->MaxCroppingSize.cy);
dshowdebug(" CropGranularityX\t%d\n", caps->CropGranularityX);
dshowdebug(" CropGranularityY\t%d\n", caps->CropGranularityY);
dshowdebug(" CropAlignX\t%d\n", caps->CropAlignX);
dshowdebug(" CropAlignY\t%d\n", caps->CropAlignY);
dshowdebug(" MinOutputSize %ld\t%ld\n", caps->MinOutputSize.cx, caps->MinOutputSize.cy);
dshowdebug(" MaxOutputSize %ld\t%ld\n", caps->MaxOutputSize.cx, caps->MaxOutputSize.cy);
dshowdebug(" OutputGranularityX\t%d\n", caps->OutputGranularityX);
dshowdebug(" OutputGranularityY\t%d\n", caps->OutputGranularityY);
dshowdebug(" StretchTapsX\t%d\n", caps->StretchTapsX);
dshowdebug(" StretchTapsY\t%d\n", caps->StretchTapsY);
dshowdebug(" ShrinkTapsX\t%d\n", caps->ShrinkTapsX);
dshowdebug(" ShrinkTapsY\t%d\n", caps->ShrinkTapsY);
dshowdebug(" MinFrameInterval\t%"PRId64"\n", caps->MinFrameInterval);
dshowdebug(" MaxFrameInterval\t%"PRId64"\n", caps->MaxFrameInterval);
dshowdebug(" MinBitsPerSecond\t%ld\n", caps->MinBitsPerSecond);
dshowdebug(" MaxBitsPerSecond\t%ld\n", caps->MaxBitsPerSecond);
#endif
}
 
/* Dump an AUDIO_STREAM_CONFIG_CAPS structure field by field; empty
 * function unless DSHOWDEBUG is enabled. */
void ff_print_AUDIO_STREAM_CONFIG_CAPS(const AUDIO_STREAM_CONFIG_CAPS *caps)
{
#if DSHOWDEBUG
dshowdebug(" AUDIO_STREAM_CONFIG_CAPS\n");
dshowdebug(" guid\t");
ff_printGUID(&caps->guid);
dshowdebug("\n");
dshowdebug(" MinimumChannels\t%lu\n", caps->MinimumChannels);
dshowdebug(" MaximumChannels\t%lu\n", caps->MaximumChannels);
dshowdebug(" ChannelsGranularity\t%lu\n", caps->ChannelsGranularity);
dshowdebug(" MinimumBitsPerSample\t%lu\n", caps->MinimumBitsPerSample);
dshowdebug(" MaximumBitsPerSample\t%lu\n", caps->MaximumBitsPerSample);
dshowdebug(" BitsPerSampleGranularity\t%lu\n", caps->BitsPerSampleGranularity);
dshowdebug(" MinimumSampleFrequency\t%lu\n", caps->MinimumSampleFrequency);
dshowdebug(" MaximumSampleFrequency\t%lu\n", caps->MaximumSampleFrequency);
dshowdebug(" SampleFrequencyGranularity\t%lu\n", caps->SampleFrequencyGranularity);
#endif
}
 
/* Dump an AM_MEDIA_TYPE, including a decoded view of the format block for
 * the VideoInfo, VideoInfo2 and WaveFormatEx format types; empty function
 * unless DSHOWDEBUG is enabled. */
void ff_print_AM_MEDIA_TYPE(const AM_MEDIA_TYPE *type)
{
#if DSHOWDEBUG
dshowdebug(" majortype\t");
ff_printGUID(&type->majortype);
dshowdebug("\n");
dshowdebug(" subtype\t");
ff_printGUID(&type->subtype);
dshowdebug("\n");
dshowdebug(" bFixedSizeSamples\t%d\n", type->bFixedSizeSamples);
dshowdebug(" bTemporalCompression\t%d\n", type->bTemporalCompression);
dshowdebug(" lSampleSize\t%lu\n", type->lSampleSize);
dshowdebug(" formattype\t");
ff_printGUID(&type->formattype);
dshowdebug("\n");
dshowdebug(" pUnk\t%p\n", type->pUnk);
dshowdebug(" cbFormat\t%lu\n", type->cbFormat);
dshowdebug(" pbFormat\t%p\n", type->pbFormat);

if (IsEqualGUID(&type->formattype, &FORMAT_VideoInfo)) {
VIDEOINFOHEADER *v = (void *) type->pbFormat;
dshowdebug(" rcSource: left %ld top %ld right %ld bottom %ld\n",
v->rcSource.left, v->rcSource.top, v->rcSource.right, v->rcSource.bottom);
dshowdebug(" rcTarget: left %ld top %ld right %ld bottom %ld\n",
v->rcTarget.left, v->rcTarget.top, v->rcTarget.right, v->rcTarget.bottom);
dshowdebug(" dwBitRate: %lu\n", v->dwBitRate);
dshowdebug(" dwBitErrorRate: %lu\n", v->dwBitErrorRate);
dshowdebug(" AvgTimePerFrame: %"PRId64"\n", v->AvgTimePerFrame);
dump_bih(NULL, &v->bmiHeader);
} else if (IsEqualGUID(&type->formattype, &FORMAT_VideoInfo2)) {
VIDEOINFOHEADER2 *v = (void *) type->pbFormat;
dshowdebug(" rcSource: left %ld top %ld right %ld bottom %ld\n",
v->rcSource.left, v->rcSource.top, v->rcSource.right, v->rcSource.bottom);
dshowdebug(" rcTarget: left %ld top %ld right %ld bottom %ld\n",
v->rcTarget.left, v->rcTarget.top, v->rcTarget.right, v->rcTarget.bottom);
dshowdebug(" dwBitRate: %lu\n", v->dwBitRate);
dshowdebug(" dwBitErrorRate: %lu\n", v->dwBitErrorRate);
dshowdebug(" AvgTimePerFrame: %"PRId64"\n", v->AvgTimePerFrame);
dshowdebug(" dwInterlaceFlags: %lu\n", v->dwInterlaceFlags);
dshowdebug(" dwCopyProtectFlags: %lu\n", v->dwCopyProtectFlags);
dshowdebug(" dwPictAspectRatioX: %lu\n", v->dwPictAspectRatioX);
dshowdebug(" dwPictAspectRatioY: %lu\n", v->dwPictAspectRatioY);
// dshowdebug(" dwReserved1: %lu\n", v->u.dwReserved1); /* mingw-w64 is buggy and doesn't name unnamed unions */
dshowdebug(" dwReserved2: %lu\n", v->dwReserved2);
dump_bih(NULL, &v->bmiHeader);
} else if (IsEqualGUID(&type->formattype, &FORMAT_WaveFormatEx)) {
WAVEFORMATEX *fx = (void *) type->pbFormat;
dshowdebug(" wFormatTag: %u\n", fx->wFormatTag);
dshowdebug(" nChannels: %u\n", fx->nChannels);
dshowdebug(" nSamplesPerSec: %lu\n", fx->nSamplesPerSec);
dshowdebug(" nAvgBytesPerSec: %lu\n", fx->nAvgBytesPerSec);
dshowdebug(" nBlockAlign: %u\n", fx->nBlockAlign);
dshowdebug(" wBitsPerSample: %u\n", fx->wBitsPerSample);
dshowdebug(" cbSize: %u\n", fx->cbSize);
}
#endif
}
/contrib/sdk/sources/ffmpeg/libavdevice/dshow_enummediatypes.c
0,0 → 1,103
/*
* DirectShow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "dshow_capture.h"
 
/* COM boilerplate for the media-type enumerator.
 * Fix: the interface table must advertise IID_IEnumMediaTypes; the previous
 * revision listed IID_IEnumPins (copy/paste from dshow_enumpins.c), so a
 * QueryInterface for the enumerator's own interface would have failed with
 * E_NOINTERFACE while an IEnumPins query wrongly succeeded. */
DECLARE_QUERYINTERFACE(libAVEnumMediaTypes,
{ {&IID_IUnknown,0}, {&IID_IEnumMediaTypes,0} })
DECLARE_ADDREF(libAVEnumMediaTypes)
DECLARE_RELEASE(libAVEnumMediaTypes)
 
/**
 * IEnumMediaTypes::Next - return the single stored media type (if any and
 * if the cursor has not passed it yet).  Only the n == 1 case can yield a
 * result; larger requests return S_FALSE with *fetched == 0.
 *
 * Fix: the previous revision dereferenced the result of av_malloc() and
 * ignored the return value of ff_copy_dshow_media_type(), crashing or
 * handing out an uninitialized type on allocation failure.
 */
long WINAPI
libAVEnumMediaTypes_Next(libAVEnumMediaTypes *this, unsigned long n,
                         AM_MEDIA_TYPE **types, unsigned long *fetched)
{
    int count = 0;
    dshowdebug("libAVEnumMediaTypes_Next(%p)\n", this);
    if (!types)
        return E_POINTER;
    if (!this->pos && n == 1) {
        /* majortype == GUID_NULL marks "no type stored". */
        if (!IsEqualGUID(&this->type.majortype, &GUID_NULL)) {
            AM_MEDIA_TYPE *type = av_malloc(sizeof(AM_MEDIA_TYPE));
            if (!type)
                return E_OUTOFMEMORY;
            if (ff_copy_dshow_media_type(type, &this->type) != S_OK) {
                av_free(type);
                return E_OUTOFMEMORY;
            }
            *types = type;
            count = 1;
        }
        this->pos = 1;
    }
    if (fetched)
        *fetched = count;
    if (!count)
        return S_FALSE;
    return S_OK;
}
/* IEnumMediaTypes::Skip - the enumeration holds a single media type, so
 * skipping any positive count always runs off the end. */
long WINAPI
libAVEnumMediaTypes_Skip(libAVEnumMediaTypes *this, unsigned long n)
{
    dshowdebug("libAVEnumMediaTypes_Skip(%p)\n", this);
    return n ? S_FALSE : S_OK;
}
/* IEnumMediaTypes::Reset - rewind the cursor so the stored type can be
 * fetched again by Next(). */
long WINAPI
libAVEnumMediaTypes_Reset(libAVEnumMediaTypes *this)
{
    dshowdebug("libAVEnumMediaTypes_Reset(%p)\n", this);
    this->pos = 0;
    return S_OK;
}
/* IEnumMediaTypes::Clone - duplicate the enumerator, preserving both the
 * stored type and the current cursor position.  (Also avoids naming a
 * local "new", which is a C++ keyword.) */
long WINAPI
libAVEnumMediaTypes_Clone(libAVEnumMediaTypes *this, libAVEnumMediaTypes **enums)
{
    libAVEnumMediaTypes *copy;

    dshowdebug("libAVEnumMediaTypes_Clone(%p)\n", this);
    if (!enums)
        return E_POINTER;
    copy = libAVEnumMediaTypes_Create(&this->type);
    if (!copy)
        return E_OUTOFMEMORY;
    copy->pos = this->pos;
    *enums = copy;
    return S_OK;
}
 
/* Wire up the IEnumMediaTypes vtable and store (or clear) the enumerated
 * media type. Returns 1 on success, as required by the DECLARE_CREATE
 * setup expression.
 * Fix: the vtable pointer was declared as IEnumPinsVtbl* — a copy-paste
 * leftover from dshow_enumpins.c. Both vtables happen to expose identically
 * named Next/Skip/Reset/Clone slots, so the old code worked by accident;
 * use the correct IEnumMediaTypesVtbl type. */
static int
libAVEnumMediaTypes_Setup(libAVEnumMediaTypes *this, const AM_MEDIA_TYPE *type)
{
    IEnumMediaTypesVtbl *vtbl = this->vtbl;
    SETVTBL(vtbl, libAVEnumMediaTypes, QueryInterface);
    SETVTBL(vtbl, libAVEnumMediaTypes, AddRef);
    SETVTBL(vtbl, libAVEnumMediaTypes, Release);
    SETVTBL(vtbl, libAVEnumMediaTypes, Next);
    SETVTBL(vtbl, libAVEnumMediaTypes, Skip);
    SETVTBL(vtbl, libAVEnumMediaTypes, Reset);
    SETVTBL(vtbl, libAVEnumMediaTypes, Clone);

    if (!type) {
        /* GUID_NULL marks "no stored type"; Next() then enumerates nothing. */
        this->type.majortype = GUID_NULL;
    } else {
        ff_copy_dshow_media_type(&this->type, type);
    }

    return 1;
}
DECLARE_CREATE(libAVEnumMediaTypes, libAVEnumMediaTypes_Setup(this, type), const AM_MEDIA_TYPE *type)
DECLARE_DESTROY(libAVEnumMediaTypes, nothing)
/contrib/sdk/sources/ffmpeg/libavdevice/dshow_enumpins.c
0,0 → 1,105
/*
* DirectShow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "dshow_capture.h"
 
/* COM boilerplate (macros from dshow_capture.h): QueryInterface answers for
 * IUnknown and IEnumPins; AddRef/Release provide standard ref counting. */
DECLARE_QUERYINTERFACE(libAVEnumPins,
    { {&IID_IUnknown,0}, {&IID_IEnumPins,0} })
DECLARE_ADDREF(libAVEnumPins)
DECLARE_RELEASE(libAVEnumPins)
 
/* IEnumPins::Next: the filter exposes exactly one pin, so only the first
 * call with n == 1 delivers anything; the caller receives a counted
 * reference. Returns S_OK when a pin was delivered, S_FALSE otherwise. */
long WINAPI
libAVEnumPins_Next(libAVEnumPins *this, unsigned long n, IPin **pins,
                   unsigned long *fetched)
{
    int delivered = 0;
    dshowdebug("libAVEnumPins_Next(%p)\n", this);
    if (!pins)
        return E_POINTER;
    if (this->pos == 0 && n == 1) {
        libAVPin_AddRef(this->pin);
        *pins = (IPin *) this->pin;
        delivered = 1;
        this->pos = 1;
    }
    if (fetched)
        *fetched = delivered;
    return delivered ? S_OK : S_FALSE;
}
/* IEnumPins::Skip: with a single pin, any nonzero skip falls off the end. */
long WINAPI
libAVEnumPins_Skip(libAVEnumPins *this, unsigned long n)
{
    dshowdebug("libAVEnumPins_Skip(%p)\n", this);
    return n ? S_FALSE : S_OK;
}
/* IEnumPins::Reset: rewind the enumerator to the single available pin. */
long WINAPI
libAVEnumPins_Reset(libAVEnumPins *this)
{
    this->pos = 0;
    dshowdebug("libAVEnumPins_Reset(%p)\n", this);
    return S_OK;
}
/* IEnumPins::Clone: create an independent enumerator over the same pin and
 * filter, preserving the current cursor position. */
long WINAPI
libAVEnumPins_Clone(libAVEnumPins *this, libAVEnumPins **pins)
{
    libAVEnumPins *copy;
    dshowdebug("libAVEnumPins_Clone(%p)\n", this);
    if (!pins)
        return E_POINTER;
    copy = libAVEnumPins_Create(this->pin, this->filter);
    if (!copy)
        return E_OUTOFMEMORY;
    copy->pos = this->pos;
    *pins = copy;
    return S_OK;
}
 
/* Wire up the IEnumPins vtable and remember the pin/filter pair being
 * enumerated. Takes a reference on the filter (released in Cleanup).
 * Returns 1 on success per the DECLARE_CREATE convention. */
static int
libAVEnumPins_Setup(libAVEnumPins *this, libAVPin *pin, libAVFilter *filter)
{
    IEnumPinsVtbl *vtbl = this->vtbl;
    SETVTBL(vtbl, libAVEnumPins, QueryInterface);
    SETVTBL(vtbl, libAVEnumPins, AddRef);
    SETVTBL(vtbl, libAVEnumPins, Release);
    SETVTBL(vtbl, libAVEnumPins, Next);
    SETVTBL(vtbl, libAVEnumPins, Skip);
    SETVTBL(vtbl, libAVEnumPins, Reset);
    SETVTBL(vtbl, libAVEnumPins, Clone);

    this->pin = pin;
    this->filter = filter;
    /* Keep the filter alive for the lifetime of the enumerator. */
    libAVFilter_AddRef(this->filter);

    return 1;
}
/* Drop the filter reference taken in Setup. */
static int
libAVEnumPins_Cleanup(libAVEnumPins *this)
{
    libAVFilter_Release(this->filter);
    return 1;
}
DECLARE_CREATE(libAVEnumPins, libAVEnumPins_Setup(this, pin, filter),
    libAVPin *pin, libAVFilter *filter)
DECLARE_DESTROY(libAVEnumPins, libAVEnumPins_Cleanup)
/contrib/sdk/sources/ffmpeg/libavdevice/dshow_filter.c
0,0 → 1,202
/*
* DirectShow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "dshow_capture.h"
 
/* COM boilerplate (macros from dshow_capture.h): QueryInterface answers for
 * IUnknown and IBaseFilter; AddRef/Release provide standard ref counting. */
DECLARE_QUERYINTERFACE(libAVFilter,
    { {&IID_IUnknown,0}, {&IID_IBaseFilter,0} })
DECLARE_ADDREF(libAVFilter)
DECLARE_RELEASE(libAVFilter)
 
/* IPersist::GetClassID: this private capture sink has no registered CLSID,
 * so the query is simply refused. */
long WINAPI
libAVFilter_GetClassID(libAVFilter *this, CLSID *id)
{
    dshowdebug("libAVFilter_GetClassID(%p)\n", this);
    return E_FAIL;
}
/* IBaseFilter::Stop: transition immediately to the stopped state. */
long WINAPI
libAVFilter_Stop(libAVFilter *this)
{
    this->state = State_Stopped;
    dshowdebug("libAVFilter_Stop(%p)\n", this);
    return S_OK;
}
/* IBaseFilter::Pause: transition immediately to the paused state. */
long WINAPI
libAVFilter_Pause(libAVFilter *this)
{
    this->state = State_Paused;
    dshowdebug("libAVFilter_Pause(%p)\n", this);
    return S_OK;
}
/* IBaseFilter::Run: enter the running state and record the stream start
 * time, which Receive() later adds to audio sample timestamps. */
long WINAPI
libAVFilter_Run(libAVFilter *this, REFERENCE_TIME start)
{
    dshowdebug("libAVFilter_Run(%p) %"PRId64"\n", this, start);
    this->start_time = start;
    this->state = State_Running;
    return S_OK;
}
/* IBaseFilter::GetState: state changes here are immediate, so the timeout
 * argument is never consulted. */
long WINAPI
libAVFilter_GetState(libAVFilter *this, DWORD ms, FILTER_STATE *state)
{
    dshowdebug("libAVFilter_GetState(%p)\n", this);
    if (!state)
        return E_POINTER;
    *state = this->state;
    return S_OK;
}
/* IBaseFilter::SetSyncSource: swap in a new reference clock, keeping the
 * COM reference counts balanced (release the old one, add-ref the new). */
long WINAPI
libAVFilter_SetSyncSource(libAVFilter *this, IReferenceClock *clock)
{
    dshowdebug("libAVFilter_SetSyncSource(%p)\n", this);

    if (clock != this->clock) {
        IReferenceClock *old = this->clock;
        if (old)
            IReferenceClock_Release(old);
        this->clock = clock;
        if (clock)
            IReferenceClock_AddRef(clock);
    }

    return S_OK;
}
/* IBaseFilter::GetSyncSource: return the current clock (possibly NULL),
 * handing the caller its own counted reference when one exists. */
long WINAPI
libAVFilter_GetSyncSource(libAVFilter *this, IReferenceClock **clock)
{
    dshowdebug("libAVFilter_GetSyncSource(%p)\n", this);

    if (!clock)
        return E_POINTER;
    *clock = this->clock;
    if (*clock)
        IReferenceClock_AddRef(*clock);

    return S_OK;
}
/* IBaseFilter::EnumPins: create an enumerator over the filter's single pin. */
long WINAPI
libAVFilter_EnumPins(libAVFilter *this, IEnumPins **enumpin)
{
    libAVEnumPins *enumerator;
    dshowdebug("libAVFilter_EnumPins(%p)\n", this);

    if (!enumpin)
        return E_POINTER;
    enumerator = libAVEnumPins_Create(this->pin, this);
    if (!enumerator)
        return E_OUTOFMEMORY;

    *enumpin = (IEnumPins *) enumerator;
    return S_OK;
}
/* IBaseFilter::FindPin: the only pin is published under the id "In"; a
 * match is returned with an extra reference, anything else is not found. */
long WINAPI
libAVFilter_FindPin(libAVFilter *this, const wchar_t *id, IPin **pin)
{
    libAVPin *match = NULL;
    dshowdebug("libAVFilter_FindPin(%p)\n", this);

    if (!id || !pin)
        return E_POINTER;
    if (wcscmp(id, L"In") == 0) {
        match = this->pin;
        libAVPin_AddRef(match);
    }
    *pin = (IPin *) match;

    return match ? S_OK : VFW_E_NOT_FOUND;
}
/* IBaseFilter::QueryFilterInfo: copy out the filter info; since the copy
 * carries the graph pointer, add a reference on it for the caller. */
long WINAPI
libAVFilter_QueryFilterInfo(libAVFilter *this, FILTER_INFO *info)
{
    dshowdebug("libAVFilter_QueryFilterInfo(%p)\n", this);

    if (!info)
        return E_POINTER;

    if (this->info.pGraph)
        IFilterGraph_AddRef(this->info.pGraph);
    *info = this->info;

    return S_OK;
}
/* IBaseFilter::JoinFilterGraph: record the graph we now belong to (NULL
 * when leaving a graph). The graph pointer is stored without an AddRef,
 * matching the COM contract that a filter must not hold a counted
 * reference on its own graph. */
long WINAPI
libAVFilter_JoinFilterGraph(libAVFilter *this, IFilterGraph *graph,
                            const wchar_t *name)
{
    dshowdebug("libAVFilter_JoinFilterGraph(%p)\n", this);

    this->info.pGraph = graph;
    if (name)
        /* NOTE(review): unbounded wcscpy into info.achName — assumes the
         * graph always supplies a name shorter than the achName buffer;
         * verify against callers. */
        wcscpy(this->info.achName, name);

    return S_OK;
}
/* IBaseFilter::QueryVendorInfo: return a freshly allocated vendor string.
 * Fix: a failed wcsdup() previously left *info NULL while still reporting
 * S_OK; report E_OUTOFMEMORY instead. */
long WINAPI
libAVFilter_QueryVendorInfo(libAVFilter *this, wchar_t **info)
{
    dshowdebug("libAVFilter_QueryVendorInfo(%p)\n", this);

    if (!info)
        return E_POINTER;
    *info = wcsdup(L"libAV");
    if (!*info)
        return E_OUTOFMEMORY;

    return S_OK;
}
 
/* Wire up the IBaseFilter vtable and initialize filter state: the single
 * input pin, the opaque callback context, and the device type (video or
 * audio). Returns 1 on success per the DECLARE_CREATE convention. */
static int
libAVFilter_Setup(libAVFilter *this, void *priv_data, void *callback,
                  enum dshowDeviceType type)
{
    IBaseFilterVtbl *vtbl = this->vtbl;
    SETVTBL(vtbl, libAVFilter, QueryInterface);
    SETVTBL(vtbl, libAVFilter, AddRef);
    SETVTBL(vtbl, libAVFilter, Release);
    SETVTBL(vtbl, libAVFilter, GetClassID);
    SETVTBL(vtbl, libAVFilter, Stop);
    SETVTBL(vtbl, libAVFilter, Pause);
    SETVTBL(vtbl, libAVFilter, Run);
    SETVTBL(vtbl, libAVFilter, GetState);
    SETVTBL(vtbl, libAVFilter, SetSyncSource);
    SETVTBL(vtbl, libAVFilter, GetSyncSource);
    SETVTBL(vtbl, libAVFilter, EnumPins);
    SETVTBL(vtbl, libAVFilter, FindPin);
    SETVTBL(vtbl, libAVFilter, QueryFilterInfo);
    SETVTBL(vtbl, libAVFilter, JoinFilterGraph);
    SETVTBL(vtbl, libAVFilter, QueryVendorInfo);

    /* NOTE(review): the libAVPin_Create() result is not checked; if the pin
     * allocation fails this->pin is NULL — confirm downstream handling
     * (e.g. Cleanup releasing a NULL pin). */
    this->pin = libAVPin_Create(this);

    this->priv_data = priv_data;
    this->callback = callback;
    this->type = type;

    return 1;
}
/* Release the pin owned by the filter when the filter is destroyed. */
static int
libAVFilter_Cleanup(libAVFilter *this)
{
    libAVPin_Release(this->pin);
    return 1;
}
DECLARE_CREATE(libAVFilter, libAVFilter_Setup(this, priv_data, callback, type),
    void *priv_data, void *callback, enum dshowDeviceType type)
DECLARE_DESTROY(libAVFilter, libAVFilter_Cleanup)
/contrib/sdk/sources/ffmpeg/libavdevice/dshow_pin.c
0,0 → 1,362
/*
* DirectShow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "dshow_capture.h"
 
#include <stddef.h>
/* Byte offset of the embedded IMemInputPin vtable inside libAVPin; used by
 * the QueryInterface table below and by the container-of casts in the
 * libAVMemInputPin_* methods to recover the owning pin. */
#define imemoffset offsetof(libAVPin, imemvtbl)

/* COM boilerplate: this pin answers QueryInterface for IUnknown, IPin, and
 * (at imemoffset) IMemInputPin. */
DECLARE_QUERYINTERFACE(libAVPin,
    { {&IID_IUnknown,0}, {&IID_IPin,0}, {&IID_IMemInputPin,imemoffset} })
DECLARE_ADDREF(libAVPin)
DECLARE_RELEASE(libAVPin)
 
/* IPin::Connect: this is an input pin — it only accepts connections via
 * ReceiveConnection and never initiates one. */
long WINAPI
libAVPin_Connect(libAVPin *this, IPin *pin, const AM_MEDIA_TYPE *type)
{
    dshowdebug("libAVPin_Connect(%p, %p, %p)\n", this, pin, type);
    return S_FALSE;
}
/* IPin::ReceiveConnection: accept a connection from an upstream output pin
 * if the proposed major type matches the device kind (video or audio).
 * On success the peer pin is ref-counted and the media type deep-copied.
 * Fix: `type` is dereferenced below, but only `pin` was null-checked. */
long WINAPI
libAVPin_ReceiveConnection(libAVPin *this, IPin *pin,
                           const AM_MEDIA_TYPE *type)
{
    enum dshowDeviceType devtype = this->filter->type;
    dshowdebug("libAVPin_ReceiveConnection(%p)\n", this);

    if (!pin || !type)
        return E_POINTER;
    if (this->connectedto)
        return VFW_E_ALREADY_CONNECTED;

    ff_print_AM_MEDIA_TYPE(type);
    if (devtype == VideoDevice) {
        if (!IsEqualGUID(&type->majortype, &MEDIATYPE_Video))
            return VFW_E_TYPE_NOT_ACCEPTED;
    } else {
        if (!IsEqualGUID(&type->majortype, &MEDIATYPE_Audio))
            return VFW_E_TYPE_NOT_ACCEPTED;
    }

    IPin_AddRef(pin);
    this->connectedto = pin;

    ff_copy_dshow_media_type(&this->type, type);

    return S_OK;
}
/* IPin::Disconnect: breaking a connection is only legal while the owning
 * filter is stopped; releases the peer reference taken at connect time. */
long WINAPI
libAVPin_Disconnect(libAVPin *this)
{
    dshowdebug("libAVPin_Disconnect(%p)\n", this);

    if (this->filter->state != State_Stopped)
        return VFW_E_NOT_STOPPED;
    if (!this->connectedto)
        return S_FALSE;

    IPin_Release(this->connectedto);
    this->connectedto = NULL;
    return S_OK;
}
/* IPin::ConnectedTo: hand back the connected peer with its own counted
 * reference, or VFW_E_NOT_CONNECTED when there is none. */
long WINAPI
libAVPin_ConnectedTo(libAVPin *this, IPin **pin)
{
    IPin *peer;
    dshowdebug("libAVPin_ConnectedTo(%p)\n", this);

    if (!pin)
        return E_POINTER;
    peer = this->connectedto;
    if (!peer)
        return VFW_E_NOT_CONNECTED;
    IPin_AddRef(peer);
    *pin = peer;

    return S_OK;
}
/* IPin::ConnectionMediaType: deep-copy the negotiated media type into the
 * caller's struct; only valid while connected. */
long WINAPI
libAVPin_ConnectionMediaType(libAVPin *this, AM_MEDIA_TYPE *type)
{
    dshowdebug("libAVPin_ConnectionMediaType(%p)\n", this);

    if (!type)
        return E_POINTER;
    if (!this->connectedto)
        return VFW_E_NOT_CONNECTED;
    return ff_copy_dshow_media_type(type, &this->type);
}
/* IPin::QueryPinInfo: fill in the pin description; the caller receives a
 * counted reference to the owning filter inside the struct. */
long WINAPI
libAVPin_QueryPinInfo(libAVPin *this, PIN_INFO *info)
{
    dshowdebug("libAVPin_QueryPinInfo(%p)\n", this);

    if (!info)
        return E_POINTER;

    if (this->filter)
        libAVFilter_AddRef(this->filter);

    info->pFilter = (IBaseFilter *) this->filter;
    info->dir = PINDIR_INPUT;
    wcscpy(info->achName, L"Capture");
    return S_OK;
}
/* IPin::QueryDirection: this pin is always an input pin. */
long WINAPI
libAVPin_QueryDirection(libAVPin *this, PIN_DIRECTION *dir)
{
    dshowdebug("libAVPin_QueryDirection(%p)\n", this);
    if (!dir)
        return E_POINTER;
    *dir = PINDIR_INPUT;
    return S_OK;
}
/* IPin::QueryId: return a freshly allocated copy of the pin id.
 * Fix: a failed wcsdup() previously left *id NULL while still reporting
 * S_OK; report E_OUTOFMEMORY instead. */
long WINAPI
libAVPin_QueryId(libAVPin *this, wchar_t **id)
{
    dshowdebug("libAVPin_QueryId(%p)\n", this);

    if (!id)
        return E_POINTER;

    *id = wcsdup(L"libAV Pin");
    if (!*id)
        return E_OUTOFMEMORY;

    return S_OK;
}
/* IPin::QueryAccept: no type is pre-approved; negotiation happens in
 * ReceiveConnection instead. */
long WINAPI
libAVPin_QueryAccept(libAVPin *this, const AM_MEDIA_TYPE *type)
{
    dshowdebug("libAVPin_QueryAccept(%p)\n", this);
    return S_FALSE;
}
/* IPin::EnumMediaTypes: create an enumerator with no stored type (NULL),
 * i.e. one that enumerates an empty list. */
long WINAPI
libAVPin_EnumMediaTypes(libAVPin *this, IEnumMediaTypes **enumtypes)
{
    libAVEnumMediaTypes *enumerator;
    dshowdebug("libAVPin_EnumMediaTypes(%p)\n", this);

    if (!enumtypes)
        return E_POINTER;
    enumerator = libAVEnumMediaTypes_Create(NULL);
    if (!enumerator)
        return E_OUTOFMEMORY;

    *enumtypes = (IEnumMediaTypes *) enumerator;
    return S_OK;
}
/* IPin::QueryInternalConnections: not supported by this pin. */
long WINAPI
libAVPin_QueryInternalConnections(libAVPin *this, IPin **pin,
                                  unsigned long *npin)
{
    dshowdebug("libAVPin_QueryInternalConnections(%p)\n", this);
    return E_NOTIMPL;
}
/* IPin::EndOfStream: stream notifications are ignored by this sink. */
long WINAPI
libAVPin_EndOfStream(libAVPin *this)
{
    dshowdebug("libAVPin_EndOfStream(%p)\n", this);
    return S_OK;
}
/* IPin::BeginFlush: flush notifications are ignored by this sink. */
long WINAPI
libAVPin_BeginFlush(libAVPin *this)
{
    dshowdebug("libAVPin_BeginFlush(%p)\n", this);
    return S_OK;
}
/* IPin::EndFlush: flush notifications are ignored by this sink. */
long WINAPI
libAVPin_EndFlush(libAVPin *this)
{
    dshowdebug("libAVPin_EndFlush(%p)\n", this);
    return S_OK;
}
/* IPin::NewSegment: segment boundaries are ignored by this sink. */
long WINAPI
libAVPin_NewSegment(libAVPin *this, REFERENCE_TIME start, REFERENCE_TIME stop,
                    double rate)
{
    dshowdebug("libAVPin_NewSegment(%p)\n", this);
    return S_OK;
}
 
/* Wire up both vtables for the pin: the IPin vtable embedded directly, and
 * a separately allocated IMemInputPin vtable (freed by the generic destroy
 * path — TODO confirm against DECLARE_DESTROY in dshow_capture.h).
 * Returns 1 on success, 0 on failure per the DECLARE_CREATE convention. */
static int
libAVPin_Setup(libAVPin *this, libAVFilter *filter)
{
    IPinVtbl *vtbl = this->vtbl;
    IMemInputPinVtbl *imemvtbl;

    if (!filter)
        return 0;

    imemvtbl = av_malloc(sizeof(IMemInputPinVtbl));
    if (!imemvtbl)
        return 0;

    SETVTBL(imemvtbl, libAVMemInputPin, QueryInterface);
    SETVTBL(imemvtbl, libAVMemInputPin, AddRef);
    SETVTBL(imemvtbl, libAVMemInputPin, Release);
    SETVTBL(imemvtbl, libAVMemInputPin, GetAllocator);
    SETVTBL(imemvtbl, libAVMemInputPin, NotifyAllocator);
    SETVTBL(imemvtbl, libAVMemInputPin, GetAllocatorRequirements);
    SETVTBL(imemvtbl, libAVMemInputPin, Receive);
    SETVTBL(imemvtbl, libAVMemInputPin, ReceiveMultiple);
    SETVTBL(imemvtbl, libAVMemInputPin, ReceiveCanBlock);

    this->imemvtbl = imemvtbl;

    SETVTBL(vtbl, libAVPin, QueryInterface);
    SETVTBL(vtbl, libAVPin, AddRef);
    SETVTBL(vtbl, libAVPin, Release);
    SETVTBL(vtbl, libAVPin, Connect);
    SETVTBL(vtbl, libAVPin, ReceiveConnection);
    SETVTBL(vtbl, libAVPin, Disconnect);
    SETVTBL(vtbl, libAVPin, ConnectedTo);
    SETVTBL(vtbl, libAVPin, ConnectionMediaType);
    SETVTBL(vtbl, libAVPin, QueryPinInfo);
    SETVTBL(vtbl, libAVPin, QueryDirection);
    SETVTBL(vtbl, libAVPin, QueryId);
    SETVTBL(vtbl, libAVPin, QueryAccept);
    SETVTBL(vtbl, libAVPin, EnumMediaTypes);
    SETVTBL(vtbl, libAVPin, QueryInternalConnections);
    SETVTBL(vtbl, libAVPin, EndOfStream);
    SETVTBL(vtbl, libAVPin, BeginFlush);
    SETVTBL(vtbl, libAVPin, EndFlush);
    SETVTBL(vtbl, libAVPin, NewSegment);

    /* The pin does not AddRef its owning filter; the filter owns the pin. */
    this->filter = filter;

    return 1;
}
DECLARE_CREATE(libAVPin, libAVPin_Setup(this, filter), libAVFilter *filter)
DECLARE_DESTROY(libAVPin, nothing)
 
/*****************************************************************************
* libAVMemInputPin
****************************************************************************/
/* IMemInputPin::QueryInterface: recover the containing libAVPin via the
 * imemoffset container-of cast and delegate to its implementation. */
long WINAPI
libAVMemInputPin_QueryInterface(libAVMemInputPin *this, const GUID *riid,
                                void **ppvObject)
{
    libAVPin *owner = (libAVPin *) ((uint8_t *) this - imemoffset);
    dshowdebug("libAVMemInputPin_QueryInterface(%p)\n", this);
    return libAVPin_QueryInterface(owner, riid, ppvObject);
}
/* IMemInputPin::AddRef: the pin and its IMemInputPin share one ref count. */
unsigned long WINAPI
libAVMemInputPin_AddRef(libAVMemInputPin *this)
{
    libAVPin *owner = (libAVPin *) ((uint8_t *) this - imemoffset);
    dshowdebug("libAVMemInputPin_AddRef(%p)\n", this);
    return libAVPin_AddRef(owner);
}
/* IMemInputPin::Release: delegate to the containing pin's ref count. */
unsigned long WINAPI
libAVMemInputPin_Release(libAVMemInputPin *this)
{
    libAVPin *owner = (libAVPin *) ((uint8_t *) this - imemoffset);
    dshowdebug("libAVMemInputPin_Release(%p)\n", this);
    return libAVPin_Release(owner);
}
/* IMemInputPin::GetAllocator: this pin supplies no allocator of its own;
 * the upstream filter must provide one. */
long WINAPI
libAVMemInputPin_GetAllocator(libAVMemInputPin *this, IMemAllocator **alloc)
{
    dshowdebug("libAVMemInputPin_GetAllocator(%p)\n", this);
    return VFW_E_NO_ALLOCATOR;
}
/* IMemInputPin::NotifyAllocator: any upstream allocator is acceptable. */
long WINAPI
libAVMemInputPin_NotifyAllocator(libAVMemInputPin *this, IMemAllocator *alloc,
                                 BOOL rdwr)
{
    dshowdebug("libAVMemInputPin_NotifyAllocator(%p)\n", this);
    return S_OK;
}
/* IMemInputPin::GetAllocatorRequirements: no buffer requirements to state. */
long WINAPI
libAVMemInputPin_GetAllocatorRequirements(libAVMemInputPin *this,
                                          ALLOCATOR_PROPERTIES *props)
{
    dshowdebug("libAVMemInputPin_GetAllocatorRequirements(%p)\n", this);
    return E_NOTIMPL;
}
/* IMemInputPin::Receive: deliver one media sample to the capture callback.
 * Computes a timestamp (graph clock for video, sample time + stream start
 * for audio), then forwards the raw buffer to pin->filter->callback. */
long WINAPI
libAVMemInputPin_Receive(libAVMemInputPin *this, IMediaSample *sample)
{
    /* Recover the containing pin via the imemoffset container-of cast. */
    libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
    enum dshowDeviceType devtype = pin->filter->type;
    void *priv_data;
    uint8_t *buf;
    int buf_size;
    int index;
    int64_t curtime;

    dshowdebug("libAVMemInputPin_Receive(%p)\n", this);

    if (!sample)
        return E_POINTER;

    if (devtype == VideoDevice) {
        /* PTS from video devices is unreliable. */
        /* NOTE(review): assumes the graph has set a reference clock via
         * SetSyncSource before samples arrive — confirm clock != NULL. */
        IReferenceClock *clock = pin->filter->clock;
        IReferenceClock_GetTime(clock, &curtime);
    } else {
        /* Audio: use the sample's own start time, offset by the stream
         * start recorded in libAVFilter_Run(). */
        int64_t dummy;
        IMediaSample_GetTime(sample, &curtime, &dummy);
        curtime += pin->filter->start_time;
    }

    buf_size = IMediaSample_GetActualDataLength(sample);
    IMediaSample_GetPointer(sample, &buf);
    priv_data = pin->filter->priv_data;
    index = pin->filter->stream_index;

    pin->filter->callback(priv_data, index, buf, buf_size, curtime);

    return S_OK;
}
/* IMemInputPin::ReceiveMultiple: deliver a batch of samples one by one and
 * report all of them as processed.
 * Fix: the loop counter was `int` while the sample count is `long`, a
 * signed-width mismatch on LP64 targets; use matching types. */
long WINAPI
libAVMemInputPin_ReceiveMultiple(libAVMemInputPin *this,
                                 IMediaSample **samples, long n, long *nproc)
{
    long i;
    dshowdebug("libAVMemInputPin_ReceiveMultiple(%p)\n", this);

    for (i = 0; i < n; i++)
        libAVMemInputPin_Receive(this, samples[i]);

    *nproc = n;
    return S_OK;
}
/* IMemInputPin::ReceiveCanBlock: Receive() never blocks the caller. */
long WINAPI
libAVMemInputPin_ReceiveCanBlock(libAVMemInputPin *this)
{
    dshowdebug("libAVMemInputPin_ReceiveCanBlock(%p)\n", this);
    return S_FALSE;
}
 
/* Destroy the containing pin (the IMemInputPin has no lifetime of its own;
 * it lives embedded in the libAVPin recovered here). */
void
libAVMemInputPin_Destroy(libAVMemInputPin *this)
{
    libAVPin *owner = (libAVPin *) ((uint8_t *) this - imemoffset);
    dshowdebug("libAVMemInputPin_Destroy(%p)\n", this);
    libAVPin_Destroy(owner);
}
/contrib/sdk/sources/ffmpeg/libavdevice/dv1394.c
0,0 → 1,238
/*
* Linux DV1394 interface
* Copyright (c) 2003 Max Krasnyansky <maxk@qualcomm.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "config.h"
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
 
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "avdevice.h"
#include "libavformat/dv.h"
#include "dv1394.h"
 
/* Per-instance state for the DV1394 capture demuxer. */
struct dv1394_data {
    AVClass *class;          /* AVOptions context (must be first) */
    int fd;                  /* file descriptor of the dv1394 device node */
    int channel;             /* isochronous channel ("channel" option) */
    int format;              /* DV1394_NTSC/DV1394_PAL ("standard" option) */

    uint8_t *ring;           /* Ring buffer */
    int index;               /* Current frame index */
    int avail;               /* Number of frames available for reading */
    int done;                /* Number of completed frames */

    DVDemuxContext* dv_demux; /* Generic DV muxing/demuxing context */
};
 
/*
 * The trick here is to kludge around a well-known problem with the kernel
 * Oopsing when you try to capture PAL on a device node configured for NTSC.
 * That's why we have to configure the device node for PAL, and then read only
 * an NTSC amount of data.
 */
/* (Re)initialize the dv1394 device: always configure for PAL (the larger
 * frame size — see the comment above) and reset the ring-buffer counters.
 * Returns 0 on success, -1 when the DV1394_INIT ioctl fails. */
static int dv1394_reset(struct dv1394_data *dv)
{
    struct dv1394_init init;

    init.api_version = DV1394_API_VERSION;
    init.channel     = dv->channel;
    init.n_frames    = DV1394_RING_FRAMES;
    init.format      = DV1394_PAL;

    if (ioctl(dv->fd, DV1394_INIT, &init) < 0)
        return -1;

    dv->avail = 0;
    dv->done  = 0;
    return 0;
}
 
/* Ask the DV1394 driver to begin receiving frames.
 * Returns 0 on success, -1 (with a log message) on ioctl failure. */
static int dv1394_start(struct dv1394_data *dv)
{
    if (ioctl(dv->fd, DV1394_START_RECEIVE, 0) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Failed to start receiver: %s\n", strerror(errno));
        return -1;
    }
    return 0;
}
 
/* Demuxer read_header: set up the DV demux context, open and initialize the
 * dv1394 device, map its ring buffer, and start reception.
 * Fixes over the original:
 *  - a failed avpriv_dv_init_demux() jumped to `failed`, which closed
 *    dv->fd before it had ever been opened (priv_data is zeroed, so this
 *    closed fd 0 — stdin); now we return before touching the fd.
 *  - a failed dv1394_start() leaked the mmap'ed ring buffer; it is now
 *    unmapped on that path. */
static int dv1394_read_header(AVFormatContext * context)
{
    struct dv1394_data *dv = context->priv_data;

    dv->dv_demux = avpriv_dv_init_demux(context);
    if (!dv->dv_demux)
        return AVERROR(EIO);

    /* Open and initialize DV1394 device */
    dv->fd = avpriv_open(context->filename, O_RDONLY);
    if (dv->fd < 0) {
        av_log(context, AV_LOG_ERROR, "Failed to open DV interface: %s\n", strerror(errno));
        return AVERROR(EIO);
    }

    if (dv1394_reset(dv) < 0) {
        av_log(context, AV_LOG_ERROR, "Failed to initialize DV interface: %s\n", strerror(errno));
        goto failed;
    }

    /* Map a PAL-sized ring buffer (the device is always configured for PAL;
     * see the kludge comment above dv1394_reset()). */
    dv->ring = mmap(NULL, DV1394_PAL_FRAME_SIZE * DV1394_RING_FRAMES,
                    PROT_READ, MAP_PRIVATE, dv->fd, 0);
    if (dv->ring == MAP_FAILED) {
        av_log(context, AV_LOG_ERROR, "Failed to mmap DV ring buffer: %s\n", strerror(errno));
        goto failed;
    }

    if (dv1394_start(dv) < 0) {
        munmap(dv->ring, DV1394_PAL_FRAME_SIZE * DV1394_RING_FRAMES);
        goto failed;
    }

    return 0;

failed:
    close(dv->fd);
    return AVERROR(EIO);
}
 
/* Demuxer read_packet: drain any pending packet from the DV demuxer; when
 * the ring is empty, recycle completed frames back to the driver, poll for
 * new ones, and resynchronize the ring-buffer bookkeeping from the driver's
 * status. On overflow or frame drops the device is reset and restarted. */
static int dv1394_read_packet(AVFormatContext *context, AVPacket *pkt)
{
    struct dv1394_data *dv = context->priv_data;
    int size;

    /* A previously produced DV frame may still hold queued packets. */
    size = avpriv_dv_get_packet(dv->dv_demux, pkt);
    if (size > 0)
        return size;

    if (!dv->avail) {
        struct dv1394_status s;
        struct pollfd p;

        if (dv->done) {
            /* Request more frames */
            if (ioctl(dv->fd, DV1394_RECEIVE_FRAMES, dv->done) < 0) {
                /* This usually means that ring buffer overflowed.
                 * We have to reset :(.
                 */

                av_log(context, AV_LOG_ERROR, "DV1394: Ring buffer overflow. Reseting ..\n");

                dv1394_reset(dv);
                dv1394_start(dv);
            }
            dv->done = 0;
        }

        /* Wait until more frames are available */
restart_poll:
        p.fd = dv->fd;
        p.events = POLLIN | POLLERR | POLLHUP;
        if (poll(&p, 1, -1) < 0) {
            /* Retry on benign interruptions of the blocking poll. */
            if (errno == EAGAIN || errno == EINTR)
                goto restart_poll;
            av_log(context, AV_LOG_ERROR, "Poll failed: %s\n", strerror(errno));
            return AVERROR(EIO);
        }

        if (ioctl(dv->fd, DV1394_GET_STATUS, &s) < 0) {
            av_log(context, AV_LOG_ERROR, "Failed to get status: %s\n", strerror(errno));
            return AVERROR(EIO);
        }
        av_dlog(context, "DV1394: status\n"
                "\tactive_frame\t%d\n"
                "\tfirst_clear_frame\t%d\n"
                "\tn_clear_frames\t%d\n"
                "\tdropped_frames\t%d\n",
                s.active_frame, s.first_clear_frame,
                s.n_clear_frames, s.dropped_frames);

        /* Resync our view of the ring with the driver's. */
        dv->avail = s.n_clear_frames;
        dv->index = s.first_clear_frame;
        dv->done = 0;

        if (s.dropped_frames) {
            av_log(context, AV_LOG_ERROR, "DV1394: Frame drop detected (%d). Reseting ..\n",
                   s.dropped_frames);

            dv1394_reset(dv);
            dv1394_start(dv);
        }
    }

    av_dlog(context, "index %d, avail %d, done %d\n", dv->index, dv->avail,
            dv->done);

    /* Feed the next ring frame to the DV demuxer (PAL stride; an NTSC frame
     * simply occupies the head of a PAL-sized slot — see dv1394_reset()). */
    size = avpriv_dv_produce_packet(dv->dv_demux, pkt,
                                    dv->ring + (dv->index * DV1394_PAL_FRAME_SIZE),
                                    DV1394_PAL_FRAME_SIZE, -1);
    dv->index = (dv->index + 1) % DV1394_RING_FRAMES;
    dv->done++; dv->avail--;

    return size;
}
 
/* Demuxer read_close: stop the receiver, unmap the ring buffer, and release
 * the fd and DV demux context.
 * Fix: dv1394_read_header() maps DV1394_PAL_FRAME_SIZE * DV1394_RING_FRAMES
 * bytes, but the munmap here used the smaller NTSC size, leaving the tail
 * of the mapping in place; the lengths must match. */
static int dv1394_close(AVFormatContext * context)
{
    struct dv1394_data *dv = context->priv_data;

    /* Shutdown DV1394 receiver */
    if (ioctl(dv->fd, DV1394_SHUTDOWN, 0) < 0)
        av_log(context, AV_LOG_ERROR, "Failed to shutdown DV1394: %s\n", strerror(errno));

    /* Unmap ring buffer */
    if (munmap(dv->ring, DV1394_PAL_FRAME_SIZE * DV1394_RING_FRAMES) < 0)
        av_log(context, AV_LOG_ERROR, "Failed to munmap DV1394 ring buffer: %s\n", strerror(errno));

    close(dv->fd);
    av_free(dv->dv_demux);

    return 0;
}
 
/* Private AVOptions: the video standard (named constants PAL/NTSC) and the
 * isochronous channel number. Note dv1394_reset() always configures the
 * device for PAL regardless of "standard" — see the kludge comment there. */
static const AVOption options[] = {
    { "standard", "", offsetof(struct dv1394_data, format), AV_OPT_TYPE_INT, {.i64 = DV1394_NTSC}, DV1394_NTSC, DV1394_PAL, AV_OPT_FLAG_DECODING_PARAM, "standard" },
    { "PAL", "", 0, AV_OPT_TYPE_CONST, {.i64 = DV1394_PAL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
    { "NTSC", "", 0, AV_OPT_TYPE_CONST, {.i64 = DV1394_NTSC}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
    { "channel", "", offsetof(struct dv1394_data, channel), AV_OPT_TYPE_INT, {.i64 = DV1394_DEFAULT_CHANNEL}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};
 
/* AVClass exposing the options above through the generic AVOption API. */
static const AVClass dv1394_class = {
    .class_name = "DV1394 indev",
    .item_name = av_default_item_name,
    .option = options,
    .version = LIBAVUTIL_VERSION_INT,
};
 
/* Input-device demuxer registration; AVFMT_NOFILE because the device node
 * is opened directly in dv1394_read_header() rather than through avio. */
AVInputFormat ff_dv1394_demuxer = {
    .name = "dv1394",
    .long_name = NULL_IF_CONFIG_SMALL("DV1394 A/V grab"),
    .priv_data_size = sizeof(struct dv1394_data),
    .read_header = dv1394_read_header,
    .read_packet = dv1394_read_packet,
    .read_close = dv1394_close,
    .flags = AVFMT_NOFILE,
    .priv_class = &dv1394_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/dv1394.h
0,0 → 1,357
/*
* DV input/output over IEEE 1394 on OHCI chips
* Copyright (C)2001 Daniel Maas <dmaas@dcine.com>
* receive, proc_fs by Dan Dennedy <dan@dennedy.org>
*
* based on:
* video1394.h - driver for OHCI 1394 boards
* Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
* Peter Schlaile <udbz@rz.uni-karlsruhe.de>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVDEVICE_DV1394_H
#define AVDEVICE_DV1394_H

/* Defaults used by the libavdevice wrapper (not part of the kernel ABI). */
#define DV1394_DEFAULT_CHANNEL 63
#define DV1394_DEFAULT_CARD 0
#define DV1394_RING_FRAMES 20

/* Nominal DV frame geometry. */
#define DV1394_WIDTH 720
#define DV1394_NTSC_HEIGHT 480
#define DV1394_PAL_HEIGHT 576

/* This is the public user-space interface. Try not to break it. */

#define DV1394_API_VERSION 0x20011127
 
/* ********************
** **
** DV1394 API **
** **
********************
 
There are two methods of operating the DV1394 DV output device.
 
1)
 
The simplest is an interface based on write(): simply write
full DV frames of data to the device, and they will be transmitted
as quickly as possible. The FD may be set for non-blocking I/O,
in which case you can use select() or poll() to wait for output
buffer space.
 
To set the DV output parameters (e.g. whether you want NTSC or PAL
video), use the DV1394_INIT ioctl, passing in the parameters you
want in a struct dv1394_init.
 
Example 1:
To play a raw .DV file: cat foo.DV > /dev/dv1394
(cat will use write() internally)
 
Example 2:
static struct dv1394_init init = {
0x63, (broadcast channel)
4, (four-frame ringbuffer)
DV1394_NTSC, (send NTSC video)
0, 0 (default empty packet rate)
}
 
ioctl(fd, DV1394_INIT, &init);
 
while(1) {
read( <a raw DV file>, buf, DV1394_NTSC_FRAME_SIZE );
write( <the dv1394 FD>, buf, DV1394_NTSC_FRAME_SIZE );
}
 
2)
 
For more control over buffering, and to avoid unnecessary copies
of the DV data, you can use the more sophisticated mmap() interface.
First, call the DV1394_INIT ioctl to specify your parameters,
including the number of frames in the ringbuffer. Then, calling mmap()
on the dv1394 device will give you direct access to the ringbuffer
from which the DV card reads your frame data.
 
The ringbuffer is simply one large, contiguous region of memory
containing two or more frames of packed DV data. Each frame of DV data
is 120000 bytes (NTSC) or 144000 bytes (PAL).
 
Fill one or more frames in the ringbuffer, then use the DV1394_SUBMIT_FRAMES
ioctl to begin I/O. You can use either the DV1394_WAIT_FRAMES ioctl
or select()/poll() to wait until the frames are transmitted. Next, you'll
need to call the DV1394_GET_STATUS ioctl to determine which ringbuffer
frames are clear (ready to be filled with new DV data). Finally, use
DV1394_SUBMIT_FRAMES again to send the new data to the DV output.
 
 
Example: here is what a four-frame ringbuffer might look like
during DV transmission:
 
 
frame 0 frame 1 frame 2 frame 3
 
*--------------------------------------*
| CLEAR | DV data | DV data | CLEAR |
*--------------------------------------*
<ACTIVE>
 
transmission goes in this direction --->>>
 
 
The DV hardware is currently transmitting the data in frame 1.
Once frame 1 is finished, it will automatically transmit frame 2.
(if frame 2 finishes before frame 3 is submitted, the device
will continue to transmit frame 2, and will increase the dropped_frames
counter each time it repeats the transmission).
 
 
If you called DV1394_GET_STATUS at this instant, you would
receive the following values:
 
n_frames = 4
active_frame = 1
first_clear_frame = 3
n_clear_frames = 2
 
At this point, you should write new DV data into frame 3 and optionally
frame 0. Then call DV1394_SUBMIT_FRAMES to inform the device that
it may transmit the new frames.
 
ERROR HANDLING
 
An error (buffer underflow/overflow or a break in the DV stream due
to a 1394 bus reset) can be detected by checking the dropped_frames
field of struct dv1394_status (obtained through the
DV1394_GET_STATUS ioctl).
 
The best way to recover from such an error is to re-initialize
dv1394, either by using the DV1394_INIT ioctl call, or closing the
file descriptor and opening it again. (note that you must unmap all
ringbuffer mappings when closing the file descriptor, or else
dv1394 will still be considered 'in use').
 
MAIN LOOP
 
For maximum efficiency and robustness against bus errors, you are
advised to model the main loop of your application after the
following pseudo-code example:
 
(checks of system call return values omitted for brevity; always
check return values in your code!)
 
while( frames left ) {
 
struct pollfd *pfd = ...;
 
pfd->fd = dv1394_fd;
pfd->revents = 0;
pfd->events = POLLOUT | POLLIN; (OUT for transmit, IN for receive)
 
(add other sources of I/O here)
 
poll(pfd, 1, -1); (or select(); add a timeout if you want)
 
if(pfd->revents) {
struct dv1394_status status;
 
ioctl(dv1394_fd, DV1394_GET_STATUS, &status);
 
if(status.dropped_frames > 0) {
reset_dv1394();
} else {
int i;
for (i = 0; i < status.n_clear_frames; i++) {
copy_DV_frame();
}
}
}
}
 
where copy_DV_frame() reads or writes on the dv1394 file descriptor
(read/write mode) or copies data to/from the mmap ringbuffer and
then calls ioctl(DV1394_SUBMIT_FRAMES) to notify dv1394 that new
frames are available (mmap mode).
 
reset_dv1394() is called in the event of a buffer
underflow/overflow or a halt in the DV stream (e.g. due to a 1394
bus reset). To guarantee recovery from the error, this function
should close the dv1394 file descriptor (and munmap() all
ringbuffer mappings, if you are using them), then re-open the
dv1394 device (and re-map the ringbuffer).
 
*/
 
 
/* maximum number of frames in the ringbuffer */
#define DV1394_MAX_FRAMES 32

/* number of *full* isochronous packets per DV frame */
#define DV1394_NTSC_PACKETS_PER_FRAME 250
#define DV1394_PAL_PACKETS_PER_FRAME 300

/* size of one frame's worth of DV data, in bytes
 * (480 bytes of payload per packet; 120000 NTSC / 144000 PAL) */
#define DV1394_NTSC_FRAME_SIZE (480 * DV1394_NTSC_PACKETS_PER_FRAME)
#define DV1394_PAL_FRAME_SIZE (480 * DV1394_PAL_PACKETS_PER_FRAME)
 
 
/* ioctl() commands */
 
/* ioctl() request codes understood by the dv1394 driver; the enum order
 * defines the numeric values and is therefore part of the kernel ABI. */
enum {
    /* I don't like using 0 as a valid ioctl() */
    DV1394_INVALID = 0,


    /* get the driver ready to transmit video.
       pass a struct dv1394_init* as the parameter (see below),
       or NULL to get default parameters */
    DV1394_INIT,


    /* stop transmitting video and free the ringbuffer */
    DV1394_SHUTDOWN,


    /* submit N new frames to be transmitted, where
       the index of the first new frame is first_clear_buffer,
       and the index of the last new frame is
       (first_clear_buffer + N) % n_frames */
    DV1394_SUBMIT_FRAMES,


    /* block until N buffers are clear (pass N as the parameter)
       Because we re-transmit the last frame on underrun, there
       will at most be n_frames - 1 clear frames at any time */
    DV1394_WAIT_FRAMES,

    /* capture new frames that have been received, where
       the index of the first new frame is first_clear_buffer,
       and the index of the last new frame is
       (first_clear_buffer + N) % n_frames */
    DV1394_RECEIVE_FRAMES,

    /* start receiving frames (used by dv1394_start() in dv1394.c) */
    DV1394_START_RECEIVE,


    /* pass a struct dv1394_status* as the parameter (see below) */
    DV1394_GET_STATUS,
};
 
 
 
/* Video system selector, used for the 'format' field of struct dv1394_init. */
enum pal_or_ntsc {
    DV1394_NTSC = 0,
    DV1394_PAL
};
 
 
 
 
/* this is the argument to DV1394_INIT */
/* this is the argument to DV1394_INIT; describes the desired ringbuffer
   geometry, video system and (for transmit) empty-packet timing */
struct dv1394_init {
    /* DV1394_API_VERSION */
    unsigned int api_version;

    /* isochronous transmission channel to use */
    unsigned int channel;

    /* number of frames in the ringbuffer. Must be at least 2
       and at most DV1394_MAX_FRAMES. */
    unsigned int n_frames;

    /* send/receive PAL or NTSC video format */
    enum pal_or_ntsc format;

    /* the following are used only for transmission */

    /* set these to zero unless you want a
       non-default empty packet rate (see below) */
    unsigned long cip_n;
    unsigned long cip_d;

    /* set this to zero unless you want a
       non-default SYT cycle offset (default = 3 cycles) */
    unsigned int syt_offset;
};
 
/* NOTE: you may only allocate the DV frame ringbuffer once each time
you open the dv1394 device. DV1394_INIT will fail if you call it a
second time with different 'n_frames' or 'format' arguments (which
would imply a different size for the ringbuffer). If you need a
different buffer size, simply close and re-open the device, then
initialize it with your new settings. */
 
/* Q: What are cip_n and cip_d? */
 
/*
A: DV video streams do not utilize 100% of the potential bandwidth offered
by IEEE 1394 (FireWire). To achieve the correct rate of data transmission,
DV devices must periodically insert empty packets into the 1394 data stream.
Typically there is one empty packet per 14-16 data-carrying packets.
 
Some DV devices will accept a wide range of empty packet rates, while others
require a precise rate. If the dv1394 driver produces empty packets at
a rate that your device does not accept, you may see ugly patterns on the
DV output, or even no output at all.
 
The default empty packet insertion rate seems to work for many people; if
your DV output is stable, you can simply ignore this discussion. However,
we have exposed the empty packet rate as a parameter to support devices that
do not work with the default rate.
 
The decision to insert an empty packet is made with a numerator/denominator
algorithm. Empty packets are produced at an average rate of CIP_N / CIP_D.
You can alter the empty packet rate by passing non-zero values for cip_n
and cip_d to the INIT ioctl.
 
*/
 
 
 
/* Snapshot of driver state returned by DV1394_GET_STATUS. */
struct dv1394_status {
    /* this embedded init struct returns the current dv1394
       parameters in use */
    struct dv1394_init init;

    /* the ringbuffer frame that is currently being
       displayed. (-1 if the device is not transmitting anything) */
    int active_frame;

    /* index of the first buffer (ahead of active_frame) that
       is ready to be filled with data */
    unsigned int first_clear_frame;

    /* how many buffers, including first_clear_buffer, are
       ready to be filled with data */
    unsigned int n_clear_frames;

    /* how many times the DV stream has underflowed, overflowed,
       or otherwise encountered an error, since the previous call
       to DV1394_GET_STATUS */
    unsigned int dropped_frames;

    /* N.B. The dropped_frames counter is only a lower bound on the actual
       number of dropped frames, with the special case that if dropped_frames
       is zero, then it is guaranteed that NO frames have been dropped
       since the last call to DV1394_GET_STATUS.
    */
};
 
 
#endif /* AVDEVICE_DV1394_H */
/contrib/sdk/sources/ffmpeg/libavdevice/fbdev_common.c
0,0 → 1,57
/*
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2009 Giliard B. de Freitas <giliarde@gmail.com>
* Copyright (C) 2002 Gunnar Monell <gmo@linux.nu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "fbdev_common.h"
#include "libavutil/common.h"
 
/* One row of the framebuffer-layout -> AVPixelFormat lookup table:
   bit depth plus the bit offset of each channel within a pixel. */
struct rgb_pixfmt_map_entry {
    int bits_per_pixel;
    int red_offset, green_offset, blue_offset, alpha_offset;
    enum AVPixelFormat pixfmt;
};
 
/* Known framebuffer channel layouts. Matching (in ff_get_pixfmt_from_fb_varinfo)
   compares bpp and the red/green/blue offsets only; alpha_offset is not checked. */
static const struct rgb_pixfmt_map_entry rgb_pixfmt_map[] = {
    // bpp, red_offset, green_offset, blue_offset, alpha_offset, pixfmt
    {  32,       0,           8,          16,          24,   AV_PIX_FMT_RGBA },
    {  32,      16,           8,           0,          24,   AV_PIX_FMT_BGRA },
    {  32,       8,          16,          24,           0,   AV_PIX_FMT_ARGB },
    /* NOTE(review): offsets 3/2/8 look inconsistent with the ABGR layout
       (24/16/8 would be expected) — verify against the framebuffer users */
    {  32,       3,           2,           8,           0,   AV_PIX_FMT_ABGR },
    {  24,       0,           8,          16,           0,   AV_PIX_FMT_RGB24 },
    {  24,      16,           8,           0,           0,   AV_PIX_FMT_BGR24 },
    {  16,      11,           5,           0,          16,   AV_PIX_FMT_RGB565 },
};
 
/**
 * Translate a framebuffer variable-info description into an AVPixelFormat.
 *
 * Scans rgb_pixfmt_map for an entry whose bit depth and red/green/blue
 * channel offsets match @p varinfo (alpha is not compared).
 *
 * @return the matching pixel format, or AV_PIX_FMT_NONE if none matches
 */
enum AVPixelFormat ff_get_pixfmt_from_fb_varinfo(struct fb_var_screeninfo *varinfo)
{
    int idx;

    for (idx = 0; idx < FF_ARRAY_ELEMS(rgb_pixfmt_map); idx++) {
        const struct rgb_pixfmt_map_entry *candidate = &rgb_pixfmt_map[idx];
        int matches = candidate->bits_per_pixel == varinfo->bits_per_pixel &&
                      candidate->red_offset     == varinfo->red.offset     &&
                      candidate->green_offset   == varinfo->green.offset   &&
                      candidate->blue_offset    == varinfo->blue.offset;
        if (matches)
            return candidate->pixfmt;
    }

    return AV_PIX_FMT_NONE;
}
/contrib/sdk/sources/ffmpeg/libavdevice/fbdev_common.h
0,0 → 1,32
/*
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2009 Giliard B. de Freitas <giliarde@gmail.com>
* Copyright (C) 2002 Gunnar Monell <gmo@linux.nu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVDEVICE_FBDEV_COMMON_H
#define AVDEVICE_FBDEV_COMMON_H

#include <features.h>
#include <linux/fb.h>
#include "libavutil/pixfmt.h"

/**
 * Map a framebuffer variable-screen-info description (bit depth and RGB
 * channel offsets) to the matching AVPixelFormat.
 *
 * @return the matching format, or AV_PIX_FMT_NONE if unsupported
 */
enum AVPixelFormat ff_get_pixfmt_from_fb_varinfo(struct fb_var_screeninfo *varinfo);

#endif /* AVDEVICE_FBDEV_COMMON_H */
/contrib/sdk/sources/ffmpeg/libavdevice/fbdev_dec.c
0,0 → 1,231
/*
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2009 Giliard B. de Freitas <giliarde@gmail.com>
* Copyright (C) 2002 Gunnar Monell <gmo@linux.nu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Linux framebuffer input device,
* inspired by code from fbgrab.c by Gunnar Monell.
* @see http://linux-fbdev.sourceforge.net/
*/
 
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <time.h>
#include <linux/fb.h>
 
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavformat/internal.h"
#include "avdevice.h"
#include "fbdev_common.h"
 
/* Private context of the fbdev input device. */
typedef struct {
    AVClass *class;          ///< class for private options
    int frame_size;          ///< size in bytes of a grabbed frame
    AVRational framerate_q;  ///< framerate
    int64_t time_frame;      ///< time for the next frame to output (in 1/1000000 units)

    int fd;                  ///< framebuffer device file descriptor
    int width, height;       ///< assumed frame resolution
    int frame_linesize;      ///< linesize of the output frame, it is assumed to be constant
    int bytes_per_pixel;     ///< derived from varinfo.bits_per_pixel, rounded up to whole bytes

    struct fb_var_screeninfo varinfo; ///< variable info; re-read on every packet (panning may move the visible area)
    struct fb_fix_screeninfo fixinfo; ///< fixed info; read once at open time

    uint8_t *data;           ///< framebuffer data (mmap of the whole smem_len region)
} FBDevContext;
 
/**
 * Open and probe the framebuffer device named by avctx->filename.
 *
 * Creates the single raw-video stream, opens the device read-only, queries
 * the variable and fixed screen info, derives frame geometry from them and
 * mmaps the whole framebuffer for later grabbing.
 *
 * @return 0 on success, a negative AVERROR code on failure (the fd is
 *         closed on every failure path after a successful open)
 */
static av_cold int fbdev_read_header(AVFormatContext *avctx)
{
    FBDevContext *fbdev = avctx->priv_data;
    AVStream *st = NULL;
    enum AVPixelFormat pix_fmt;
    int ret, flags = O_RDONLY;

    if (!(st = avformat_new_stream(avctx, NULL)))
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in microseconds */

    /* NONBLOCK is ignored by the fbdev driver, only set for consistency */
    if (avctx->flags & AVFMT_FLAG_NONBLOCK)
        flags |= O_NONBLOCK;

    if ((fbdev->fd = avpriv_open(avctx->filename, flags)) == -1) {
        ret = AVERROR(errno);
        av_log(avctx, AV_LOG_ERROR,
               "Could not open framebuffer device '%s': %s\n",
               avctx->filename, av_err2str(ret));
        return ret;
    }

    if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0) {
        ret = AVERROR(errno);
        av_log(avctx, AV_LOG_ERROR,
               "FBIOGET_VSCREENINFO: %s\n", av_err2str(ret));
        goto fail;
    }

    if (ioctl(fbdev->fd, FBIOGET_FSCREENINFO, &fbdev->fixinfo) < 0) {
        ret = AVERROR(errno);
        av_log(avctx, AV_LOG_ERROR,
               "FBIOGET_FSCREENINFO: %s\n", av_err2str(ret));
        goto fail;
    }

    pix_fmt = ff_get_pixfmt_from_fb_varinfo(&fbdev->varinfo);
    if (pix_fmt == AV_PIX_FMT_NONE) {
        ret = AVERROR(EINVAL);
        av_log(avctx, AV_LOG_ERROR,
               "Framebuffer pixel format not supported.\n");
        goto fail;
    }

    /* Output linesize is width * bpp, not the device's line_length:
       the per-line copy in fbdev_read_packet repacks the frame tightly. */
    fbdev->width = fbdev->varinfo.xres;
    fbdev->height = fbdev->varinfo.yres;
    fbdev->bytes_per_pixel = (fbdev->varinfo.bits_per_pixel + 7) >> 3;
    fbdev->frame_linesize = fbdev->width * fbdev->bytes_per_pixel;
    fbdev->frame_size = fbdev->frame_linesize * fbdev->height;
    fbdev->time_frame = AV_NOPTS_VALUE;
    /* map the full framebuffer, including any off-screen panning area */
    fbdev->data = mmap(NULL, fbdev->fixinfo.smem_len, PROT_READ, MAP_SHARED, fbdev->fd, 0);
    if (fbdev->data == MAP_FAILED) {
        ret = AVERROR(errno);
        av_log(avctx, AV_LOG_ERROR, "Error in mmap(): %s\n", av_err2str(ret));
        goto fail;
    }

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_RAWVIDEO;
    st->codec->width      = fbdev->width;
    st->codec->height     = fbdev->height;
    st->codec->pix_fmt    = pix_fmt;
    st->codec->time_base  = av_inv_q(fbdev->framerate_q);
    st->codec->bit_rate   =
        fbdev->width * fbdev->height * fbdev->bytes_per_pixel * av_q2d(fbdev->framerate_q) * 8;

    av_log(avctx, AV_LOG_INFO,
           "w:%d h:%d bpp:%d pixfmt:%s fps:%d/%d bit_rate:%d\n",
           fbdev->width, fbdev->height, fbdev->varinfo.bits_per_pixel,
           av_get_pix_fmt_name(pix_fmt),
           fbdev->framerate_q.num, fbdev->framerate_q.den,
           st->codec->bit_rate);
    return 0;

fail:
    close(fbdev->fd);
    return ret;
}
 
/**
 * Grab one frame from the framebuffer.
 *
 * Paces grabbing to the configured frame rate (sleeping unless
 * AVFMT_FLAG_NONBLOCK is set, in which case AVERROR(EAGAIN) is returned
 * when it is too early), re-reads the variable screen info to track
 * display panning, and copies the visible area line by line into a
 * tightly packed packet.
 *
 * @return the frame size in bytes on success, a negative AVERROR otherwise
 */
static int fbdev_read_packet(AVFormatContext *avctx, AVPacket *pkt)
{
    FBDevContext *fbdev = avctx->priv_data;
    int64_t curtime, delay;
    struct timespec ts;
    int i, ret;
    uint8_t *pin, *pout;

    if (fbdev->time_frame == AV_NOPTS_VALUE)
        fbdev->time_frame = av_gettime();

    /* wait based on the frame rate */
    while (1) {
        curtime = av_gettime();
        delay = fbdev->time_frame - curtime;
        av_dlog(avctx,
                "time_frame:%"PRId64" curtime:%"PRId64" delay:%"PRId64"\n",
                fbdev->time_frame, curtime, delay);
        if (delay <= 0) {
            fbdev->time_frame += INT64_C(1000000) / av_q2d(fbdev->framerate_q);
            break;
        }
        if (avctx->flags & AVFMT_FLAG_NONBLOCK)
            return AVERROR(EAGAIN);
        ts.tv_sec  =  delay / 1000000;
        ts.tv_nsec = (delay % 1000000) * 1000;
        while (nanosleep(&ts, &ts) < 0 && errno == EINTR);
    }

    if ((ret = av_new_packet(pkt, fbdev->frame_size)) < 0)
        return ret;

    /* refresh fbdev->varinfo, visible data position may change at each call */
    if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0) {
        /* Fix: report the ioctl's errno; 'ret' holds the (successful)
           av_new_packet result here and previously produced a bogus
           message. Matches the identical warning in fbdev_enc.c. */
        av_log(avctx, AV_LOG_WARNING,
               "Error refreshing variable info: %s\n", av_err2str(AVERROR(errno)));
    }

    pkt->pts = curtime;

    /* compute visible data offset */
    pin = fbdev->data + fbdev->bytes_per_pixel * fbdev->varinfo.xoffset +
          fbdev->varinfo.yoffset * fbdev->fixinfo.line_length;
    pout = pkt->data;

    /* repack: device lines are line_length apart, output lines are tight */
    for (i = 0; i < fbdev->height; i++) {
        memcpy(pout, pin, fbdev->frame_linesize);
        pin  += fbdev->fixinfo.line_length;
        pout += fbdev->frame_linesize;
    }

    return fbdev->frame_size;
}
 
/**
 * Tear down the input device: undo the header-time setup by unmapping
 * the framebuffer region and closing the device descriptor.
 */
static av_cold int fbdev_read_close(AVFormatContext *avctx)
{
    FBDevContext *ctx = avctx->priv_data;

    munmap(ctx->data, ctx->fixinfo.smem_len);
    close(ctx->fd);

    return 0;
}
 
#define OFFSET(x) offsetof(FBDevContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
/* Private options of the fbdev demuxer: capture frame rate (default 25). */
static const AVOption options[] = {
    { "framerate","", OFFSET(framerate_q), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, DEC },
    { NULL },
};
 
/* AVClass exposing the private options above through the AVOption API. */
static const AVClass fbdev_class = {
    .class_name = "fbdev indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* Demuxer definition for the "fbdev" input device.
   AVFMT_NOFILE: we open the device ourselves in fbdev_read_header. */
AVInputFormat ff_fbdev_demuxer = {
    .name           = "fbdev",
    .long_name      = NULL_IF_CONFIG_SMALL("Linux framebuffer"),
    .priv_data_size = sizeof(FBDevContext),
    .read_header    = fbdev_read_header,
    .read_packet    = fbdev_read_packet,
    .read_close     = fbdev_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &fbdev_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/fbdev_enc.c
0,0 → 1,206
/*
* Copyright (c) 2013 Lukasz Marek
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/fb.h>
#include "libavutil/pixdesc.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavformat/avformat.h"
#include "fbdev_common.h"
 
/* Private context of the fbdev output device. */
typedef struct {
    AVClass *class;                   ///< class for private options
    int xoffset;                      ///< x coordinate of top left corner (may be negative; clipped in write_packet)
    int yoffset;                      ///< y coordinate of top left corner (may be negative; clipped in write_packet)
    struct fb_var_screeninfo varinfo; ///< framebuffer variable info
    struct fb_fix_screeninfo fixinfo; ///< framebuffer fixed info
    int fd;                           ///< framebuffer device file descriptor
    uint8_t *data;                    ///< framebuffer data (writable mmap)
} FBDevContext;
 
/**
 * Prepare the framebuffer device for output.
 *
 * Validates that exactly one (video) stream is muxed, opens the device
 * read/write, queries the variable and fixed screen info, verifies the
 * framebuffer layout maps to a known pixel format, and mmaps the whole
 * framebuffer for writing.
 *
 * @return 0 on success, a negative AVERROR code on failure (the fd is
 *         closed on every failure path after a successful open)
 */
static av_cold int fbdev_write_header(AVFormatContext *h)
{
    FBDevContext *fbdev = h->priv_data;
    enum AVPixelFormat fmt;
    int err;

    /* exactly one stream, and it must be video */
    if (h->nb_streams != 1 || h->streams[0]->codec->codec_type != AVMEDIA_TYPE_VIDEO) {
        av_log(fbdev, AV_LOG_ERROR, "Only a single video stream is supported.\n");
        return AVERROR(EINVAL);
    }

    fbdev->fd = avpriv_open(h->filename, O_RDWR);
    if (fbdev->fd == -1) {
        err = AVERROR(errno);
        av_log(h, AV_LOG_ERROR,
               "Could not open framebuffer device '%s': %s\n",
               h->filename, av_err2str(err));
        return err;
    }

    if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0) {
        err = AVERROR(errno);
        av_log(h, AV_LOG_ERROR, "FBIOGET_VSCREENINFO: %s\n", av_err2str(err));
        goto fail;
    }

    if (ioctl(fbdev->fd, FBIOGET_FSCREENINFO, &fbdev->fixinfo) < 0) {
        err = AVERROR(errno);
        av_log(h, AV_LOG_ERROR, "FBIOGET_FSCREENINFO: %s\n", av_err2str(err));
        goto fail;
    }

    fmt = ff_get_pixfmt_from_fb_varinfo(&fbdev->varinfo);
    if (fmt == AV_PIX_FMT_NONE) {
        err = AVERROR(EINVAL);
        av_log(h, AV_LOG_ERROR, "Framebuffer pixel format not supported.\n");
        goto fail;
    }

    fbdev->data = mmap(NULL, fbdev->fixinfo.smem_len, PROT_WRITE, MAP_SHARED,
                       fbdev->fd, 0);
    if (fbdev->data == MAP_FAILED) {
        err = AVERROR(errno);
        av_log(h, AV_LOG_ERROR, "Error in mmap(): %s\n", av_err2str(err));
        goto fail;
    }

    return 0;

fail:
    close(fbdev->fd);
    return err;
}
 
/**
 * Blit one raw video frame into the framebuffer.
 *
 * Re-reads the variable screen info (the visible area can pan between
 * calls), checks that the incoming pixel format matches the framebuffer
 * layout, then clips the frame against the screen using the user-supplied
 * xoffset/yoffset (which may be negative) and copies it line by line.
 *
 * @return 0 on success (including "frame entirely off screen"),
 *         a negative AVERROR on pixel-format mismatch
 */
static int fbdev_write_packet(AVFormatContext *h, AVPacket *pkt)
{
    FBDevContext *fbdev = h->priv_data;
    uint8_t *pin, *pout;
    enum AVPixelFormat fb_pix_fmt;
    int disp_height;    // number of visible lines actually copied
    int bytes_to_copy;  // visible bytes per line actually copied
    AVCodecContext *codec_ctx = h->streams[0]->codec;
    enum AVPixelFormat video_pix_fmt = codec_ctx->pix_fmt;
    int video_width = codec_ctx->width;
    int video_height = codec_ctx->height;
    int bytes_per_pixel = ((codec_ctx->bits_per_coded_sample + 7) >> 3);
    int src_line_size = video_width * bytes_per_pixel;
    int i;

    /* refresh panning offsets; a failure here is non-fatal */
    if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0)
        av_log(h, AV_LOG_WARNING,
               "Error refreshing variable info: %s\n", av_err2str(AVERROR(errno)));

    fb_pix_fmt = ff_get_pixfmt_from_fb_varinfo(&fbdev->varinfo);

    if (fb_pix_fmt != video_pix_fmt) {
        av_log(h, AV_LOG_ERROR, "Pixel format %s is not supported, use %s\n",
               av_get_pix_fmt_name(video_pix_fmt), av_get_pix_fmt_name(fb_pix_fmt));
        return AVERROR(EINVAL);
    }

    /* start from the full overlap of frame and screen ... */
    disp_height = FFMIN(fbdev->varinfo.yres, video_height);
    bytes_to_copy = FFMIN(fbdev->varinfo.xres, video_width) * bytes_per_pixel;

    pin = pkt->data;
    pout = fbdev->data +
           bytes_per_pixel * fbdev->varinfo.xoffset +
           fbdev->varinfo.yoffset * fbdev->fixinfo.line_length;

    /* ... then clip horizontally against the user xoffset */
    if (fbdev->xoffset) {
        if (fbdev->xoffset < 0) {
            if (-fbdev->xoffset >= video_width) //nothing to display
                return 0;
            /* left edge off screen: shrink the copy and skip source bytes */
            bytes_to_copy += fbdev->xoffset * bytes_per_pixel;
            pin -= fbdev->xoffset * bytes_per_pixel;
        } else {
            int diff = (video_width + fbdev->xoffset) - fbdev->varinfo.xres;
            if (diff > 0) {
                if (diff >= video_width) //nothing to display
                    return 0;
                /* right edge off screen: shrink the copy */
                bytes_to_copy -= diff * bytes_per_pixel;
            }
            pout += bytes_per_pixel * fbdev->xoffset;
        }
    }

    /* ... and vertically against the user yoffset */
    if (fbdev->yoffset) {
        if (fbdev->yoffset < 0) {
            if (-fbdev->yoffset >= video_height) //nothing to display
                return 0;
            /* top edge off screen: drop leading source lines */
            disp_height += fbdev->yoffset;
            pin -= fbdev->yoffset * src_line_size;
        } else {
            int diff = (video_height + fbdev->yoffset) - fbdev->varinfo.yres;
            if (diff > 0) {
                if (diff >= video_height) //nothing to display
                    return 0;
                /* bottom edge off screen: drop trailing lines */
                disp_height -= diff;
            }
            pout += fbdev->yoffset * fbdev->fixinfo.line_length;
        }
    }

    /* copy the clipped region line by line */
    for (i = 0; i < disp_height; i++) {
        memcpy(pout, pin, bytes_to_copy);
        pout += fbdev->fixinfo.line_length;
        pin  += src_line_size;
    }

    return 0;
}
 
/**
 * Tear down the output device: unmap the framebuffer and close its fd.
 */
static av_cold int fbdev_write_trailer(AVFormatContext *h)
{
    FBDevContext *ctx = h->priv_data;

    munmap(ctx->data, ctx->fixinfo.smem_len);
    close(ctx->fd);

    return 0;
}
 
#define OFFSET(x) offsetof(FBDevContext, x)
#define ENC AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the fbdev muxer: destination corner of the frame
   within the framebuffer (negative values clip the frame). */
static const AVOption options[] = {
    { "xoffset", "set x coordinate of top left corner", OFFSET(xoffset), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, ENC },
    { "yoffset", "set y coordinate of top left corner", OFFSET(yoffset), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, ENC },
    { NULL }
};
 
/* AVClass exposing the private options above through the AVOption API. */
static const AVClass fbdev_class = {
    .class_name = "fbdev outdev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* Muxer definition for the "fbdev" output device.
   AVFMT_NOFILE: we open the device ourselves in fbdev_write_header. */
AVOutputFormat ff_fbdev_muxer = {
    .name           = "fbdev",
    .long_name      = NULL_IF_CONFIG_SMALL("Linux framebuffer"),
    .priv_data_size = sizeof(FBDevContext),
    .audio_codec    = AV_CODEC_ID_NONE,
    .video_codec    = AV_CODEC_ID_RAWVIDEO,
    .write_header   = fbdev_write_header,
    .write_packet   = fbdev_write_packet,
    .write_trailer  = fbdev_write_trailer,
    .flags          = AVFMT_NOFILE | AVFMT_VARIABLE_FPS | AVFMT_NOTIMESTAMPS,
    .priv_class     = &fbdev_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/file_open.c
0,0 → 1,0
#include "libavutil/file_open.c"
/contrib/sdk/sources/ffmpeg/libavdevice/iec61883.c
0,0 → 1,497
/*
* Copyright (c) 2012 Georg Lippitsch <georg.lippitsch@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* libiec61883 interface
*/
 
#include <sys/poll.h>
#include <libraw1394/raw1394.h>
#include <libavc1394/avc1394.h>
#include <libavc1394/rom1394.h>
#include <libiec61883/iec61883.h>
#include "libavformat/dv.h"
#include "libavformat/mpegts.h"
#include "libavutil/opt.h"
#include "avdevice.h"
 
#define THREADS HAVE_PTHREADS
 
#if THREADS
#include <pthread.h>
#endif
 
#define MOTDCT_SPEC_ID 0x00005068
#define IEC61883_AUTO 0
#define IEC61883_DV 1
#define IEC61883_HDV 2
 
/**
* For DV, one packet corresponds exactly to one frame.
* For HDV, these are MPEG2 transport stream packets.
* The queue is implemented as linked list.
*/
/* Singly linked node of the receive queue (one DV frame, or a chunk of
   MPEG-TS packets for HDV). */
typedef struct DVPacket {
    uint8_t *buf;          ///< actual buffer data
    int len;               ///< size of buffer allocated
    struct DVPacket *next; ///< next DVPacket
} DVPacket;
 
/* Private context of the iec61883 input device. Fields written by the
   receive callback/task are protected by 'mutex' when THREADS is enabled. */
struct iec61883_data {
    AVClass *class;
    raw1394handle_t raw1394;         ///< handle for libraw1394
    iec61883_dv_fb_t iec61883_dv;    ///< handle for libiec61883 when used with DV
    iec61883_mpeg2_t iec61883_mpeg2; ///< handle for libiec61883 when used with HDV

    DVDemuxContext *dv_demux;        ///< generic DV muxing/demuxing context
    MpegTSContext *mpeg_demux;       ///< generic HDV muxing/demuxing context

    DVPacket *queue_first;           ///< first element of packet queue
    DVPacket *queue_last;            ///< last element of packet queue

    char *device_guid;               ///< to select one of multiple DV devices

    int packets;                     ///< Number of packets queued
    int max_packets;                 ///< Max. number of packets in queue

    int bandwidth;                   ///< returned by libiec61883
    int channel;                     ///< returned by libiec61883
    int input_port;                  ///< returned by libiec61883
    int type;                        ///< Stream type, to distinguish DV/HDV
    int node;                        ///< returned by libiec61883
    int output_port;                 ///< returned by libiec61883
    int thread_loop;                 ///< Condition for thread while-loop
    int receiving;                   ///< True as soon data from device available
    int receive_error;               ///< Set in receive task in case of error
    int eof;                         ///< True as soon as no more data available

    struct pollfd raw1394_poll;      ///< to poll for new data from libraw1394

    /** Parse function for DV/HDV differs, so this is set before packets arrive */
    int (*parse_queue)(struct iec61883_data *dv, AVPacket *pkt);

#if THREADS
    pthread_t receive_task_thread;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
#endif
};
 
/**
 * libiec61883 receive callback: append one received buffer to the queue.
 *
 * Called from raw1394_loop_iterate(). Drops the buffer (returning success)
 * when the queue is full; returns -1 on allocation failure.
 *
 * Fixes: the guards were '#ifdef THREADS', which is always true because
 * THREADS is always #defined (to HAVE_PTHREADS, possibly 0), so the
 * pthread calls were compiled even without pthread support; must be
 * '#if THREADS'. Also frees 'packet' if its data buffer allocation fails
 * (previously leaked).
 */
static int iec61883_callback(unsigned char *data, int length,
                             int complete, void *callback_data)
{
    struct iec61883_data *dv = callback_data;
    DVPacket *packet;
    int ret;

#if THREADS
    pthread_mutex_lock(&dv->mutex);
#endif

    if (dv->packets >= dv->max_packets) {
        av_log(NULL, AV_LOG_ERROR, "DV packet queue overrun, dropping.\n");
        ret = 0;
        goto exit;
    }

    packet = av_mallocz(sizeof(*packet));
    if (!packet) {
        ret = -1;
        goto exit;
    }

    packet->buf = av_malloc(length);
    if (!packet->buf) {
        av_free(packet); /* don't leak the node on buffer-alloc failure */
        ret = -1;
        goto exit;
    }
    packet->len = length;

    memcpy(packet->buf, data, length);

    /* append to the tail of the queue */
    if (dv->queue_first) {
        dv->queue_last->next = packet;
        dv->queue_last = packet;
    } else {
        dv->queue_first = packet;
        dv->queue_last = packet;
    }
    dv->packets++;

    ret = 0;

exit:
#if THREADS
    /* wake a reader possibly blocked in iec61883_read_packet */
    pthread_cond_broadcast(&dv->cond);
    pthread_mutex_unlock(&dv->mutex);
#endif
    return ret;
}
 
/**
 * Receive loop: poll the raw1394 fd and pump libraw1394, which invokes
 * iec61883_callback for each received buffer.
 *
 * With THREADS it runs as a dedicated thread until thread_loop is cleared;
 * without it, iec61883_read_packet calls it once per iteration. A 200 ms
 * poll timeout after data has started flowing is treated as end of stream.
 *
 * Fix: the guards were '#ifdef THREADS', which is always true because
 * THREADS is always #defined (possibly to 0); they must be '#if THREADS',
 * otherwise the single-shot (no-pthread) build ran the thread loop and
 * called pthread functions without pthread support.
 */
static void *iec61883_receive_task(void *opaque)
{
    struct iec61883_data *dv = (struct iec61883_data *)opaque;
    int result;

#if THREADS
    while (dv->thread_loop)
#endif
    {
        while ((result = poll(&dv->raw1394_poll, 1, 200)) < 0) {
            if (!(errno == EAGAIN || errno == EINTR)) {
                av_log(NULL, AV_LOG_ERROR, "Raw1394 poll error occurred.\n");
                dv->receive_error = AVERROR(EIO);
                return NULL;
            }
        }
        if (result > 0 && ((dv->raw1394_poll.revents & POLLIN)
                           || (dv->raw1394_poll.revents & POLLPRI))) {
            dv->receiving = 1;
            raw1394_loop_iterate(dv->raw1394);
        } else if (dv->receiving) {
            /* timed out after data had been flowing: end of stream */
            av_log(NULL, AV_LOG_ERROR, "No more input data available\n");
#if THREADS
            pthread_mutex_lock(&dv->mutex);
            dv->eof = 1;
            pthread_cond_broadcast(&dv->cond);
            pthread_mutex_unlock(&dv->mutex);
#else
            dv->eof = 1;
#endif
            return NULL;
        }
    }

    return NULL;
}
 
/**
 * DV flavor of parse_queue: produce one AVPacket from the queue.
 *
 * First drains any packet the DV demuxer already has pending; otherwise
 * feeds the head queue buffer to avpriv_dv_produce_packet. The buffer's
 * ownership moves into 'pkt' (freed via pkt->destruct), so only the queue
 * node itself is freed here.
 *
 * NOTE(review): if avpriv_dv_produce_packet returns <= 0 the buffer's fate
 * is not obvious from this file — verify it does not leak in that path.
 *
 * @return packet size on success, -1 if no complete packet is available
 */
static int iec61883_parse_queue_dv(struct iec61883_data *dv, AVPacket *pkt)
{
    DVPacket *packet;
    int size;

    size = avpriv_dv_get_packet(dv->dv_demux, pkt);
    if (size > 0)
        return size;

    packet = dv->queue_first;
    if (!packet)
        return -1;

    size = avpriv_dv_produce_packet(dv->dv_demux, pkt,
                                    packet->buf, packet->len, -1);
    pkt->destruct = av_destruct_packet;
    dv->queue_first = packet->next;
    av_free(packet);
    dv->packets--;

    if (size > 0)
        return size;

    return -1;
}
 
/**
 * HDV flavor of parse_queue: feed queued MPEG-TS buffers to the TS parser
 * until it emits a complete packet. Consumed queue nodes and their buffers
 * are freed here (the parser copies what it needs).
 *
 * @return packet size on success, -1 if the queue ran dry first
 */
static int iec61883_parse_queue_hdv(struct iec61883_data *dv, AVPacket *pkt)
{
    int size;

    while (dv->queue_first) {
        DVPacket *head = dv->queue_first;

        size = ff_mpegts_parse_packet(dv->mpeg_demux, pkt,
                                      head->buf, head->len);

        /* unlink and release the consumed node */
        dv->queue_first = head->next;
        av_free(head->buf);
        av_free(head);
        dv->packets--;

        if (size > 0)
            return size;
    }

    return -1;
}
 
/**
 * Open the IEEE 1394 bus, locate a DV/HDV device, connect to it and start
 * reception.
 *
 * The filename is either a port number or "auto". A device may also be
 * pinned by GUID via the "dvguid" option. The stream type (DV vs HDV) is
 * autodetected over AV/C unless forced with "dvtype". With THREADS a
 * dedicated receive thread is started; otherwise reception is driven from
 * iec61883_read_packet.
 *
 * @return 0 on success, AVERROR(EIO) on any failure
 */
static int iec61883_read_header(AVFormatContext *context)
{
    struct iec61883_data *dv = context->priv_data;
    struct raw1394_portinfo pinf[16];
    rom1394_directory rom_dir;
    char *endptr;
    int inport;
    int nb_ports;
    int port = -1;
    int response;
    int i, j = 0;
    uint64_t guid = 0;

    dv->input_port = -1;
    dv->output_port = -1;
    dv->channel = -1;

    dv->raw1394 = raw1394_new_handle();

    if (!dv->raw1394) {
        av_log(context, AV_LOG_ERROR, "Failed to open IEEE1394 interface.\n");
        return AVERROR(EIO);
    }

    if ((nb_ports = raw1394_get_port_info(dv->raw1394, pinf, 16)) < 0) {
        av_log(context, AV_LOG_ERROR, "Failed to get number of IEEE1394 ports.\n");
        goto fail;
    }

    /* filename is either a numeric port or "auto" (scan all ports) */
    inport = strtol(context->filename, &endptr, 10);
    if (endptr != context->filename && *endptr == '\0') {
        av_log(context, AV_LOG_INFO, "Selecting IEEE1394 port: %d\n", inport);
        j = inport;
        nb_ports = inport + 1;
    } else if (strcmp(context->filename, "auto")) {
        av_log(context, AV_LOG_ERROR, "Invalid input \"%s\", you should specify "
               "\"auto\" for auto-detection, or the port number.\n", context->filename);
        goto fail;
    }

    /* optional explicit device GUID (hex string) */
    if (dv->device_guid) {
        if (sscanf(dv->device_guid, "%llx", (long long unsigned int *)&guid) != 1) {
            av_log(context, AV_LOG_INFO, "Invalid dvguid parameter: %s\n",
                   dv->device_guid);
            goto fail;
        }
    }

    /* scan ports/nodes until a matching device is found */
    for (; j < nb_ports && port==-1; ++j) {
        /* a fresh handle is needed per port */
        raw1394_destroy_handle(dv->raw1394);

        if (!(dv->raw1394 = raw1394_new_handle_on_port(j))) {
            av_log(context, AV_LOG_ERROR, "Failed setting IEEE1394 port.\n");
            goto fail;
        }

        for (i=0; i<raw1394_get_nodecount(dv->raw1394); ++i) {

            /* Select device explicitly by GUID */

            if (guid > 1) {
                if (guid == rom1394_get_guid(dv->raw1394, i)) {
                    dv->node = i;
                    port = j;
                    break;
                }
            } else {

                /* Select first AV/C tape recorder player node */

                if (rom1394_get_directory(dv->raw1394, i, &rom_dir) < 0)
                    continue;
                if (((rom1394_get_node_type(&rom_dir) == ROM1394_NODE_TYPE_AVC) &&
                     avc1394_check_subunit_type(dv->raw1394, i, AVC1394_SUBUNIT_TYPE_VCR)) ||
                    (rom_dir.unit_spec_id == MOTDCT_SPEC_ID)) {
                    rom1394_free_directory(&rom_dir);
                    dv->node = i;
                    port = j;
                    break;
                }
                rom1394_free_directory(&rom_dir);
            }
        }
    }

    if (port == -1) {
        av_log(context, AV_LOG_ERROR, "No AV/C devices found.\n");
        goto fail;
    }

    /* Provide bus sanity for multiple connections */

    iec61883_cmp_normalize_output(dv->raw1394, 0xffc0 | dv->node);

    /* Find out if device is DV or HDV */

    if (dv->type == IEC61883_AUTO) {
        response = avc1394_transaction(dv->raw1394, dv->node,
                                       AVC1394_CTYPE_STATUS |
                                       AVC1394_SUBUNIT_TYPE_TAPE_RECORDER |
                                       AVC1394_SUBUNIT_ID_0 |
                                       AVC1394_VCR_COMMAND_OUTPUT_SIGNAL_MODE |
                                       0xFF, 2);
        response = AVC1394_GET_OPERAND0(response);
        /* these operand values indicate MPEG-TS (HDV) output modes;
           NOTE(review): magic values — confirm against the AV/C VCR spec */
        dv->type = (response == 0x10 || response == 0x90 || response == 0x1A || response == 0x9A) ?
            IEC61883_HDV : IEC61883_DV;
    }

    /* Connect to device, and do initialization */

    dv->channel = iec61883_cmp_connect(dv->raw1394, dv->node, &dv->output_port,
                                       raw1394_get_local_id(dv->raw1394),
                                       &dv->input_port, &dv->bandwidth);

    /* fall back to the broadcast channel when CMP negotiation fails */
    if (dv->channel < 0)
        dv->channel = 63;

    if (!dv->max_packets)
        dv->max_packets = 100;

    if (dv->type == IEC61883_HDV) {

        /* Init HDV receive */

        avformat_new_stream(context, NULL);

        dv->mpeg_demux = ff_mpegts_parse_open(context);
        if (!dv->mpeg_demux)
            goto fail;

        dv->parse_queue = iec61883_parse_queue_hdv;

        dv->iec61883_mpeg2 = iec61883_mpeg2_recv_init(dv->raw1394,
                                                      (iec61883_mpeg2_recv_t)iec61883_callback,
                                                      dv);

        /* HDV queues TS chunks rather than whole frames: scale the limit */
        dv->max_packets *= 766;
    } else {

        /* Init DV receive */

        dv->dv_demux = avpriv_dv_init_demux(context);
        if (!dv->dv_demux)
            goto fail;

        dv->parse_queue = iec61883_parse_queue_dv;

        dv->iec61883_dv = iec61883_dv_fb_init(dv->raw1394, iec61883_callback, dv);
    }

    dv->raw1394_poll.fd = raw1394_get_fd(dv->raw1394);
    dv->raw1394_poll.events = POLLIN | POLLERR | POLLHUP | POLLPRI;

    /* Actually start receiving */

    if (dv->type == IEC61883_HDV)
        iec61883_mpeg2_recv_start(dv->iec61883_mpeg2, dv->channel);
    else
        iec61883_dv_fb_start(dv->iec61883_dv, dv->channel);

#if THREADS
    dv->thread_loop = 1;
    pthread_mutex_init(&dv->mutex, NULL);
    pthread_cond_init(&dv->cond, NULL);
    pthread_create(&dv->receive_task_thread, NULL, iec61883_receive_task, dv);
#endif

    return 0;

fail:
    raw1394_destroy_handle(dv->raw1394);
    return AVERROR(EIO);
}
 
/**
 * Deliver the next parsed packet.
 *
 * Threaded build: waits on the condition variable until the receive thread
 * has queued enough data for parse_queue to succeed, or EOF is reached.
 * Non-threaded build: alternates parsing with single-shot receive-task
 * iterations.
 *
 * Fix: the guard was '#ifdef THREADS' (always true, since THREADS is
 * always #defined, possibly to 0) — changed to '#if THREADS'. The unused
 * 'result' variable in the non-threaded branch is removed, and that branch
 * now also stops at EOF (mirroring the threaded path) instead of spinning
 * forever once the stream ends.
 *
 * @return packet size, -1 at end of stream, or a receive error code
 */
static int iec61883_read_packet(AVFormatContext *context, AVPacket *pkt)
{
    struct iec61883_data *dv = context->priv_data;
    int size;

    /**
     * Try to parse frames from queue
     */

#if THREADS
    pthread_mutex_lock(&dv->mutex);
    while ((size = dv->parse_queue(dv, pkt)) == -1)
        if (!dv->eof)
            pthread_cond_wait(&dv->cond, &dv->mutex);
        else
            break;
    pthread_mutex_unlock(&dv->mutex);
#else
    while ((size = dv->parse_queue(dv, pkt)) == -1) {
        iec61883_receive_task((void *)dv);
        if (dv->receive_error)
            return dv->receive_error;
        if (dv->eof)
            break; /* same contract as the threaded path: return -1 */
    }
#endif

    return size;
}
 
/**
 * Shut down reception and release all resources.
 *
 * Order matters: stop the receive thread first (so no callback can touch
 * the queue), then stop/close the libiec61883 receiver and demuxer, drain
 * the remaining queue, break the CMP connection, and finally destroy the
 * raw1394 handle.
 */
static int iec61883_close(AVFormatContext *context)
{
    struct iec61883_data *dv = context->priv_data;

#if THREADS
    dv->thread_loop = 0;
    pthread_join(dv->receive_task_thread, NULL);
    pthread_cond_destroy(&dv->cond);
    pthread_mutex_destroy(&dv->mutex);
#endif

    if (dv->type == IEC61883_HDV) {
        iec61883_mpeg2_recv_stop(dv->iec61883_mpeg2);
        iec61883_mpeg2_close(dv->iec61883_mpeg2);
        ff_mpegts_parse_close(dv->mpeg_demux);
    } else {
        iec61883_dv_fb_stop(dv->iec61883_dv);
        iec61883_dv_fb_close(dv->iec61883_dv);
    }
    /* free any packets still queued */
    while (dv->queue_first) {
        DVPacket *packet = dv->queue_first;
        dv->queue_first = packet->next;
        av_free(packet->buf);
        av_free(packet);
    }

    iec61883_cmp_disconnect(dv->raw1394, dv->node, dv->output_port,
                            raw1394_get_local_id(dv->raw1394),
                            dv->input_port, dv->channel, dv->bandwidth);

    raw1394_destroy_handle(dv->raw1394);

    return 0;
}
 
/* Private options: DV/HDV type override, queue size, and device GUID. */
static const AVOption options[] = {
    { "dvtype", "override autodetection of DV/HDV", offsetof(struct iec61883_data, type), AV_OPT_TYPE_INT, {.i64 = IEC61883_AUTO}, IEC61883_AUTO, IEC61883_HDV, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
    { "auto", "auto detect DV/HDV", 0, AV_OPT_TYPE_CONST, {.i64 = IEC61883_AUTO}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
    { "dv", "force device being treated as DV device", 0, AV_OPT_TYPE_CONST, {.i64 = IEC61883_DV}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
    { "hdv" , "force device being treated as HDV device", 0, AV_OPT_TYPE_CONST, {.i64 = IEC61883_HDV}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
    { "dvbuffer", "set queue buffer size (in packets)", offsetof(struct iec61883_data, max_packets), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { "dvguid", "select one of multiple DV devices by its GUID", offsetof(struct iec61883_data, device_guid), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};
 
static const AVClass iec61883_class = {
.class_name = "iec61883 indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
 
AVInputFormat ff_iec61883_demuxer = {
.name = "iec61883",
.long_name = NULL_IF_CONFIG_SMALL("libiec61883 (new DV1394) A/V input device"),
.priv_data_size = sizeof(struct iec61883_data),
.read_header = iec61883_read_header,
.read_packet = iec61883_read_packet,
.read_close = iec61883_close,
.flags = AVFMT_NOFILE,
.priv_class = &iec61883_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/jack_audio.c
0,0 → 1,347
/*
* JACK Audio Connection Kit input device
* Copyright (c) 2009 Samalyse
* Author: Olivier Guilyardi <olivier samalyse com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "config.h"
#include <semaphore.h>
#include <jack/jack.h>
 
#include "libavutil/log.h"
#include "libavutil/fifo.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "timefilter.h"
#include "avdevice.h"
 
/**
 * Size of the internal FIFO buffers as a number of audio packets
 */
#define FIFO_PACKETS_NUM 16

/* Private state for the JACK input device. */
typedef struct {
    AVClass *class;                /**< class for private options */
    jack_client_t * client;        /**< JACK client handle; NULL after server shutdown */
    int activated;                 /**< non-zero once jack_activate() has succeeded */
    sem_t packet_count;            /**< counts filled packets waiting in filled_pkts */
    jack_nframes_t sample_rate;    /**< sample rate reported by the JACK server */
    jack_nframes_t buffer_size;    /**< JACK period size in frames */
    jack_port_t ** ports;          /**< registered input ports, nports entries */
    int nports;                    /**< number of audio channels ("channels" option) */
    TimeFilter * timefilter;       /**< smooths the timestamp clock */
    AVFifoBuffer * new_pkts;       /**< empty pre-allocated packets for the RT callback */
    AVFifoBuffer * filled_pkts;    /**< packets filled by the RT callback */
    int pkt_xrun;                  /**< flag: packet FIFO over/underrun occurred */
    int jack_xrun;                 /**< flag: JACK reported an xrun */
} JackData;
 
/* JACK process callback: copy/interleave one period of audio from the JACK
 * port buffers into a pre-allocated AVPacket and hand it to the reader
 * thread via the filled_pkts FIFO. Returns 0 in all cases. */
static int process_callback(jack_nframes_t nframes, void *arg)
{
    /* Warning: this function runs in realtime. One mustn't allocate memory here
     * or do any other thing that could block. */

    int i, j;
    JackData *self = arg;
    float * buffer;
    jack_nframes_t latency, cycle_delay;
    AVPacket pkt;
    float *pkt_data;
    double cycle_time;

    /* Server went away (see shutdown_callback); nothing to do. */
    if (!self->client)
        return 0;

    /* The approximate delay since the hardware interrupt as a number of frames */
    cycle_delay = jack_frames_since_cycle_start(self->client);

    /* Retrieve filtered cycle time */
    cycle_time = ff_timefilter_update(self->timefilter,
                                      av_gettime() / 1000000.0 - (double) cycle_delay / self->sample_rate,
                                      self->buffer_size);

    /* Check if an empty packet is available, and if there's enough space to send it back once filled */
    if ((av_fifo_size(self->new_pkts) < sizeof(pkt)) || (av_fifo_space(self->filled_pkts) < sizeof(pkt))) {
        /* Cannot proceed without blocking: record the overrun and drop the period. */
        self->pkt_xrun = 1;
        return 0;
    }

    /* Retrieve empty (but allocated) packet */
    av_fifo_generic_read(self->new_pkts, &pkt, sizeof(pkt), NULL);

    pkt_data  = (float *) pkt.data;
    latency   = 0;

    /* Copy and interleave audio data from the JACK buffer into the packet */
    for (i = 0; i < self->nports; i++) {
#if HAVE_JACK_PORT_GET_LATENCY_RANGE
        jack_latency_range_t range;
        jack_port_get_latency_range(self->ports[i], JackCaptureLatency, &range);
        latency += range.max;
#else
        latency += jack_port_get_total_latency(self->client, self->ports[i]);
#endif
        buffer = jack_port_get_buffer(self->ports[i], self->buffer_size);
        for (j = 0; j < self->buffer_size; j++)
            pkt_data[j * self->nports + i] = buffer[j];
    }

    /* Timestamp the packet with the cycle start time minus the average latency */
    pkt.pts = (cycle_time - (double) latency / (self->nports * self->sample_rate)) * 1000000.0;

    /* Send the now filled packet back, and increase packet counter */
    av_fifo_generic_write(self->filled_pkts, &pkt, sizeof(pkt), NULL);
    sem_post(&self->packet_count);

    return 0;
}
 
/* JACK shutdown callback: mark the client handle as gone so that both the
 * process callback and the reader thread can detect the server's death. */
static void shutdown_callback(void *arg)
{
    JackData *data = arg;

    data->client = NULL;
}
 
/* JACK xrun callback: remember the event for later logging and reset the
 * time filter, since its timing model is invalid after an xrun. */
static int xrun_callback(void *arg)
{
    JackData *data = arg;

    data->jack_xrun = 1;
    ff_timefilter_reset(data->timefilter);
    return 0;
}
 
/* Top up the new_pkts FIFO with freshly allocated empty packets.
 * process_callback() runs in realtime and cannot allocate, so this must be
 * done from the reader thread. Returns 0 or a negative error code. */
static int supply_new_packets(JackData *self, AVFormatContext *context)
{
    const int pkt_size = self->buffer_size * self->nports * sizeof(float);
    AVPacket pkt;
    int ret;

    /* Fill the FIFO with as many empty packets as it can hold. */
    while (av_fifo_space(self->new_pkts) >= sizeof(pkt)) {
        ret = av_new_packet(&pkt, pkt_size);
        if (ret < 0) {
            av_log(context, AV_LOG_ERROR, "Could not create packet of size %d\n", pkt_size);
            return ret;
        }
        av_fifo_generic_write(self->new_pkts, &pkt, sizeof(pkt), NULL);
    }
    return 0;
}
 
/* Connect to the JACK server, register the input ports, create the time
 * filter and packet FIFOs. Returns 0 on success or a negative AVERROR.
 *
 * Fixes over the original: the results of av_malloc() for the port array
 * and of both av_fifo_alloc() calls are now checked; previously an OOM
 * here led to a NULL dereference in the port loop or in
 * supply_new_packets()/process_callback(). */
static int start_jack(AVFormatContext *context)
{
    JackData *self = context->priv_data;
    jack_status_t status;
    int i, test;

    /* Register as a JACK client, using the context filename as client name. */
    self->client = jack_client_open(context->filename, JackNullOption, &status);
    if (!self->client) {
        av_log(context, AV_LOG_ERROR, "Unable to register as a JACK client\n");
        return AVERROR(EIO);
    }

    sem_init(&self->packet_count, 0, 0);

    self->sample_rate = jack_get_sample_rate(self->client);
    self->ports       = av_malloc(self->nports * sizeof(*self->ports));
    if (!self->ports) {
        /* Original code dereferenced self->ports without this check. */
        jack_client_close(self->client);
        return AVERROR(ENOMEM);
    }
    self->buffer_size = jack_get_buffer_size(self->client);

    /* Register JACK ports */
    for (i = 0; i < self->nports; i++) {
        char str[16];
        snprintf(str, sizeof(str), "input_%d", i + 1);
        self->ports[i] = jack_port_register(self->client, str,
                                            JACK_DEFAULT_AUDIO_TYPE,
                                            JackPortIsInput, 0);
        if (!self->ports[i]) {
            av_log(context, AV_LOG_ERROR, "Unable to register port %s:%s\n",
                   context->filename, str);
            jack_client_close(self->client);
            return AVERROR(EIO);
        }
    }

    /* Register JACK callbacks */
    jack_set_process_callback(self->client, process_callback, self);
    jack_on_shutdown(self->client, shutdown_callback, self);
    jack_set_xrun_callback(self->client, xrun_callback, self);

    /* Create time filter */
    self->timefilter  = ff_timefilter_new (1.0 / self->sample_rate, self->buffer_size, 1.5);
    if (!self->timefilter) {
        jack_client_close(self->client);
        return AVERROR(ENOMEM);
    }

    /* Create FIFO buffers */
    self->filled_pkts = av_fifo_alloc(FIFO_PACKETS_NUM * sizeof(AVPacket));
    /* New packets FIFO with one extra packet for safety against underruns */
    self->new_pkts = av_fifo_alloc((FIFO_PACKETS_NUM + 1) * sizeof(AVPacket));
    if (!self->filled_pkts || !self->new_pkts) {
        /* Original code used both FIFOs without checking the allocations. */
        jack_client_close(self->client);
        return AVERROR(ENOMEM);
    }
    if ((test = supply_new_packets(self, context))) {
        jack_client_close(self->client);
        return test;
    }

    return 0;

}
 
/* Drain a FIFO of AVPackets, releasing each queued packet, then free the
 * FIFO itself. */
static void free_pkt_fifo(AVFifoBuffer *fifo)
{
    AVPacket pkt;

    while (av_fifo_size(fifo) > 0) {
        av_fifo_generic_read(fifo, &pkt, sizeof(pkt), NULL);
        av_free_packet(&pkt);
    }
    av_fifo_free(fifo);
}
 
/* Release all JACK-related resources: deactivate and close the client
 * first (so the process callback stops touching the FIFOs), then destroy
 * the semaphore, both packet FIFOs, the port array and the time filter. */
static void stop_jack(JackData *jd)
{
    if (jd->client) {
        if (jd->activated)
            jack_deactivate(jd->client);
        jack_client_close(jd->client);
    }
    sem_destroy(&jd->packet_count);
    free_pkt_fifo(jd->new_pkts);
    free_pkt_fifo(jd->filled_pkts);
    av_freep(&jd->ports);
    ff_timefilter_destroy(jd->timefilter);
}
 
/* Demuxer read_header: connect to JACK and create a single float-PCM audio
 * stream using the server's sample rate and the configured channel count.
 * Returns 0 or a negative AVERROR code. */
static int audio_read_header(AVFormatContext *context)
{
    JackData *self = context->priv_data;
    AVStream *stream;
    int test;

    if ((test = start_jack(context)))
        return test;

    stream = avformat_new_stream(context, NULL);
    if (!stream) {
        stop_jack(self);
        return AVERROR(ENOMEM);
    }

    stream->codec->codec_type   = AVMEDIA_TYPE_AUDIO;
    /* JACK delivers native-endian 32-bit float samples. */
#if HAVE_BIGENDIAN
    stream->codec->codec_id     = AV_CODEC_ID_PCM_F32BE;
#else
    stream->codec->codec_id     = AV_CODEC_ID_PCM_F32LE;
#endif
    stream->codec->sample_rate  = self->sample_rate;
    stream->codec->channels     = self->nports;

    avpriv_set_pts_info(stream, 64, 1, 1000000);  /* 64 bits pts in us */
    return 0;
}
 
/* Demuxer read_packet: wait (up to 2 seconds) for a packet filled by
 * process_callback(), report any pending xruns, hand the packet to the
 * caller and replenish the empty-packet FIFO. */
static int audio_read_packet(AVFormatContext *context, AVPacket *pkt)
{
    JackData *self = context->priv_data;
    struct timespec timeout = {0, 0};
    int test;

    /* Activate the JACK client on first packet read. Activating the JACK client
     * means that process_callback() starts to get called at regular interval.
     * If we activate it in audio_read_header(), we're actually reading audio data
     * from the device before instructed to, and that may result in an overrun. */
    if (!self->activated) {
        if (!jack_activate(self->client)) {
            self->activated = 1;
            av_log(context, AV_LOG_INFO,
                   "JACK client registered and activated (rate=%dHz, buffer_size=%d frames)\n",
                   self->sample_rate, self->buffer_size);
        } else {
            av_log(context, AV_LOG_ERROR, "Unable to activate JACK client\n");
            return AVERROR(EIO);
        }
    }

    /* Wait for a packet coming back from process_callback(), if one isn't available yet */
    /* sem_timedwait() takes an absolute deadline: now + 2 seconds. */
    timeout.tv_sec = av_gettime() / 1000000 + 2;
    if (sem_timedwait(&self->packet_count, &timeout)) {
        if (errno == ETIMEDOUT) {
            av_log(context, AV_LOG_ERROR,
                   "Input error: timed out when waiting for JACK process callback output\n");
        } else {
            av_log(context, AV_LOG_ERROR, "Error while waiting for audio packet: %s\n",
                   strerror(errno));
        }
        /* shutdown_callback() clears self->client when the server dies. */
        if (!self->client)
            av_log(context, AV_LOG_ERROR, "Input error: JACK server is gone\n");

        return AVERROR(EIO);
    }

    /* Report (once) any xruns flagged by the realtime callback. */
    if (self->pkt_xrun) {
        av_log(context, AV_LOG_WARNING, "Audio packet xrun\n");
        self->pkt_xrun = 0;
    }

    if (self->jack_xrun) {
        av_log(context, AV_LOG_WARNING, "JACK xrun\n");
        self->jack_xrun = 0;
    }

    /* Retrieve the packet filled with audio data by process_callback() */
    av_fifo_generic_read(self->filled_pkts, pkt, sizeof(*pkt), NULL);

    /* Refill the empty-packet FIFO so the callback never starves. */
    if ((test = supply_new_packets(self, context)))
        return test;

    return 0;
}
 
/* Demuxer read_close: tear down the JACK client and all associated state. */
static int audio_read_close(AVFormatContext *context)
{
    stop_jack((JackData *)context->priv_data);
    return 0;
}
 
#define OFFSET(x) offsetof(JackData, x)
/* Private options: only the channel count is configurable. */
static const AVOption options[] = {
    { "channels", "Number of audio channels.", OFFSET(nports), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};

/* AVClass exposing the private options above to the AVOption system. */
static const AVClass jack_indev_class = {
    .class_name     = "JACK indev",
    .item_name      = av_default_item_name,
    .option         = options,
    .version        = LIBAVUTIL_VERSION_INT,
};

/* Demuxer registration for the JACK audio input device. */
AVInputFormat ff_jack_demuxer = {
    .name           = "jack",
    .long_name      = NULL_IF_CONFIG_SMALL("JACK Audio Connection Kit"),
    .priv_data_size = sizeof(JackData),
    .read_header    = audio_read_header,
    .read_packet    = audio_read_packet,
    .read_close     = audio_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &jack_indev_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/lavfi.c
0,0 → 1,435
/*
* Copyright (c) 2011 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* libavfilter virtual input device
*/
 
/* #define DEBUG */
 
#include <float.h> /* DBL_MIN, DBL_MAX */
 
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/file.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavformat/internal.h"
#include "avdevice.h"
 
/* Private state for the lavfi virtual input device. */
typedef struct {
    AVClass *class;          ///< class for private options
    char          *graph_str;      ///< filtergraph description ("graph" option or filename)
    char          *graph_filename; ///< file to read the graph description from
    char          *dump_graph;     ///< if set, dump the configured graph to stderr
    AVFilterGraph *graph;          ///< the parsed and configured filtergraph
    AVFilterContext **sinks;       ///< one (a)buffersink per graph output
    int *sink_stream_map;          ///< sink index -> stream index
    int *sink_eof;                 ///< per-sink EOF flags
    int *stream_sink_map;          ///< stream index -> sink index (-1 = unset)
    AVFrame *decoded_frame;        ///< scratch frame reused by lavfi_read_packet()
} LavfiContext;
 
static int *create_all_formats(int n)
{
int i, j, *fmts, count = 0;
 
for (i = 0; i < n; i++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
count++;
}
 
if (!(fmts = av_malloc((count+1) * sizeof(int))))
return NULL;
for (j = 0, i = 0; i < n; i++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
fmts[j++] = i;
}
fmts[j] = -1;
return fmts;
}
 
/* Demuxer read_close: release all per-context state. Also used as the
 * error-path cleanup for lavfi_read_header(), so every field must be safe
 * to free when only partially initialized. */
av_cold static int lavfi_read_close(AVFormatContext *avctx)
{
    LavfiContext *ctx = avctx->priv_data;

    /* Mapping tables between sinks and streams. */
    av_freep(&ctx->sink_stream_map);
    av_freep(&ctx->sink_eof);
    av_freep(&ctx->stream_sink_map);
    /* Sink array, the graph itself, and the scratch frame. */
    av_freep(&ctx->sinks);
    avfilter_graph_free(&ctx->graph);
    av_frame_free(&ctx->decoded_frame);

    return 0;
}
 
/**
 * Demuxer read_header for the lavfi virtual input.
 *
 * Parses the filtergraph description (from the "graph" option, the
 * "graph_file" option, or failing both, the context filename), creates one
 * AVStream per open graph output named "out"/"outN", attaches a
 * buffersink/abuffersink to each output, configures the graph, and fills
 * in codec parameters from the negotiated link properties.
 * Returns 0 on success or a negative AVERROR code; on failure all partial
 * state is released via lavfi_read_close().
 */
av_cold static int lavfi_read_header(AVFormatContext *avctx)
{
    LavfiContext *lavfi = avctx->priv_data;
    AVFilterInOut *input_links = NULL, *output_links = NULL, *inout;
    AVFilter *buffersink, *abuffersink;
    int *pix_fmts = create_all_formats(AV_PIX_FMT_NB);
    enum AVMediaType type;
    int ret = 0, i, n;

/* Record the error code and jump to the shared cleanup path. */
#define FAIL(ERR) { ret = ERR; goto end; }

    if (!pix_fmts)
        FAIL(AVERROR(ENOMEM));

    avfilter_register_all();

    buffersink = avfilter_get_by_name("buffersink");
    abuffersink = avfilter_get_by_name("abuffersink");

    if (lavfi->graph_filename && lavfi->graph_str) {
        av_log(avctx, AV_LOG_ERROR,
               "Only one of the graph or graph_file options must be specified\n");
        FAIL(AVERROR(EINVAL));
    }

    if (lavfi->graph_filename) {
        /* Load the whole graph description file into memory. */
        uint8_t *file_buf, *graph_buf;
        size_t file_bufsize;
        ret = av_file_map(lavfi->graph_filename,
                          &file_buf, &file_bufsize, 0, avctx);
        if (ret < 0)
            goto end;

        /* create a 0-terminated string based on the read file */
        graph_buf = av_malloc(file_bufsize + 1);
        if (!graph_buf) {
            av_file_unmap(file_buf, file_bufsize);
            FAIL(AVERROR(ENOMEM));
        }
        memcpy(graph_buf, file_buf, file_bufsize);
        graph_buf[file_bufsize] = 0;
        av_file_unmap(file_buf, file_bufsize);
        /* NOTE(review): this replaces the option-owned graph_str pointer;
         * the previous value appears to be leaked here — verify against
         * the AVOption free logic. */
        lavfi->graph_str = graph_buf;
    }

    if (!lavfi->graph_str)
        lavfi->graph_str = av_strdup(avctx->filename);

    /* parse the graph, create a stream for each open output */
    if (!(lavfi->graph = avfilter_graph_alloc()))
        FAIL(AVERROR(ENOMEM));

    if ((ret = avfilter_graph_parse_ptr(lavfi->graph, lavfi->graph_str,
                                        &input_links, &output_links, avctx)) < 0)
        FAIL(ret);

    if (input_links) {
        av_log(avctx, AV_LOG_ERROR,
               "Open inputs in the filtergraph are not acceptable\n");
        FAIL(AVERROR(EINVAL));
    }

    /* count the outputs */
    for (n = 0, inout = output_links; inout; n++, inout = inout->next);

    if (!(lavfi->sink_stream_map = av_malloc(sizeof(int) * n)))
        FAIL(AVERROR(ENOMEM));
    if (!(lavfi->sink_eof = av_mallocz(sizeof(int) * n)))
        FAIL(AVERROR(ENOMEM));
    if (!(lavfi->stream_sink_map = av_malloc(sizeof(int) * n)))
        FAIL(AVERROR(ENOMEM));

    /* -1 marks "no sink assigned yet" for duplicate detection below. */
    for (i = 0; i < n; i++)
        lavfi->stream_sink_map[i] = -1;

    /* parse the output link names - they need to be of the form out0, out1, ...
     * create a mapping between them and the streams */
    for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
        int stream_idx;
        if (!strcmp(inout->name, "out"))
            stream_idx = 0;
        else if (sscanf(inout->name, "out%d\n", &stream_idx) != 1) {
            av_log(avctx,  AV_LOG_ERROR,
                   "Invalid outpad name '%s'\n", inout->name);
            FAIL(AVERROR(EINVAL));
        }

        if ((unsigned)stream_idx >= n) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid index was specified in output '%s', "
                   "must be a non-negative value < %d\n",
                   inout->name, n);
            FAIL(AVERROR(EINVAL));
        }

        /* is an audio or video output? */
        type = inout->filter_ctx->output_pads[inout->pad_idx].type;
        if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
            av_log(avctx,  AV_LOG_ERROR,
                   "Output '%s' is not a video or audio output, not yet supported\n", inout->name);
            FAIL(AVERROR(EINVAL));
        }

        if (lavfi->stream_sink_map[stream_idx] != -1) {
            av_log(avctx,  AV_LOG_ERROR,
                   "An output with stream index %d was already specified\n",
                   stream_idx);
            FAIL(AVERROR(EINVAL));
        }
        lavfi->sink_stream_map[i] = stream_idx;
        lavfi->stream_sink_map[stream_idx] = i;
    }

    /* for each open output create a corresponding stream */
    for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
        AVStream *st;
        if (!(st = avformat_new_stream(avctx, NULL)))
            FAIL(AVERROR(ENOMEM));
        st->id = i;
    }

    /* create a sink for each output and connect them to the graph */
    lavfi->sinks = av_malloc(sizeof(AVFilterContext *) * avctx->nb_streams);
    if (!lavfi->sinks)
        FAIL(AVERROR(ENOMEM));

    for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
        AVFilterContext *sink;

        type = inout->filter_ctx->output_pads[inout->pad_idx].type;

        if (type == AVMEDIA_TYPE_VIDEO && ! buffersink ||
            type == AVMEDIA_TYPE_AUDIO && ! abuffersink) {
            av_log(avctx, AV_LOG_ERROR, "Missing required buffersink filter, aborting.\n");
            FAIL(AVERROR_FILTER_NOT_FOUND);
        }

        if (type == AVMEDIA_TYPE_VIDEO) {
            ret = avfilter_graph_create_filter(&sink, buffersink,
                                               inout->name, NULL,
                                               NULL, lavfi->graph);
            if (ret >= 0)
                ret = av_opt_set_int_list(sink, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
            if (ret < 0)
                goto end;
        } else if (type == AVMEDIA_TYPE_AUDIO) {
            /* Restrict the audio sink to sample formats representable as PCM. */
            enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_U8,
                                                  AV_SAMPLE_FMT_S16,
                                                  AV_SAMPLE_FMT_S32,
                                                  AV_SAMPLE_FMT_FLT,
                                                  AV_SAMPLE_FMT_DBL, -1 };

            ret = avfilter_graph_create_filter(&sink, abuffersink,
                                               inout->name, NULL,
                                               NULL, lavfi->graph);
            if (ret >= 0)
                ret = av_opt_set_int_list(sink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
            if (ret < 0)
                goto end;
        }

        lavfi->sinks[i] = sink;
        if ((ret = avfilter_link(inout->filter_ctx, inout->pad_idx, sink, 0)) < 0)
            FAIL(ret);
    }

    /* configure the graph */
    if ((ret = avfilter_graph_config(lavfi->graph, avctx)) < 0)
        FAIL(ret);

    if (lavfi->dump_graph) {
        char *dump = avfilter_graph_dump(lavfi->graph, lavfi->dump_graph);
        fputs(dump, stderr);
        fflush(stderr);
        av_free(dump);
    }

    /* fill each stream with the information in the corresponding sink */
    for (i = 0; i < avctx->nb_streams; i++) {
        AVFilterLink *link = lavfi->sinks[lavfi->stream_sink_map[i]]->inputs[0];
        AVStream *st = avctx->streams[i];
        st->codec->codec_type = link->type;
        avpriv_set_pts_info(st, 64, link->time_base.num, link->time_base.den);
        if (link->type == AVMEDIA_TYPE_VIDEO) {
            st->codec->codec_id   = AV_CODEC_ID_RAWVIDEO;
            st->codec->pix_fmt    = link->format;
            st->codec->time_base  = link->time_base;
            st->codec->width      = link->w;
            st->codec->height     = link->h;
            st ->sample_aspect_ratio =
            st->codec->sample_aspect_ratio = link->sample_aspect_ratio;
            /* Make sure probing can see at least ~30 raw frames. */
            avctx->probesize = FFMAX(avctx->probesize,
                                     link->w * link->h *
                                     av_get_padded_bits_per_pixel(av_pix_fmt_desc_get(link->format)) *
                                     30);
        } else if (link->type == AVMEDIA_TYPE_AUDIO) {
            st->codec->codec_id    = av_get_pcm_codec(link->format, -1);
            st->codec->channels    = avfilter_link_get_channels(link);
            st->codec->sample_fmt  = link->format;
            st->codec->sample_rate = link->sample_rate;
            st->codec->time_base   = link->time_base;
            st->codec->channel_layout = link->channel_layout;
            if (st->codec->codec_id == AV_CODEC_ID_NONE)
                av_log(avctx, AV_LOG_ERROR,
                       "Could not find PCM codec for sample format %s.\n",
                       av_get_sample_fmt_name(link->format));
        }
    }

    if (!(lavfi->decoded_frame = av_frame_alloc()))
        FAIL(AVERROR(ENOMEM));

end:
    av_free(pix_fmts);
    avfilter_inout_free(&input_links);
    avfilter_inout_free(&output_links);
    if (ret < 0)
        lavfi_read_close(avctx);
    return ret;
}
 
/* Demuxer read_packet: peek at every non-EOF sink, pick the one whose next
 * frame has the smallest timestamp, pull that frame and serialize it into
 * pkt (raw video layout or packed PCM), copying any frame metadata into
 * packet side data. Returns the packet size, AVERROR_EOF when every sink
 * has hit EOF, or another negative error code. */
static int lavfi_read_packet(AVFormatContext *avctx, AVPacket *pkt)
{
    LavfiContext *lavfi = avctx->priv_data;
    double min_pts = DBL_MAX;
    int stream_idx, min_pts_sink_idx = 0;
    AVFrame *frame = lavfi->decoded_frame;
    AVPicture pict;
    AVDictionary *frame_metadata;
    int ret, i;
    int size = 0;

    /* iterate through all the graph sinks. Select the sink with the
     * minimum PTS */
    for (i = 0; i < avctx->nb_streams; i++) {
        AVRational tb = lavfi->sinks[i]->inputs[0]->time_base;
        double d;
        int ret;

        if (lavfi->sink_eof[i])
            continue;

        /* PEEK: inspect the pending frame without consuming it. */
        ret = av_buffersink_get_frame_flags(lavfi->sinks[i], frame,
                                            AV_BUFFERSINK_FLAG_PEEK);
        if (ret == AVERROR_EOF) {
            av_dlog(avctx, "EOF sink_idx:%d\n", i);
            lavfi->sink_eof[i] = 1;
            continue;
        } else if (ret < 0)
            return ret;
        d = av_rescale_q(frame->pts, tb, AV_TIME_BASE_Q);
        av_dlog(avctx, "sink_idx:%d time:%f\n", i, d);
        av_frame_unref(frame);

        if (d < min_pts) {
            min_pts = d;
            min_pts_sink_idx = i;
        }
    }
    /* All sinks at EOF: nothing left to read. */
    if (min_pts == DBL_MAX)
        return AVERROR_EOF;

    av_dlog(avctx, "min_pts_sink_idx:%i\n", min_pts_sink_idx);

    /* Now actually consume the frame from the selected sink. */
    av_buffersink_get_frame_flags(lavfi->sinks[min_pts_sink_idx], frame, 0);
    stream_idx = lavfi->sink_stream_map[min_pts_sink_idx];

    if (frame->width /* FIXME best way of testing a video */) {
        /* Video: lay the planes out contiguously into the packet. */
        size = avpicture_get_size(frame->format, frame->width, frame->height);
        if ((ret = av_new_packet(pkt, size)) < 0)
            return ret;

        memcpy(pict.data,     frame->data,     4*sizeof(frame->data[0]));
        memcpy(pict.linesize, frame->linesize, 4*sizeof(frame->linesize[0]));

        avpicture_layout(&pict, frame->format, frame->width, frame->height,
                         pkt->data, size);
    } else if (av_frame_get_channels(frame) /* FIXME test audio */) {
        /* Audio: copy the packed sample buffer. */
        size = frame->nb_samples * av_get_bytes_per_sample(frame->format) *
                                   av_frame_get_channels(frame);
        if ((ret = av_new_packet(pkt, size)) < 0)
            return ret;
        memcpy(pkt->data, frame->data[0], size);
    }

    /* Serialize frame metadata as NUL-separated key/value side data. */
    frame_metadata = av_frame_get_metadata(frame);
    if (frame_metadata) {
        uint8_t *metadata;
        AVDictionaryEntry *e = NULL;
        AVBPrint meta_buf;

        av_bprint_init(&meta_buf, 0, AV_BPRINT_SIZE_UNLIMITED);
        while ((e = av_dict_get(frame_metadata, "", e, AV_DICT_IGNORE_SUFFIX))) {
            av_bprintf(&meta_buf, "%s", e->key);
            av_bprint_chars(&meta_buf, '\0', 1);
            av_bprintf(&meta_buf, "%s", e->value);
            av_bprint_chars(&meta_buf, '\0', 1);
        }
        if (!av_bprint_is_complete(&meta_buf) ||
            !(metadata = av_packet_new_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA,
                                                 meta_buf.len))) {
            av_bprint_finalize(&meta_buf, NULL);
            return AVERROR(ENOMEM);
        }
        memcpy(metadata, meta_buf.str, meta_buf.len);
        av_bprint_finalize(&meta_buf, NULL);
    }

    pkt->stream_index = stream_idx;
    pkt->pts = frame->pts;
    pkt->pos = av_frame_get_pkt_pos(frame);
    pkt->size = size;
    av_frame_unref(frame);
    return size;
}
 
#define OFFSET(x) offsetof(LavfiContext, x)

#define DEC AV_OPT_FLAG_DECODING_PARAM

/* Private options: graph description inline, from a file, or dumped. */
static const AVOption options[] = {
    { "graph",     "set libavfilter graph", OFFSET(graph_str),  AV_OPT_TYPE_STRING, {.str = NULL}, 0,  0, DEC },
    { "graph_file","set libavfilter graph filename", OFFSET(graph_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC},
    { "dumpgraph", "dump graph to stderr",  OFFSET(dump_graph), AV_OPT_TYPE_STRING, {.str = NULL}, 0,  0, DEC },
    { NULL },
};

/* AVClass exposing the private options above to the AVOption system. */
static const AVClass lavfi_class = {
    .class_name = "lavfi indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* Demuxer registration for the libavfilter virtual input device. */
AVInputFormat ff_lavfi_demuxer = {
    .name           = "lavfi",
    .long_name      = NULL_IF_CONFIG_SMALL("Libavfilter virtual input device"),
    .priv_data_size = sizeof(LavfiContext),
    .read_header    = lavfi_read_header,
    .read_packet    = lavfi_read_packet,
    .read_close     = lavfi_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &lavfi_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/libavdevice.pc
0,0 → 1,14
prefix=/usr/local
exec_prefix=${prefix}
libdir=${prefix}/lib
includedir=${prefix}/include
 
Name: libavdevice
Description: FFmpeg device handling library
Version: 55.5.100
Requires:
Requires.private: libavformat = 55.19.104
Conflicts:
Libs: -L${libdir} -lavdevice
Libs.private: -lm -lz -lpsapi -ladvapi32 -lshell32
Cflags: -I${includedir}
/contrib/sdk/sources/ffmpeg/libavdevice/libavdevice.v
0,0 → 1,4
LIBAVDEVICE_$MAJOR {
global: DllStartup; avdevice_*;
local: *;
};
/contrib/sdk/sources/ffmpeg/libavdevice/libavdevice.ver
0,0 → 1,4
LIBAVDEVICE_55 {
global: DllStartup; avdevice_*;
local: *;
};
/contrib/sdk/sources/ffmpeg/libavdevice/libcdio.c
0,0 → 1,191
/*
* Copyright (c) 2011 Anton Khirnov <anton@khirnov.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* libcdio CD grabbing
*/
 
#include "config.h"
 
#if HAVE_CDIO_PARANOIA_H
#include <cdio/cdda.h>
#include <cdio/paranoia.h>
#elif HAVE_CDIO_PARANOIA_PARANOIA_H
#include <cdio/paranoia/cdda.h>
#include <cdio/paranoia/paranoia.h>
#endif
 
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
 
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
 
/* Private state for the libcdio CD-audio grabbing input. */
typedef struct CDIOContext {
    const AVClass       *class;     ///< class for private options
    cdrom_drive_t       *drive;     ///< libcdio drive handle
    cdrom_paranoia_t *paranoia;     ///< paranoia error-correction context
    int32_t last_sector;            ///< last audio sector on the disc (EOF marker)

    /* private options */
    int speed;                      ///< requested drive read speed (0 = default)
    int paranoia_mode;              ///< paranoia error-recovery mode flags
} CDIOContext;
 
/* Open the CD drive named by ctx->filename, set up paranoia reading and
 * describe the disc as a single 16-bit stereo PCM stream with one chapter
 * per track. Returns 0 or a negative AVERROR code.
 *
 * Fix over the original: every error path after cdio_cddap_identify()
 * succeeded now closes the drive handle (read_close is not called when
 * read_header fails, so the handle used to leak). */
static av_cold int read_header(AVFormatContext *ctx)
{
    CDIOContext *s = ctx->priv_data;
    AVStream *st;
    int ret, i;
    char *err = NULL;

    if (!(st = avformat_new_stream(ctx, NULL)))
        return AVERROR(ENOMEM);
    s->drive = cdio_cddap_identify(ctx->filename, CDDA_MESSAGE_LOGIT, &err);
    if (!s->drive) {
        av_log(ctx, AV_LOG_ERROR, "Could not open drive %s.\n", ctx->filename);
        return AVERROR(EINVAL);
    }
    if (err) {
        av_log(ctx, AV_LOG_VERBOSE, "%s\n", err);
        free(err);
    }
    if ((ret = cdio_cddap_open(s->drive)) < 0 || !s->drive->opened) {
        av_log(ctx, AV_LOG_ERROR, "Could not open disk in drive %s.\n", ctx->filename);
        goto fail;
    }

    cdio_cddap_verbose_set(s->drive, CDDA_MESSAGE_LOGIT, CDDA_MESSAGE_LOGIT);
    if (s->speed)
        cdio_cddap_speed_set(s->drive, s->speed);

    s->paranoia = cdio_paranoia_init(s->drive);
    if (!s->paranoia) {
        av_log(ctx, AV_LOG_ERROR, "Could not init paranoia.\n");
        goto fail;
    }
    cdio_paranoia_modeset(s->paranoia, s->paranoia_mode);

    /* CD-DA is always 44.1 kHz, 16-bit stereo; endianness is per-drive. */
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    if (s->drive->bigendianp)
        st->codec->codec_id = AV_CODEC_ID_PCM_S16BE;
    else
        st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
    st->codec->sample_rate = 44100;
    st->codec->channels = 2;
    if (s->drive->audio_last_sector != CDIO_INVALID_LSN &&
        s->drive->audio_first_sector != CDIO_INVALID_LSN)
        st->duration = s->drive->audio_last_sector - s->drive->audio_first_sector;
    else if (s->drive->tracks)
        st->duration = s->drive->disc_toc[s->drive->tracks].dwStartSector;
    /* Time base: one tick per raw CD frame (sector). */
    avpriv_set_pts_info(st, 64, CDIO_CD_FRAMESIZE_RAW, 2*st->codec->channels*st->codec->sample_rate);

    /* One chapter per track, named after the track number. */
    for (i = 0; i < s->drive->tracks; i++) {
        char title[16];
        snprintf(title, sizeof(title), "track %02d", s->drive->disc_toc[i].bTrack);
        avpriv_new_chapter(ctx, i, st->time_base, s->drive->disc_toc[i].dwStartSector,
                           s->drive->disc_toc[i+1].dwStartSector, title);
    }

    s->last_sector = cdio_cddap_disc_lastsector(s->drive);

    return 0;

fail:
    /* Release the drive handle acquired above; read_close() will not run. */
    cdio_cddap_close(s->drive);
    s->drive = NULL;
    return AVERROR(EINVAL);
}
 
/* Read one raw CD sector through paranoia into a new packet, logging any
 * pending libcdio error/status messages. Returns 0, AVERROR_EOF past the
 * last sector (or on read failure), or a negative error code. */
static int read_packet(AVFormatContext *ctx, AVPacket *pkt)
{
    CDIOContext *s = ctx->priv_data;
    uint16_t *buf;
    char *msg;
    int ret;

    /* Past the last audio sector: signal end of stream. */
    if (ctx->streams[0]->cur_dts > s->last_sector)
        return AVERROR_EOF;

    buf = cdio_paranoia_read(s->paranoia, NULL);
    if (!buf)
        return AVERROR_EOF;

    /* Drain and log accumulated drive error and status messages. */
    msg = cdio_cddap_errors(s->drive);
    if (msg) {
        av_log(ctx, AV_LOG_ERROR, "%s\n", msg);
        free(msg);
    }
    msg = cdio_cddap_messages(s->drive);
    if (msg) {
        av_log(ctx, AV_LOG_VERBOSE, "%s\n", msg);
        free(msg);
    }

    ret = av_new_packet(pkt, CDIO_CD_FRAMESIZE_RAW);
    if (ret < 0)
        return ret;
    memcpy(pkt->data, buf, CDIO_CD_FRAMESIZE_RAW);
    return 0;
}
 
/* Demuxer read_close: free the paranoia context and close the drive. */
static av_cold int read_close(AVFormatContext *ctx)
{
    CDIOContext *cdio = ctx->priv_data;

    cdio_paranoia_free(cdio->paranoia);
    cdio_cddap_close(cdio->drive);
    return 0;
}
 
/* Demuxer read_seek: move the paranoia read position to the requested
 * sector and update the stream's current DTS to match. stream_index and
 * flags are unused (there is only one stream). */
static int read_seek(AVFormatContext *ctx, int stream_index, int64_t timestamp,
                     int flags)
{
    CDIOContext *cdio = ctx->priv_data;
    AVStream *audio_st = ctx->streams[0];

    cdio_paranoia_seek(cdio->paranoia, timestamp, SEEK_SET);
    audio_st->cur_dts = timestamp;
    return 0;
}
 
#define OFFSET(x) offsetof(CDIOContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
/* Private options: drive speed and paranoia error-recovery flags. */
static const AVOption options[] = {
    { "speed",              "Drive reading speed.", OFFSET(speed),         AV_OPT_TYPE_INT,   { .i64 = 0 }, 0,       INT_MAX, DEC },
    { "paranoia_mode",      "Error recovery mode.", OFFSET(paranoia_mode), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX, DEC, "paranoia_mode" },
        { "verify",    "Verify data integrity in overlap area", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_VERIFY },    0, 0, DEC, "paranoia_mode" },
        { "overlap",   "Perform overlapped reads.",             0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_OVERLAP },   0, 0, DEC, "paranoia_mode" },
        { "neverskip", "Do not skip failed reads.",             0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_NEVERSKIP }, 0, 0, DEC, "paranoia_mode" },
    { NULL },
};

/* AVClass exposing the private options above to the AVOption system. */
static const AVClass libcdio_class = {
    .class_name = "libcdio indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* Demuxer registration for the libcdio CD-audio input device. */
AVInputFormat ff_libcdio_demuxer = {
    .name           = "libcdio",
    .read_header    = read_header,
    .read_packet    = read_packet,
    .read_close     = read_close,
    .read_seek      = read_seek,
    .priv_data_size = sizeof(CDIOContext),
    .flags          = AVFMT_NOFILE,
    .priv_class     = &libcdio_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/libdc1394.c
0,0 → 1,420
/*
* IIDC1394 grab interface (uses libdc1394 and libraw1394)
* Copyright (c) 2004 Roman Shaposhnik
* Copyright (c) 2008 Alessandro Sappia
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "config.h"
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
 
#if HAVE_LIBDC1394_2
#include <dc1394/dc1394.h>
#elif HAVE_LIBDC1394_1
#include <libraw1394/raw1394.h>
#include <libdc1394/dc1394_control.h>
 
#define DC1394_VIDEO_MODE_320x240_YUV422 MODE_320x240_YUV422
#define DC1394_VIDEO_MODE_640x480_YUV411 MODE_640x480_YUV411
#define DC1394_VIDEO_MODE_640x480_YUV422 MODE_640x480_YUV422
#define DC1394_FRAMERATE_1_875 FRAMERATE_1_875
#define DC1394_FRAMERATE_3_75 FRAMERATE_3_75
#define DC1394_FRAMERATE_7_5 FRAMERATE_7_5
#define DC1394_FRAMERATE_15 FRAMERATE_15
#define DC1394_FRAMERATE_30 FRAMERATE_30
#define DC1394_FRAMERATE_60 FRAMERATE_60
#define DC1394_FRAMERATE_120 FRAMERATE_120
#define DC1394_FRAMERATE_240 FRAMERATE_240
#endif
 
/* Private state for the libdc1394 (IIDC FireWire camera) input device;
 * fields differ between the v1 and v2 library APIs. */
typedef struct dc1394_data {
    AVClass *class;                /**< class for private options */
#if HAVE_LIBDC1394_1
    raw1394handle_t handle;        /**< raw1394 bus handle (v1 API) */
    dc1394_cameracapture camera;   /**< capture state (v1 API) */
    int channel;                   /**< ISO channel, set by the "channel" option */
#elif HAVE_LIBDC1394_2
    dc1394_t *d;                   /**< library context (v2 API) */
    dc1394camera_t *camera;        /**< camera handle (v2 API) */
    dc1394video_frame_t *frame;    /**< most recently captured frame (v2 API) */
#endif
    int current_frame;             /**< running frame counter */
    int  frame_rate;        /**< frames per 1000 seconds (fps * 1000) */
    char *video_size;       /**< String describing video size, set by a private option. */
    char *pixel_format;     /**< Set by a private option. */
    char *framerate;        /**< Set by a private option. */

    AVPacket packet;               /**< reusable packet describing the current frame */
} dc1394_data;
 
/* Table of supported camera video modes: maps a width/height/pixel-format
 * triple to the corresponding DC1394 video-mode id. Terminated by an
 * all-zero sentinel entry. */
struct dc1394_frame_format {
    int width;
    int height;
    enum AVPixelFormat pix_fmt;
    int frame_size_id;
} dc1394_frame_formats[] = {
    { 320, 240, AV_PIX_FMT_UYVY422,   DC1394_VIDEO_MODE_320x240_YUV422 },
    { 640, 480, AV_PIX_FMT_UYYVYY411, DC1394_VIDEO_MODE_640x480_YUV411 },
    { 640, 480, AV_PIX_FMT_UYVY422,   DC1394_VIDEO_MODE_640x480_YUV422 },
    { 0, 0, 0, 0 } /* gotta be the last one */
};
 
/* Maps a frame rate in milli-frames-per-second (fps * 1000, matching
 * dc1394_data.frame_rate) onto the DC1394 framerate id.
 * Terminated by an all-zero sentinel entry. */
struct dc1394_frame_rate {
    int frame_rate;                  /* fps * 1000 */
    int frame_rate_id;               /* DC1394_FRAMERATE_* value */
} dc1394_frame_rates[] = {
    {  1875, DC1394_FRAMERATE_1_875 },
    {  3750, DC1394_FRAMERATE_3_75  },
    {  7500, DC1394_FRAMERATE_7_5   },
    { 15000, DC1394_FRAMERATE_15    },
    { 30000, DC1394_FRAMERATE_30    },
    { 60000, DC1394_FRAMERATE_60    },
    {120000, DC1394_FRAMERATE_120   },
    {240000, DC1394_FRAMERATE_240   },
    { 0, 0 } /* gotta be the last one */
};
 
#define OFFSET(x) offsetof(dc1394_data, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM

/* Private AVOptions for the libdc1394 input device. */
static const AVOption options[] = {
#if HAVE_LIBDC1394_1
    /* v1 API only: selects which detected camera to use */
    { "channel", "", offsetof(dc1394_data, channel), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
#endif
    { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), AV_OPT_TYPE_STRING, {.str = "qvga"}, 0, 0, DEC },
    { "pixel_format", "", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = "uyvy422"}, 0, 0, DEC },
    { "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = "ntsc"}, 0, 0, DEC },
    { NULL },
};

static const AVClass libdc1394_class = {
    .class_name = "libdc1394 indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
 
/**
 * Parse the user-supplied private options, look up the matching DC1394
 * video mode and framerate, and create the single raw-video stream.
 *
 * Shared by the v1 and v2 read_header implementations.
 *
 * @param c          demuxer context whose priv_data is a dc1394_data
 * @param select_fmt on success, points at the matched format table entry
 * @param select_fps on success, points at the matched framerate table entry
 * @return 0 on success, a negative AVERROR code on failure
 */
static inline int dc1394_read_common(AVFormatContext *c,
    struct dc1394_frame_format **select_fmt, struct dc1394_frame_rate **select_fps)
{
    dc1394_data *dc1394 = c->priv_data;
    struct dc1394_frame_format *fmt_entry;
    struct dc1394_frame_rate *rate_entry;
    AVStream *stream;
    enum AVPixelFormat pix_fmt;
    AVRational framerate;
    int width, height;
    int err;

    pix_fmt = av_get_pix_fmt(dc1394->pixel_format);
    if (pix_fmt == AV_PIX_FMT_NONE) {
        av_log(c, AV_LOG_ERROR, "No such pixel format: %s.\n", dc1394->pixel_format);
        return AVERROR(EINVAL);
    }

    err = av_parse_video_size(&width, &height, dc1394->video_size);
    if (err < 0) {
        av_log(c, AV_LOG_ERROR, "Could not parse video size '%s'.\n", dc1394->video_size);
        return err;
    }

    err = av_parse_video_rate(&framerate, dc1394->framerate);
    if (err < 0) {
        av_log(c, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", dc1394->framerate);
        return err;
    }
    /* internal representation is fps * 1000 */
    dc1394->frame_rate = av_rescale(1000, framerate.num, framerate.den);

    /* scan the sentinel-terminated tables for an exact match */
    for (fmt_entry = dc1394_frame_formats; fmt_entry->width; fmt_entry++)
        if (fmt_entry->pix_fmt == pix_fmt &&
            fmt_entry->width  == width   &&
            fmt_entry->height == height)
            break;

    for (rate_entry = dc1394_frame_rates; rate_entry->frame_rate; rate_entry++)
        if (rate_entry->frame_rate == dc1394->frame_rate)
            break;

    if (!rate_entry->frame_rate || !fmt_entry->width) {
        av_log(c, AV_LOG_ERROR, "Can't find matching camera format for %s, %dx%d@%d:1000fps\n", av_get_pix_fmt_name(pix_fmt),
                                                                                                width, height, dc1394->frame_rate);
        return AVERROR(EINVAL);
    }

    /* create a video stream */
    stream = avformat_new_stream(c, NULL);
    if (!stream)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(stream, 64, 1, 1000);
    stream->codec->codec_type    = AVMEDIA_TYPE_VIDEO;
    stream->codec->codec_id      = AV_CODEC_ID_RAWVIDEO;
    stream->codec->time_base.den = framerate.num;
    stream->codec->time_base.num = framerate.den;
    stream->codec->width         = fmt_entry->width;
    stream->codec->height        = fmt_entry->height;
    stream->codec->pix_fmt       = fmt_entry->pix_fmt;

    /* packet init: size and metadata are constant, so prepare a template
     * once and only patch data/pts per captured frame */
    av_init_packet(&dc1394->packet);
    dc1394->packet.size         = avpicture_get_size(fmt_entry->pix_fmt, fmt_entry->width, fmt_entry->height);
    dc1394->packet.stream_index = stream->index;
    dc1394->packet.flags       |= AV_PKT_FLAG_KEY;

    dc1394->current_frame = 0;

    stream->codec->bit_rate = av_rescale(dc1394->packet.size * 8, rate_entry->frame_rate, 1000);
    *select_fps = rate_entry;
    *select_fmt = fmt_entry;
    return 0;
}
 
#if HAVE_LIBDC1394_1
/* Open and start DMA capture using the libdc1394 v1 API.
 * Returns 0 on success, -1 on any failure (resources released on error paths). */
static int dc1394_v1_read_header(AVFormatContext *c)
{
    dc1394_data* dc1394 = c->priv_data;
    AVStream* vst;                       /* NOTE(review): unused here; stream creation happens in dc1394_read_common */
    nodeid_t* camera_nodes;
    int res;
    struct dc1394_frame_format *fmt = NULL;
    struct dc1394_frame_rate *fps = NULL;

    /* parse options, pick video mode/framerate and create the stream */
    if (dc1394_read_common(c, &fmt, &fps) != 0)
        return -1;

    /* Now let us prep the hardware. */
    dc1394->handle = dc1394_create_handle(0); /* FIXME: gotta have ap->port */
    if (!dc1394->handle) {
        av_log(c, AV_LOG_ERROR, "Can't acquire dc1394 handle on port %d\n", 0 /* ap->port */);
        goto out;
    }
    camera_nodes = dc1394_get_camera_nodes(dc1394->handle, &res, 1);
    if (!camera_nodes || camera_nodes[dc1394->channel] == DC1394_NO_CAMERA) {
        av_log(c, AV_LOG_ERROR, "There's no IIDC camera on the channel %d\n", dc1394->channel);
        goto out_handle;
    }
    /* configure DMA capture for the selected channel/mode/rate */
    res = dc1394_dma_setup_capture(dc1394->handle, camera_nodes[dc1394->channel],
                                   0,
                                   FORMAT_VGA_NONCOMPRESSED,
                                   fmt->frame_size_id,
                                   SPEED_400,
                                   fps->frame_rate_id, 8, 1,
                                   c->filename,
                                   &dc1394->camera);
    dc1394_free_camera_nodes(camera_nodes);
    if (res != DC1394_SUCCESS) {
        av_log(c, AV_LOG_ERROR, "Can't prepare camera for the DMA capture\n");
        goto out_handle;
    }

    res = dc1394_start_iso_transmission(dc1394->handle, dc1394->camera.node);
    if (res != DC1394_SUCCESS) {
        av_log(c, AV_LOG_ERROR, "Can't start isochronous transmission\n");
        goto out_handle_dma;
    }

    return 0;

/* unwind in reverse acquisition order */
out_handle_dma:
    dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
    dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
out_handle:
    dc1394_destroy_handle(dc1394->handle);
out:
    return -1;
}
 
/* Capture one frame via DMA (v1 API) and return it in *pkt.
 * The returned packet aliases the template in dc1394->packet and points
 * directly into the DMA capture buffer; the previous buffer is released
 * at the start of the next call.
 * Returns the packet size on success, -1 on capture failure. */
static int dc1394_v1_read_packet(AVFormatContext *c, AVPacket *pkt)
{
    struct dc1394_data *dc1394 = c->priv_data;
    int res;

    /* discard stale frame */
    if (dc1394->current_frame++) {
        if (dc1394_dma_done_with_buffer(&dc1394->camera) != DC1394_SUCCESS)
            av_log(c, AV_LOG_ERROR, "failed to release %d frame\n", dc1394->current_frame);
    }

    res = dc1394_dma_single_capture(&dc1394->camera);

    if (res == DC1394_SUCCESS) {
        dc1394->packet.data = (uint8_t *)(dc1394->camera.capture_buffer);
        /* pts in microseconds, derived from frame count / (fps*1000) */
        dc1394->packet.pts = (dc1394->current_frame * 1000000) / dc1394->frame_rate;
        res = dc1394->packet.size;
    } else {
        av_log(c, AV_LOG_ERROR, "DMA capture failed\n");
        dc1394->packet.data = NULL;
        res = -1;
    }

    *pkt = dc1394->packet;
    return res;
}
 
/* Tear down v1 capture: stop transmission, release DMA resources,
 * then destroy the raw1394 handle.  Order mirrors the setup sequence
 * in dc1394_v1_read_header reversed. */
static int dc1394_v1_close(AVFormatContext * context)
{
    struct dc1394_data *dc1394 = context->priv_data;

    dc1394_stop_iso_transmission(dc1394->handle, dc1394->camera.node);
    dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
    dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
    dc1394_destroy_handle(dc1394->handle);

    return 0;
}
 
#elif HAVE_LIBDC1394_2
/* Open and start capture using the libdc1394 v2 API.
 *
 * Fix: the result of dc1394_camera_new() was never checked; a failed
 * camera open previously led to NULL-pointer dereferences in the
 * subsequent dc1394_video_* / dc1394_capture_* calls.
 *
 * Returns 0 on success, -1 on any failure (resources released on error paths). */
static int dc1394_v2_read_header(AVFormatContext *c)
{
    dc1394_data* dc1394 = c->priv_data;
    dc1394camera_list_t *list;
    int res, i;
    struct dc1394_frame_format *fmt = NULL;
    struct dc1394_frame_rate *fps = NULL;

    /* parse options, pick video mode/framerate and create the stream */
    if (dc1394_read_common(c, &fmt, &fps) != 0)
        return -1;

    /* Now let us prep the hardware. */
    dc1394->d = dc1394_new();
    dc1394_camera_enumerate (dc1394->d, &list);
    if ( !list || list->num == 0) {
        av_log(c, AV_LOG_ERROR, "Unable to look for an IIDC camera\n\n");
        goto out;
    }

    /* FIXME: To select a specific camera I need to search in list its guid */
    dc1394->camera = dc1394_camera_new (dc1394->d, list->ids[0].guid);
    if (!dc1394->camera) {
        /* camera open failed: bail out before dereferencing it below */
        av_log(c, AV_LOG_ERROR, "Unable to open camera\n");
        dc1394_camera_free_list (list);
        goto out;
    }
    if (list->num > 1) {
        av_log(c, AV_LOG_INFO, "Working with the first camera found\n");
    }

    /* Freeing list of cameras */
    dc1394_camera_free_list (list);

    /* Select MAX Speed possible from the cam */
    if (dc1394->camera->bmode_capable>0) {
        dc1394_video_set_operation_mode(dc1394->camera, DC1394_OPERATION_MODE_1394B);
        i = DC1394_ISO_SPEED_800;
    } else {
        i = DC1394_ISO_SPEED_400;
    }

    /* walk down from the preferred speed until one is accepted */
    for (res = DC1394_FAILURE; i >= DC1394_ISO_SPEED_MIN && res != DC1394_SUCCESS; i--) {
        res=dc1394_video_set_iso_speed(dc1394->camera, i);
    }
    if (res != DC1394_SUCCESS) {
        av_log(c, AV_LOG_ERROR, "Couldn't set ISO Speed\n");
        goto out_camera;
    }

    if (dc1394_video_set_mode(dc1394->camera, fmt->frame_size_id) != DC1394_SUCCESS) {
        av_log(c, AV_LOG_ERROR, "Couldn't set video format\n");
        goto out_camera;
    }

    if (dc1394_video_set_framerate(dc1394->camera,fps->frame_rate_id) != DC1394_SUCCESS) {
        av_log(c, AV_LOG_ERROR, "Couldn't set framerate %d \n",fps->frame_rate);
        goto out_camera;
    }
    if (dc1394_capture_setup(dc1394->camera, 10, DC1394_CAPTURE_FLAGS_DEFAULT)!=DC1394_SUCCESS) {
        av_log(c, AV_LOG_ERROR, "Cannot setup camera \n");
        goto out_camera;
    }

    if (dc1394_video_set_transmission(dc1394->camera, DC1394_ON) !=DC1394_SUCCESS) {
        av_log(c, AV_LOG_ERROR, "Cannot start capture\n");
        goto out_camera;
    }
    return 0;

out_camera:
    dc1394_capture_stop(dc1394->camera);
    dc1394_video_set_transmission(dc1394->camera, DC1394_OFF);
    dc1394_camera_free (dc1394->camera);
out:
    dc1394_free(dc1394->d);
    return -1;
}
 
/* Dequeue one frame (v2 API) and return it in *pkt.
 * The returned packet aliases the template in dc1394->packet and points
 * directly into the library-owned frame buffer; the previous frame is
 * re-enqueued at the start of the next call.
 * Returns the frame size in bytes on success, -1 on capture failure. */
static int dc1394_v2_read_packet(AVFormatContext *c, AVPacket *pkt)
{
    struct dc1394_data *dc1394 = c->priv_data;
    int res;

    /* discard stale frame */
    if (dc1394->current_frame++) {
        if (dc1394_capture_enqueue(dc1394->camera, dc1394->frame) != DC1394_SUCCESS)
            av_log(c, AV_LOG_ERROR, "failed to release %d frame\n", dc1394->current_frame);
    }

    res = dc1394_capture_dequeue(dc1394->camera, DC1394_CAPTURE_POLICY_WAIT, &dc1394->frame);
    if (res == DC1394_SUCCESS) {
        dc1394->packet.data = (uint8_t *) dc1394->frame->image;
        /* pts in microseconds, derived from frame count / (fps*1000) */
        dc1394->packet.pts = dc1394->current_frame * 1000000 / dc1394->frame_rate;
        res = dc1394->frame->image_bytes;
    } else {
        av_log(c, AV_LOG_ERROR, "DMA capture failed\n");
        dc1394->packet.data = NULL;
        res = -1;
    }

    *pkt = dc1394->packet;
    return res;
}
 
/* Tear down v2 capture: stop transmission and capture, free the camera,
 * then release the library context.  Mirrors the setup in
 * dc1394_v2_read_header reversed. */
static int dc1394_v2_close(AVFormatContext * context)
{
    struct dc1394_data *dc1394 = context->priv_data;

    dc1394_video_set_transmission(dc1394->camera, DC1394_OFF);
    dc1394_capture_stop(dc1394->camera);
    dc1394_camera_free(dc1394->camera);
    dc1394_free(dc1394->d);

    return 0;
}
 
/* Demuxer definition for the libdc1394 v2 API (still inside the
 * HAVE_LIBDC1394_2 branch opened above). */
AVInputFormat ff_libdc1394_demuxer = {
    .name           = "libdc1394",
    .long_name      = NULL_IF_CONFIG_SMALL("dc1394 v.2 A/V grab"),
    .priv_data_size = sizeof(struct dc1394_data),
    .read_header    = dc1394_v2_read_header,
    .read_packet    = dc1394_v2_read_packet,
    .read_close     = dc1394_v2_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &libdc1394_class,
};

#endif
#if HAVE_LIBDC1394_1
/* Demuxer definition for the legacy libdc1394 v1 API; only one of the
 * two definitions is ever compiled in. */
AVInputFormat ff_libdc1394_demuxer = {
    .name           = "libdc1394",
    .long_name      = NULL_IF_CONFIG_SMALL("dc1394 v.1 A/V grab"),
    .priv_data_size = sizeof(struct dc1394_data),
    .read_header    = dc1394_v1_read_header,
    .read_packet    = dc1394_v1_read_packet,
    .read_close     = dc1394_v1_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &libdc1394_class,
};
#endif
/contrib/sdk/sources/ffmpeg/libavdevice/openal-dec.c
0,0 → 1,252
/*
* Copyright (c) 2011 Jonathan Baldwin
*
* This file is part of FFmpeg.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
 
/**
* @file
* OpenAL 1.1 capture device for libavdevice
**/
 
#include <AL/al.h>
#include <AL/alc.h>
 
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavformat/internal.h"
#include "avdevice.h"
 
/** Private context for the OpenAL capture device. */
typedef struct {
    AVClass *class;
    /** OpenAL capture device context. **/
    ALCdevice *device;
    /** The number of channels in the captured audio. **/
    int channels;
    /** The sample rate (in Hz) of the captured audio. **/
    int sample_rate;
    /** The sample size (in bits) of the captured audio. **/
    int sample_size;
    /** The OpenAL sample format of the captured audio. **/
    ALCenum sample_format;
    /** The number of bytes between two consecutive samples of the same channel/component. **/
    ALCint sample_step;
    /** If true, print a list of capture devices on this system and exit. **/
    int list_devices;
} al_data;
 
/** Static properties of one AL_FORMAT_* value (see get_al_format_info). */
typedef struct {
    ALCenum al_fmt;          /* the AL_FORMAT_* constant itself */
    enum AVCodecID codec_id; /* matching raw PCM codec id */
    int channels;            /* channel count implied by the format */
} al_format_info;
 
#define LOWEST_AL_FORMAT FFMIN(FFMIN(AL_FORMAT_MONO8,AL_FORMAT_MONO16),FFMIN(AL_FORMAT_STEREO8,AL_FORMAT_STEREO16))
 
/**
* Get information about an AL_FORMAT value.
* @param al_fmt the AL_FORMAT value to find information about.
* @return A pointer to a structure containing information about the AL_FORMAT value.
*/
/**
 * Get information about an AL_FORMAT value.
 * @param al_fmt the AL_FORMAT value to find information about.
 * @return A pointer to a structure containing information about the AL_FORMAT value.
 */
static inline al_format_info* get_al_format_info(ALCenum al_fmt)
{
    /* Sparse table indexed by (AL_FORMAT_* - LOWEST_AL_FORMAT).
     * NOTE(review): callers must only pass one of the four listed
     * formats; any other value indexes an unpopulated/out-of-range slot. */
    static al_format_info info_table[] = {
        [AL_FORMAT_MONO8-LOWEST_AL_FORMAT]    = {AL_FORMAT_MONO8, AV_CODEC_ID_PCM_U8, 1},
        [AL_FORMAT_MONO16-LOWEST_AL_FORMAT]   = {AL_FORMAT_MONO16, AV_NE (AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE), 1},
        [AL_FORMAT_STEREO8-LOWEST_AL_FORMAT]  = {AL_FORMAT_STEREO8, AV_CODEC_ID_PCM_U8, 2},
        [AL_FORMAT_STEREO16-LOWEST_AL_FORMAT] = {AL_FORMAT_STEREO16, AV_NE (AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE), 2},
    };

    return &info_table[al_fmt-LOWEST_AL_FORMAT];
}
 
/**
* Get the OpenAL error code, translated into an av/errno error code.
* @param device The ALC device to check for errors.
* @param error_msg_ret A pointer to a char* in which to return the error message, or NULL if desired.
* @return The error code, or 0 if there is no error.
*/
/**
 * Get the OpenAL error code, translated into an av/errno error code.
 * @param device The ALC device to check for errors.
 * @param error_msg_ret A pointer to a char* in which to return the error message, or NULL if desired.
 * @return The error code, or 0 if there is no error.
 */
static inline int al_get_error(ALCdevice *device, const char** error_msg_ret)
{
    int av_err;
    ALCenum al_err = alcGetError(device);

    if (error_msg_ret)
        *error_msg_ret = (const char*) alcGetString(device, al_err);

    switch (al_err) {
    case ALC_NO_ERROR:
        av_err = 0;
        break;
    case ALC_INVALID_DEVICE:
        av_err = AVERROR(ENODEV);
        break;
    case ALC_INVALID_CONTEXT:
    case ALC_INVALID_ENUM:
    case ALC_INVALID_VALUE:
        av_err = AVERROR(EINVAL);
        break;
    case ALC_OUT_OF_MEMORY:
        av_err = AVERROR(ENOMEM);
        break;
    default:
        av_err = AVERROR(EIO);
        break;
    }
    return av_err;
}
 
/**
* Print out a list of OpenAL capture devices on this system.
*/
/**
 * Print out a list of OpenAL capture devices on this system.
 */
static inline void print_al_capture_devices(void *log_ctx)
{
    const char *devices;

    if (!(devices = alcGetString(NULL, ALC_CAPTURE_DEVICE_SPECIFIER)))
        return;

    av_log(log_ctx, AV_LOG_INFO, "List of OpenAL capture devices on this system:\n");

    /* the specifier list is a sequence of NUL-terminated strings,
     * terminated by an empty string */
    for (; *devices != '\0'; devices += strlen(devices) + 1)
        av_log(log_ctx, AV_LOG_INFO, "  %s\n", devices);
}
 
/* Open the OpenAL capture device named by ctx->filename (or the default
 * device), create the audio stream and start capturing.
 *
 * Fix: error_msg was left uninitialized, so the avformat_new_stream()
 * failure path read an indeterminate pointer at the fail label
 * (undefined behavior).  It now starts as NULL, which the fail path
 * already handles.
 *
 * Returns 0 on success, a negative AVERROR code on failure. */
static int read_header(AVFormatContext *ctx)
{
    al_data *ad = ctx->priv_data;
    /* indexed by [sample_size/8 - 1][channels - 1]; both options are
     * range-checked by the AVOption table (8..16 bits, 1..2 channels) */
    static const ALCenum sample_formats[2][2] = {
        { AL_FORMAT_MONO8,  AL_FORMAT_STEREO8  },
        { AL_FORMAT_MONO16, AL_FORMAT_STEREO16 }
    };
    int error = 0;
    const char *error_msg = NULL;
    AVStream *st = NULL;
    AVCodecContext *codec = NULL;

    if (ad->list_devices) {
        print_al_capture_devices(ctx);
        return AVERROR_EXIT;
    }

    ad->sample_format = sample_formats[ad->sample_size/8-1][ad->channels-1];

    /* Open device for capture */
    ad->device =
        alcCaptureOpenDevice(ctx->filename[0] ? ctx->filename : NULL,
                             ad->sample_rate,
                             ad->sample_format,
                             ad->sample_rate); /* Maximum 1 second of sample data to be read at once */

    if (error = al_get_error(ad->device, &error_msg)) goto fail;

    /* Create stream */
    if (!(st = avformat_new_stream(ctx, NULL))) {
        error = AVERROR(ENOMEM);
        goto fail;
    }

    /* We work in microseconds */
    avpriv_set_pts_info(st, 64, 1, 1000000);

    /* Set codec parameters */
    codec = st->codec;
    codec->codec_type = AVMEDIA_TYPE_AUDIO;
    codec->sample_rate = ad->sample_rate;
    codec->channels = get_al_format_info(ad->sample_format)->channels;
    codec->codec_id = get_al_format_info(ad->sample_format)->codec_id;

    /* This is needed to read the audio data */
    ad->sample_step = (av_get_bits_per_sample(get_al_format_info(ad->sample_format)->codec_id) *
                       get_al_format_info(ad->sample_format)->channels) / 8;

    /* Finally, start the capture process */
    alcCaptureStart(ad->device);

    return 0;

fail:
    /* Handle failure */
    if (ad->device)
        alcCaptureCloseDevice(ad->device);
    if (error_msg)
        av_log(ctx, AV_LOG_ERROR, "Cannot open device: %s\n", error_msg);
    return error;
}
 
/* Read all currently available samples from the capture device into a
 * newly allocated packet.  The pts is the wall-clock time at allocation.
 *
 * Fixes: the av_new_packet() return value was ignored, so on allocation
 * failure alcCaptureSamples() would write through pkt->data == NULL; the
 * error is now propagated.  error_msg is also initialized so the fail
 * path never reads an indeterminate pointer.
 *
 * Returns the packet size on success, a negative AVERROR code on failure. */
static int read_packet(AVFormatContext* ctx, AVPacket *pkt)
{
    al_data *ad = ctx->priv_data;
    int error = 0;
    const char *error_msg = NULL;
    ALCint nb_samples;

    /* Get number of samples available */
    alcGetIntegerv(ad->device, ALC_CAPTURE_SAMPLES, (ALCsizei) sizeof(ALCint), &nb_samples);
    if (error = al_get_error(ad->device, &error_msg)) goto fail;

    /* Create a packet of appropriate size */
    if ((error = av_new_packet(pkt, nb_samples*ad->sample_step)) < 0)
        goto fail;
    pkt->pts = av_gettime();

    /* Fill the packet with the available samples */
    alcCaptureSamples(ad->device, pkt->data, nb_samples);
    if (error = al_get_error(ad->device, &error_msg)) goto fail;

    return pkt->size;
fail:
    /* Handle failure */
    if (pkt->data)
        av_destruct_packet(pkt);
    if (error_msg)
        av_log(ctx, AV_LOG_ERROR, "Error: %s\n", error_msg);
    return error;
}
 
/* Stop capture and close the OpenAL device, if one was opened. */
static int read_close(AVFormatContext* ctx)
{
    al_data *ad = ctx->priv_data;

    if (!ad->device)
        return 0;

    alcCaptureStop(ad->device);
    alcCaptureCloseDevice(ad->device);
    return 0;
}
 
#define OFFSET(x) offsetof(al_data, x)
 
/* Private AVOptions for the OpenAL capture device. */
static const AVOption options[] = {
    {"channels", "set number of channels",     OFFSET(channels),     AV_OPT_TYPE_INT, {.i64=2},     1, 2,      AV_OPT_FLAG_DECODING_PARAM },
    {"sample_rate", "set sample rate",         OFFSET(sample_rate),  AV_OPT_TYPE_INT, {.i64=44100}, 1, 192000, AV_OPT_FLAG_DECODING_PARAM },
    {"sample_size", "set sample size",         OFFSET(sample_size),  AV_OPT_TYPE_INT, {.i64=16},    8, 16,     AV_OPT_FLAG_DECODING_PARAM },
    {"list_devices", "list available devices", OFFSET(list_devices), AV_OPT_TYPE_INT, {.i64=0},     0, 1,      AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
    {"true",  "", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
    {"false", "", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
    {NULL},
};

static const AVClass class = {
    .class_name = "openal",
    .item_name = av_default_item_name,
    .option = options,
    .version = LIBAVUTIL_VERSION_INT
};

/* Demuxer definition for the OpenAL capture device. */
AVInputFormat ff_openal_demuxer = {
    .name = "openal",
    .long_name = NULL_IF_CONFIG_SMALL("OpenAL audio capture device"),
    .priv_data_size = sizeof(al_data),
    .read_probe = NULL,
    .read_header = read_header,
    .read_packet = read_packet,
    .read_close = read_close,
    .flags = AVFMT_NOFILE,
    .priv_class = &class
};
/contrib/sdk/sources/ffmpeg/libavdevice/oss_audio.c
0,0 → 1,328
/*
* Linux audio play and grab interface
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#if HAVE_SOUNDCARD_H
#include <soundcard.h>
#else
#include <sys/soundcard.h>
#endif
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
 
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavcodec/avcodec.h"
#include "avdevice.h"
#include "libavformat/internal.h"
 
#define AUDIO_BLOCK_SIZE 4096
 
/** Private context shared by the OSS capture demuxer and playback muxer. */
typedef struct {
    AVClass *class;
    int fd;                          /* open file descriptor of the dsp device */
    int sample_rate;                 /* requested, then actual, sample rate in Hz */
    int channels;
    int frame_size;                  /* in bytes ! */
    enum AVCodecID codec_id;         /* native 16-bit PCM codec chosen in audio_open() */
    unsigned int flip_left : 1;      /* invert left channel (AUDIO_FLIP_LEFT env var) */
    uint8_t buffer[AUDIO_BLOCK_SIZE]; /* output staging buffer (muxer only) */
    int buffer_ptr;                  /* fill level of buffer in bytes */
} AudioData;
 
/* Open and configure an OSS dsp device for capture or playback.
 *
 * Negotiates a native-endian 16-bit sample format, the requested channel
 * count and sample rate (the achieved rate is written back into
 * s->sample_rate), and stores the open fd in s->fd.
 *
 * Fix: the SNDCTL_DSP_GETFMTS ioctl result was never checked, so on
 * failure the format selection below read an uninitialized 'tmp'
 * (undefined behavior).  The error is now detected and reported.
 *
 * @param s1           format context whose priv_data is an AudioData
 * @param is_output    non-zero to open for playback, zero for capture
 * @param audio_device device path, e.g. /dev/dsp
 * @return 0 on success, AVERROR(EIO) on failure
 */
static int audio_open(AVFormatContext *s1, int is_output, const char *audio_device)
{
    AudioData *s = s1->priv_data;
    int audio_fd;
    int tmp, err;
    char *flip = getenv("AUDIO_FLIP_LEFT");

    if (is_output)
        audio_fd = avpriv_open(audio_device, O_WRONLY);
    else
        audio_fd = avpriv_open(audio_device, O_RDONLY);
    if (audio_fd < 0) {
        av_log(s1, AV_LOG_ERROR, "%s: %s\n", audio_device, strerror(errno));
        return AVERROR(EIO);
    }

    if (flip && *flip == '1') {
        s->flip_left = 1;
    }

    /* non blocking mode */
    if (!is_output) {
        if (fcntl(audio_fd, F_SETFL, O_NONBLOCK) < 0) {
            av_log(s1, AV_LOG_WARNING, "%s: Could not enable non block mode (%s)\n", audio_device, strerror(errno));
        }
    }

    s->frame_size = AUDIO_BLOCK_SIZE;

    /* select format : favour native format */
    err = ioctl(audio_fd, SNDCTL_DSP_GETFMTS, &tmp);
    if (err < 0) {
        /* without the mask 'tmp' would be uninitialized below */
        av_log(s1, AV_LOG_ERROR, "SNDCTL_DSP_GETFMTS: %s\n", strerror(errno));
        goto fail;
    }

#if HAVE_BIGENDIAN
    if (tmp & AFMT_S16_BE) {
        tmp = AFMT_S16_BE;
    } else if (tmp & AFMT_S16_LE) {
        tmp = AFMT_S16_LE;
    } else {
        tmp = 0;
    }
#else
    if (tmp & AFMT_S16_LE) {
        tmp = AFMT_S16_LE;
    } else if (tmp & AFMT_S16_BE) {
        tmp = AFMT_S16_BE;
    } else {
        tmp = 0;
    }
#endif

    switch(tmp) {
    case AFMT_S16_LE:
        s->codec_id = AV_CODEC_ID_PCM_S16LE;
        break;
    case AFMT_S16_BE:
        s->codec_id = AV_CODEC_ID_PCM_S16BE;
        break;
    default:
        av_log(s1, AV_LOG_ERROR, "Soundcard does not support 16 bit sample format\n");
        close(audio_fd);
        return AVERROR(EIO);
    }
    err=ioctl(audio_fd, SNDCTL_DSP_SETFMT, &tmp);
    if (err < 0) {
        av_log(s1, AV_LOG_ERROR, "SNDCTL_DSP_SETFMT: %s\n", strerror(errno));
        goto fail;
    }

    tmp = (s->channels == 2);
    err = ioctl(audio_fd, SNDCTL_DSP_STEREO, &tmp);
    if (err < 0) {
        av_log(s1, AV_LOG_ERROR, "SNDCTL_DSP_STEREO: %s\n", strerror(errno));
        goto fail;
    }

    tmp = s->sample_rate;
    err = ioctl(audio_fd, SNDCTL_DSP_SPEED, &tmp);
    if (err < 0) {
        av_log(s1, AV_LOG_ERROR, "SNDCTL_DSP_SPEED: %s\n", strerror(errno));
        goto fail;
    }
    s->sample_rate = tmp; /* store real sample rate */
    s->fd = audio_fd;

    return 0;
 fail:
    close(audio_fd);
    return AVERROR(EIO);
}
 
/* Close the dsp device fd; always reports success. */
static int audio_close(AudioData *s)
{
    close(s->fd);
    return 0;
}
 
/* sound output support */
/* sound output support */

/* Muxer write_header: copy the stream's audio parameters into the
 * private context and open the device for playback.
 * Returns 0 on success, AVERROR(EIO) on failure. */
static int audio_write_header(AVFormatContext *s1)
{
    AudioData *s = s1->priv_data;
    AVStream *st = s1->streams[0];

    s->sample_rate = st->codec->sample_rate;
    s->channels    = st->codec->channels;

    if (audio_open(s1, 1, s1->filename) < 0)
        return AVERROR(EIO);
    return 0;
}
 
/* Muxer write_packet: stage the packet's bytes into the fixed-size
 * internal buffer and flush it to the device whenever it fills up.
 * Writes are retried on EAGAIN/EINTR and on short (zero) writes.
 * Returns 0 on success, AVERROR(EIO) on an unrecoverable write error. */
static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
{
    AudioData *s = s1->priv_data;
    const uint8_t *src = pkt->data;
    int remaining = pkt->size;

    while (remaining > 0) {
        int chunk = FFMIN(AUDIO_BLOCK_SIZE - s->buffer_ptr, remaining);

        memcpy(s->buffer + s->buffer_ptr, src, chunk);
        s->buffer_ptr += chunk;
        src           += chunk;
        remaining     -= chunk;

        if (s->buffer_ptr >= AUDIO_BLOCK_SIZE) {
            /* buffer full: push one whole block to the device */
            int written;
            do {
                written = write(s->fd, s->buffer, AUDIO_BLOCK_SIZE);
                if (written < 0 && errno != EAGAIN && errno != EINTR)
                    return AVERROR(EIO);
            } while (written <= 0);
            s->buffer_ptr = 0;
        }
    }
    return 0;
}
 
/* Muxer write_trailer: close the device.
 * NOTE(review): any partial block still staged in s->buffer is
 * discarded, not flushed — confirm this is the intended behavior. */
static int audio_write_trailer(AVFormatContext *s1)
{
    AudioData *s = s1->priv_data;

    audio_close(s);
    return 0;
}
 
/* grab support */
 
/* grab support */

/* Demuxer read_header: create the audio stream, open the device for
 * capture, then publish the parameters actually negotiated by the
 * hardware.  Returns 0 on success, a negative AVERROR code on failure. */
static int audio_read_header(AVFormatContext *s1)
{
    AudioData *s = s1->priv_data;
    AVStream *st = avformat_new_stream(s1, NULL);

    if (!st)
        return AVERROR(ENOMEM);

    if (audio_open(s1, 0, s1->filename) < 0)
        return AVERROR(EIO);

    /* take real parameters */
    st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id    = s->codec_id;
    st->codec->sample_rate = s->sample_rate;
    st->codec->channels    = s->channels;

    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
    return 0;
}
 
/* Demuxer read_packet: read up to frame_size bytes from the device,
 * back-date the pts by the amount of audio still queued in the kernel
 * fifo, and optionally invert the left channel.
 * Returns 0 on success, AVERROR(errno) or AVERROR_EOF on failure. */
static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    AudioData *s = s1->priv_data;
    int ret, bdelay;
    int64_t cur_time;
    struct audio_buf_info abufi;

    if ((ret=av_new_packet(pkt, s->frame_size)) < 0)
        return ret;

    ret = read(s->fd, pkt->data, pkt->size);
    if (ret <= 0){
        av_free_packet(pkt);
        pkt->size = 0;
        if (ret<0)  return AVERROR(errno);
        else        return AVERROR_EOF;
    }
    pkt->size = ret;  /* short reads are allowed: shrink to what we got */

    /* compute pts of the start of the packet */
    cur_time = av_gettime();
    bdelay = ret;
    if (ioctl(s->fd, SNDCTL_DSP_GETISPACE, &abufi) == 0) {
        bdelay += abufi.bytes;   /* bytes still buffered in the driver */
    }
    /* subtract time represented by the number of bytes in the audio fifo */
    cur_time -= (bdelay * 1000000LL) / (s->sample_rate * s->channels);

    /* convert to wanted units */
    pkt->pts = cur_time;

    if (s->flip_left && s->channels == 2) {
        /* invert the left (first) 16-bit sample of each stereo frame;
         * enabled via the AUDIO_FLIP_LEFT environment variable */
        int i;
        short *p = (short *) pkt->data;

        for (i = 0; i < ret; i += 4) {
            *p = ~*p;
            p += 2;
        }
    }
    return 0;
}
 
/* Demuxer read_close: release the capture device. */
static int audio_read_close(AVFormatContext *s1)
{
    AudioData *s = s1->priv_data;

    audio_close(s);
    return 0;
}
 
#if CONFIG_OSS_INDEV
/* Private AVOptions for the OSS capture demuxer. */
static const AVOption options[] = {
    { "sample_rate", "", offsetof(AudioData, sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { "channels",    "", offsetof(AudioData, channels),    AV_OPT_TYPE_INT, {.i64 = 2},     1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};

static const AVClass oss_demuxer_class = {
    .class_name     = "OSS demuxer",
    .item_name      = av_default_item_name,
    .option         = options,
    .version        = LIBAVUTIL_VERSION_INT,
};

/* OSS capture demuxer definition. */
AVInputFormat ff_oss_demuxer = {
    .name           = "oss",
    .long_name      = NULL_IF_CONFIG_SMALL("OSS (Open Sound System) capture"),
    .priv_data_size = sizeof(AudioData),
    .read_header    = audio_read_header,
    .read_packet    = audio_read_packet,
    .read_close     = audio_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &oss_demuxer_class,
};
#endif

#if CONFIG_OSS_OUTDEV
/* OSS playback muxer definition. */
AVOutputFormat ff_oss_muxer = {
    .name           = "oss",
    .long_name      = NULL_IF_CONFIG_SMALL("OSS (Open Sound System) playback"),
    .priv_data_size = sizeof(AudioData),
    /* XXX: we make the assumption that the soundcard accepts this format */
    /* XXX: find better solution with "preinit" method, needed also in
       other formats */
    .audio_codec    = AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE),
    .video_codec    = AV_CODEC_ID_NONE,
    .write_header   = audio_write_header,
    .write_packet   = audio_write_packet,
    .write_trailer  = audio_write_trailer,
    .flags          = AVFMT_NOFILE,
};
#endif
/contrib/sdk/sources/ffmpeg/libavdevice/pulse_audio_common.c
0,0 → 1,42
/*
* Pulseaudio input
* Copyright (c) 2011 Luca Barbato <lu_zero@gentoo.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/attributes.h"
#include "libavcodec/avcodec.h"
#include "pulse_audio_common.h"
 
/* Map a raw-PCM AVCodecID onto the equivalent PulseAudio sample format.
 * Returns PA_SAMPLE_INVALID for any codec with no PulseAudio equivalent. */
pa_sample_format_t av_cold codec_id_to_pulse_format(int codec_id)
{
    static const struct {
        int                codec_id;
        pa_sample_format_t pulse_fmt;
    } fmt_map[] = {
        { AV_CODEC_ID_PCM_U8,    PA_SAMPLE_U8        },
        { AV_CODEC_ID_PCM_ALAW,  PA_SAMPLE_ALAW      },
        { AV_CODEC_ID_PCM_MULAW, PA_SAMPLE_ULAW      },
        { AV_CODEC_ID_PCM_S16LE, PA_SAMPLE_S16LE     },
        { AV_CODEC_ID_PCM_S16BE, PA_SAMPLE_S16BE     },
        { AV_CODEC_ID_PCM_F32LE, PA_SAMPLE_FLOAT32LE },
        { AV_CODEC_ID_PCM_F32BE, PA_SAMPLE_FLOAT32BE },
        { AV_CODEC_ID_PCM_S32LE, PA_SAMPLE_S32LE     },
        { AV_CODEC_ID_PCM_S32BE, PA_SAMPLE_S32BE     },
        { AV_CODEC_ID_PCM_S24LE, PA_SAMPLE_S24LE     },
        { AV_CODEC_ID_PCM_S24BE, PA_SAMPLE_S24BE     },
    };
    size_t i;

    for (i = 0; i < sizeof(fmt_map) / sizeof(fmt_map[0]); i++)
        if (fmt_map[i].codec_id == codec_id)
            return fmt_map[i].pulse_fmt;
    return PA_SAMPLE_INVALID;
}
/contrib/sdk/sources/ffmpeg/libavdevice/pulse_audio_common.h
0,0 → 1,29
/*
* Pulseaudio input
* Copyright (c) 2011 Luca Barbato <lu_zero@gentoo.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVDEVICE_PULSE_AUDIO_COMMON_H
#define AVDEVICE_PULSE_AUDIO_COMMON_H

#include <pulse/simple.h>

/**
 * Map a raw-PCM AVCodecID onto the equivalent PulseAudio sample format.
 * @param codec_id an AVCodecID value (int to avoid an avcodec.h dependency here)
 * @return the matching pa_sample_format_t, or PA_SAMPLE_INVALID if none
 */
pa_sample_format_t codec_id_to_pulse_format(int codec_id);

#endif /* AVDEVICE_PULSE_AUDIO_COMMON_H */
/contrib/sdk/sources/ffmpeg/libavdevice/pulse_audio_dec.c
0,0 → 1,173
/*
* Pulseaudio input
* Copyright (c) 2011 Luca Barbato <lu_zero@gentoo.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* PulseAudio input using the simple API.
* @author Luca Barbato <lu_zero@gentoo.org>
*/
 
#include <pulse/simple.h>
#include <pulse/rtclock.h>
#include <pulse/error.h>
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/opt.h"
#include "pulse_audio_common.h"
 
#define DEFAULT_CODEC_ID AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE)
 
/** Private context for the PulseAudio capture demuxer (simple API). */
typedef struct PulseData {
    AVClass *class;
    char *server;            /* PulseAudio server address, or NULL for default */
    char *name;              /* application name shown to the server */
    char *stream_name;       /* stream description shown to the server */
    int sample_rate;         /* capture sample rate in Hz */
    int channels;
    int frame_size;          /* bytes delivered per packet */
    int fragment_size;       /* server-side buffering hint, -1 for default */
    pa_simple *s;            /* simple-API connection handle */
    int64_t pts;             /* pts of the next packet, in microseconds */
    int64_t frame_duration;  /* duration of one frame_size packet, in microseconds */
} PulseData;
 
/* Connect to the PulseAudio server as a recording client, create the
 * audio stream and precompute the per-packet duration used for pts.
 * The device is s->filename unless it is the literal string "default".
 * Returns 0 on success, a negative AVERROR code on failure. */
static av_cold int pulse_read_header(AVFormatContext *s)
{
    PulseData *pd = s->priv_data;
    AVStream *st;
    char *device = NULL;
    int ret;
    enum AVCodecID codec_id =
        s->audio_codec_id == AV_CODEC_ID_NONE ? DEFAULT_CODEC_ID : s->audio_codec_id;
    const pa_sample_spec ss = { codec_id_to_pulse_format(codec_id),
                                pd->sample_rate,
                                pd->channels };

    pa_buffer_attr attr = { -1 };  /* all fields default except fragsize below */

    st = avformat_new_stream(s, NULL);

    if (!st) {
        av_log(s, AV_LOG_ERROR, "Cannot add stream\n");
        return AVERROR(ENOMEM);
    }

    attr.fragsize = pd->fragment_size;

    if (strcmp(s->filename, "default"))
        device = s->filename;

    pd->s = pa_simple_new(pd->server, pd->name,
                          PA_STREAM_RECORD,
                          device, pd->stream_name, &ss,
                          NULL, &attr, &ret);

    if (!pd->s) {
        av_log(s, AV_LOG_ERROR, "pa_simple_new failed: %s\n",
               pa_strerror(ret));
        return AVERROR(EIO);
    }
    /* take real parameters */
    st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id    = codec_id;
    st->codec->sample_rate = pd->sample_rate;
    st->codec->channels    = pd->channels;
    avpriv_set_pts_info(st, 64, 1, 1000000);  /* 64 bits pts in us */

    /* duration of one frame_size packet in microseconds */
    pd->pts = AV_NOPTS_VALUE;
    pd->frame_duration = (pd->frame_size * 1000000LL * 8) /
        (pd->sample_rate * pd->channels * av_get_bits_per_sample(codec_id));

    return 0;
}
 
/* Read exactly frame_size bytes from the server into a new packet and
 * assign it a latency-corrected pts.
 *
 * Fixes: the packet was leaked when pa_simple_get_latency() failed
 * (error return without av_free_packet), and the real av_new_packet()
 * error code was collapsed to AVERROR(ENOMEM); both are corrected.
 *
 * Returns 0 on success, a negative AVERROR code on failure. */
static int pulse_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    PulseData *pd = s->priv_data;
    int res, ret;
    pa_usec_t latency;

    if ((ret = av_new_packet(pkt, pd->frame_size)) < 0)
        return ret;

    if ((pa_simple_read(pd->s, pkt->data, pkt->size, &res)) < 0) {
        av_log(s, AV_LOG_ERROR, "pa_simple_read failed: %s\n",
               pa_strerror(res));
        av_free_packet(pkt);
        return AVERROR(EIO);
    }

    if ((latency = pa_simple_get_latency(pd->s, &res)) == (pa_usec_t) -1) {
        av_log(s, AV_LOG_ERROR, "pa_simple_get_latency() failed: %s\n",
               pa_strerror(res));
        av_free_packet(pkt);   /* was leaked on this path */
        return AVERROR(EIO);
    }

    /* first packet starts at -latency so playback time lines up */
    if (pd->pts == AV_NOPTS_VALUE) {
        pd->pts = -latency;
    }

    pkt->pts = pd->pts;

    pd->pts += pd->frame_duration;

    return 0;
}
 
/* Release the PulseAudio simple-API connection. */
static av_cold int pulse_close(AVFormatContext *s)
{
    PulseData *pd = s->priv_data;
    pa_simple_free(pd->s);
    return 0;
}
 
#define OFFSET(a) offsetof(PulseData, a)
#define D AV_OPT_FLAG_DECODING_PARAM

/* Private AVOptions for the PulseAudio capture demuxer. */
static const AVOption options[] = {
    { "server",        "set PulseAudio server",                             OFFSET(server),        AV_OPT_TYPE_STRING, {.str = NULL},              0, 0,       D },
    { "name",          "set application name",                              OFFSET(name),          AV_OPT_TYPE_STRING, {.str = LIBAVFORMAT_IDENT}, 0, 0,       D },
    { "stream_name",   "set stream description",                            OFFSET(stream_name),   AV_OPT_TYPE_STRING, {.str = "record"},          0, 0,       D },
    { "sample_rate",   "set sample rate in Hz",                             OFFSET(sample_rate),   AV_OPT_TYPE_INT,    {.i64 = 48000},             1, INT_MAX, D },
    { "channels",      "set number of audio channels",                      OFFSET(channels),      AV_OPT_TYPE_INT,    {.i64 = 2},                 1, INT_MAX, D },
    { "frame_size",    "set number of bytes per frame",                     OFFSET(frame_size),    AV_OPT_TYPE_INT,    {.i64 = 1024},              1, INT_MAX, D },
    { "fragment_size", "set buffering size, affects latency and cpu usage", OFFSET(fragment_size), AV_OPT_TYPE_INT,    {.i64 = -1},               -1, INT_MAX, D },
    { NULL },
};

static const AVClass pulse_demuxer_class = {
    .class_name     = "Pulse demuxer",
    .item_name      = av_default_item_name,
    .option         = options,
    .version        = LIBAVUTIL_VERSION_INT,
};

/* PulseAudio capture demuxer definition. */
AVInputFormat ff_pulse_demuxer = {
    .name           = "pulse",
    .long_name      = NULL_IF_CONFIG_SMALL("Pulse audio input"),
    .priv_data_size = sizeof(PulseData),
    .read_header    = pulse_read_header,
    .read_packet    = pulse_read_packet,
    .read_close     = pulse_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &pulse_demuxer_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/pulse_audio_enc.c
0,0 → 1,167
/*
* Copyright (c) 2013 Lukasz Marek <lukasz.m.luki@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <pulse/simple.h>
#include <pulse/error.h>
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavutil/log.h"
#include "pulse_audio_common.h"
 
/* Private state of the PulseAudio playback (muxer) device. */
typedef struct PulseData {
    AVClass *class;
    const char *server;      /* "server" option: PulseAudio server to connect to */
    const char *name;        /* "name" option: application name */
    const char *stream_name; /* "stream_name" option: stream description */
    const char *device;      /* "device" option: sink to play on */
    pa_simple *pa;           /* simple-API connection handle */
    int64_t timestamp;       /* running dts of the audio handed to Pulse, in stream time_base units */
} PulseData;
 
/**
 * Validate the stream layout, connect to the PulseAudio server and
 * configure the playback stream from the codec parameters.
 *
 * @return 0 on success, AVERROR(EINVAL) for an unsupported stream layout,
 *         AVERROR(EIO) if the connection cannot be established
 */
static av_cold int pulse_write_header(AVFormatContext *h)
{
    PulseData *s = h->priv_data;
    pa_buffer_attr attr = { -1, -1, -1, -1, -1 };
    const char *descr = s->stream_name;
    pa_sample_spec spec;
    AVStream *st;
    int err;

    /* Exactly one stream, and it must be audio. */
    if (h->nb_streams != 1 || h->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
        av_log(s, AV_LOG_ERROR, "Only a single audio stream is supported.\n");
        return AVERROR(EINVAL);
    }
    st = h->streams[0];

    /* Stream description: explicit option, else output "filename",
     * else a generic label. */
    if (!descr)
        descr = h->filename[0] ? h->filename : "Playback";

    spec.format   = codec_id_to_pulse_format(st->codec->codec_id);
    spec.rate     = st->codec->sample_rate;
    spec.channels = st->codec->channels;

    s->pa = pa_simple_new(s->server,          // server
                          s->name,            // application name
                          PA_STREAM_PLAYBACK,
                          s->device,          // sink device
                          descr,              // stream description
                          &spec,              // sample format
                          NULL,               // default channel map
                          &attr,              // buffering attributes
                          &err);              // error code out
    if (!s->pa) {
        av_log(s, AV_LOG_ERROR, "pa_simple_new failed: %s\n", pa_strerror(err));
        return AVERROR(EIO);
    }

    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    return 0;
}
 
/* Drain any queued audio, then close and clear the Pulse connection. */
static av_cold int pulse_write_trailer(AVFormatContext *h)
{
    PulseData *s = h->priv_data;
    pa_simple_flush(s->pa, NULL);
    pa_simple_free(s->pa);
    s->pa = NULL;
    return 0;
}
 
/**
 * Hand one packet of raw audio to PulseAudio, keeping a running dts
 * counter for pulse_get_output_timestamp().
 *
 * A NULL packet is a flush request (AVFMT_ALLOW_FLUSH).
 *
 * @return 0 on success, AVERROR(EIO) on a PulseAudio error
 */
static int pulse_write_packet(AVFormatContext *h, AVPacket *pkt)
{
    PulseData *s = h->priv_data;
    int err;

    /* Flush request: drain the stream and return. */
    if (!pkt) {
        if (pa_simple_flush(s->pa, &err) < 0) {
            av_log(s, AV_LOG_ERROR, "pa_simple_flush failed: %s\n", pa_strerror(err));
            return AVERROR(EIO);
        }
        return 0;
    }

    /* Resynchronize the running clock to the packet's dts when present. */
    if (pkt->dts != AV_NOPTS_VALUE)
        s->timestamp = pkt->dts;

    /* Advance by the packet duration; when the muxer did not supply one,
     * derive it from the payload size and the sample parameters. */
    if (pkt->duration) {
        s->timestamp += pkt->duration;
    } else {
        AVStream *st = h->streams[0];
        AVCodecContext *ctx = st->codec;
        int64_t nb_samples =
            pkt->size / (av_get_bytes_per_sample(ctx->sample_fmt) * ctx->channels);
        s->timestamp += av_rescale_q(nb_samples,
                                     (AVRational){ 1, ctx->sample_rate },
                                     st->time_base);
    }

    if (pa_simple_write(s->pa, pkt->data, pkt->size, &err) < 0) {
        av_log(s, AV_LOG_ERROR, "pa_simple_write failed: %s\n", pa_strerror(err));
        return AVERROR(EIO);
    }

    return 0;
}
 
/* Report the wall-clock time together with the dts of the audio currently
 * being heard: the running write clock minus the sink latency. */
static void pulse_get_output_timestamp(AVFormatContext *h, int stream, int64_t *dts, int64_t *wall)
{
    PulseData *s = h->priv_data;
    pa_usec_t latency = pa_simple_get_latency(s->pa, NULL);
    *wall = av_gettime();
    *dts = s->timestamp - latency;
}
 
#define OFFSET(a) offsetof(PulseData, a)
#define E AV_OPT_FLAG_ENCODING_PARAM

/* User-settable options of the PulseAudio playback device. */
static const AVOption options[] = {
    { "server",      "set PulseAudio server",  OFFSET(server),      AV_OPT_TYPE_STRING, {.str = NULL},              0, 0, E },
    { "name",        "set application name",   OFFSET(name),        AV_OPT_TYPE_STRING, {.str = LIBAVFORMAT_IDENT}, 0, 0, E },
    { "stream_name", "set stream description", OFFSET(stream_name), AV_OPT_TYPE_STRING, {.str = NULL},              0, 0, E },
    { "device",      "set device name",        OFFSET(device),      AV_OPT_TYPE_STRING, {.str = NULL},              0, 0, E },
    { NULL }
};

static const AVClass pulse_muxer_class = {
    .class_name = "Pulse muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* Audio codec defaults to 16-bit signed PCM in native byte order.
 * AVFMT_NOFILE: "filename" is only used as the stream description. */
AVOutputFormat ff_pulse_muxer = {
    .name                 = "pulse",
    .long_name            = NULL_IF_CONFIG_SMALL("Pulse audio output"),
    .priv_data_size       = sizeof(PulseData),
    .audio_codec          = AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE),
    .video_codec          = AV_CODEC_ID_NONE,
    .write_header         = pulse_write_header,
    .write_packet         = pulse_write_packet,
    .write_trailer        = pulse_write_trailer,
    .get_output_timestamp = pulse_get_output_timestamp,
    .flags                = AVFMT_NOFILE | AVFMT_ALLOW_FLUSH,
    .priv_class           = &pulse_muxer_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/sdl.c
0,0 → 1,236
/*
* Copyright (c) 2011 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* libSDL output device
*/
 
#include <SDL.h>
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "avdevice.h"
 
/* Private state of the SDL 1.2 video output device. */
typedef struct {
    AVClass *class;
    SDL_Surface *surface;             /* the SDL screen surface */
    SDL_Overlay *overlay;             /* YUV overlay blitted onto the surface */
    char *window_title;               /* owned copies; freed in sdl_write_trailer() */
    char *icon_title;
    int window_width, window_height;  /**< size of the window */
    int window_fullscreen;            /* nonzero: request SDL_FULLSCREEN */
    int overlay_width, overlay_height; /**< size of the video in the window */
    int overlay_x, overlay_y;         /* overlay position (centered in the window) */
    int overlay_fmt;                  /* SDL_*_OVERLAY constant matching the pixel format */
    int sdl_was_already_inited;       /* set when SDL video was up before us; skip SDL_Quit() */
} SDLContext;
 
/* Pixel formats displayable through the SDL 1.2 YUV overlay API,
 * terminated by an AV_PIX_FMT_NONE sentinel. */
static const struct sdl_overlay_pix_fmt_entry {
    enum AVPixelFormat pix_fmt; int overlay_fmt;
} sdl_overlay_pix_fmt_map[] = {
    { AV_PIX_FMT_YUV420P, SDL_IYUV_OVERLAY },
    { AV_PIX_FMT_YUYV422, SDL_YUY2_OVERLAY },
    { AV_PIX_FMT_UYVY422, SDL_UYVY_OVERLAY },
    { AV_PIX_FMT_NONE,    0                },
};
 
/* Release the window/icon titles and the YUV overlay, and shut SDL down
 * unless its video subsystem was already running before we started.
 * Also used as the error-cleanup path of sdl_write_header(). */
static int sdl_write_trailer(AVFormatContext *s)
{
    SDLContext *sdl = s->priv_data;

    av_freep(&sdl->window_title);
    av_freep(&sdl->icon_title);

    if (sdl->overlay) {
        SDL_FreeYUVOverlay(sdl->overlay);
        sdl->overlay = NULL;
    }
    if (!sdl->sdl_was_already_inited)
        SDL_Quit();

    return 0;
}
 
/**
 * Initialize SDL, validate that the muxer receives exactly one rawvideo
 * stream in a supported pixel format, compute the overlay and window
 * geometry from the stream's aspect ratio, and create the video surface
 * and the YUV overlay.
 *
 * On any failure the partially-done setup is undone via sdl_write_trailer().
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int sdl_write_header(AVFormatContext *s)
{
    SDLContext *sdl = s->priv_data;
    AVStream *st = s->streams[0];
    AVCodecContext *encctx = st->codec;
    AVRational sar, dar; /* sample and display aspect ratios */
    int i, ret;
    /* Fix: parenthesize the conditional. "a | b ? c : d" parses as
     * "(a | b) ? c : d" since | binds tighter than ?:; the original only
     * worked by accident because SDL_SWSURFACE happens to be 0. */
    int flags = SDL_SWSURFACE | (sdl->window_fullscreen ? SDL_FULLSCREEN : 0);

    if (!sdl->window_title)
        sdl->window_title = av_strdup(s->filename);
    if (!sdl->icon_title)
        sdl->icon_title = av_strdup(sdl->window_title);

    /* Refuse to fight with another user of SDL video in this process. */
    if (SDL_WasInit(SDL_INIT_VIDEO)) {
        av_log(s, AV_LOG_ERROR,
               "SDL video subsystem was already inited, aborting\n");
        sdl->sdl_was_already_inited = 1;
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (SDL_Init(SDL_INIT_VIDEO) != 0) {
        av_log(s, AV_LOG_ERROR, "Unable to initialize SDL: %s\n", SDL_GetError());
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if ( s->nb_streams > 1
      || encctx->codec_type != AVMEDIA_TYPE_VIDEO
      || encctx->codec_id != AV_CODEC_ID_RAWVIDEO) {
        av_log(s, AV_LOG_ERROR, "Only supports one rawvideo stream\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    /* Map the stream pixel format to an SDL overlay format. */
    for (i = 0; sdl_overlay_pix_fmt_map[i].pix_fmt != AV_PIX_FMT_NONE; i++) {
        if (sdl_overlay_pix_fmt_map[i].pix_fmt == encctx->pix_fmt) {
            sdl->overlay_fmt = sdl_overlay_pix_fmt_map[i].overlay_fmt;
            break;
        }
    }

    if (!sdl->overlay_fmt) {
        av_log(s, AV_LOG_ERROR,
               "Unsupported pixel format '%s', choose one of yuv420p, yuyv422, or uyvy422\n",
               av_get_pix_fmt_name(encctx->pix_fmt));
        ret = AVERROR(EINVAL);
        goto fail;
    }

    /* compute overlay width and height from the codec context information */
    sar = st->sample_aspect_ratio.num ? st->sample_aspect_ratio : (AVRational){ 1, 1 };
    dar = av_mul_q(sar, (AVRational){ encctx->width, encctx->height });

    /* we suppose the screen has a 1/1 sample aspect ratio */
    if (sdl->window_width && sdl->window_height) {
        /* fit in the window */
        if (av_cmp_q(dar, (AVRational){ sdl->window_width, sdl->window_height }) > 0) {
            /* fit in width */
            sdl->overlay_width  = sdl->window_width;
            sdl->overlay_height = av_rescale(sdl->overlay_width, dar.den, dar.num);
        } else {
            /* fit in height */
            sdl->overlay_height = sdl->window_height;
            sdl->overlay_width  = av_rescale(sdl->overlay_height, dar.num, dar.den);
        }
    } else {
        /* no forced size: size the window to the video, compensating for
         * non-square pixels */
        if (sar.num > sar.den) {
            sdl->overlay_width  = encctx->width;
            sdl->overlay_height = av_rescale(sdl->overlay_width, dar.den, dar.num);
        } else {
            sdl->overlay_height = encctx->height;
            sdl->overlay_width  = av_rescale(sdl->overlay_height, dar.num, dar.den);
        }
        sdl->window_width  = sdl->overlay_width;
        sdl->window_height = sdl->overlay_height;
    }
    /* center the overlay in the window */
    sdl->overlay_x = (sdl->window_width  - sdl->overlay_width ) / 2;
    sdl->overlay_y = (sdl->window_height - sdl->overlay_height) / 2;

    SDL_WM_SetCaption(sdl->window_title, sdl->icon_title);
    sdl->surface = SDL_SetVideoMode(sdl->window_width, sdl->window_height,
                                    24, flags);
    if (!sdl->surface) {
        av_log(s, AV_LOG_ERROR, "Unable to set video mode: %s\n", SDL_GetError());
        ret = AVERROR(EINVAL);
        goto fail;
    }

    sdl->overlay = SDL_CreateYUVOverlay(encctx->width, encctx->height,
                                        sdl->overlay_fmt, sdl->surface);
    if (!sdl->overlay || sdl->overlay->pitches[0] < encctx->width) {
        av_log(s, AV_LOG_ERROR,
               "SDL does not support an overlay with size of %dx%d pixels\n",
               encctx->width, encctx->height);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    av_log(s, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d\n",
           encctx->width, encctx->height, av_get_pix_fmt_name(encctx->pix_fmt), sar.num, sar.den,
           sdl->overlay_width, sdl->overlay_height);
    return 0;

fail:
    sdl_write_trailer(s);
    return ret;
}
 
/* Display one rawvideo packet: wrap the packed payload in an AVPicture,
 * clear the window, and blit by pointing the overlay's plane pointers
 * directly at the packet data (zero-copy; the overlay's own storage is
 * bypassed while it is locked). */
static int sdl_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    SDLContext *sdl = s->priv_data;
    AVCodecContext *encctx = s->streams[0]->codec;
    SDL_Rect rect = { sdl->overlay_x, sdl->overlay_y, sdl->overlay_width, sdl->overlay_height };
    AVPicture pict;
    int i;

    /* Split the packed buffer into per-plane pointers and linesizes. */
    avpicture_fill(&pict, pkt->data, encctx->pix_fmt, encctx->width, encctx->height);

    /* Black background for the letter/pillar-box borders. */
    SDL_FillRect(sdl->surface, &sdl->surface->clip_rect,
                 SDL_MapRGB(sdl->surface->format, 0, 0, 0));
    SDL_LockYUVOverlay(sdl->overlay);
    for (i = 0; i < 3; i++) {
        sdl->overlay->pixels [i] = pict.data [i];
        sdl->overlay->pitches[i] = pict.linesize[i];
    }
    SDL_DisplayYUVOverlay(sdl->overlay, &rect);
    SDL_UnlockYUVOverlay(sdl->overlay);

    SDL_UpdateRect(sdl->surface, rect.x, rect.y, rect.w, rect.h);

    return 0;
}
 
#define OFFSET(x) offsetof(SDLContext,x)

/* User-settable options of the SDL output device. */
static const AVOption options[] = {
    { "window_title",      "set SDL window title",           OFFSET(window_title),      AV_OPT_TYPE_STRING,     {.str = NULL }, 0,       0,       AV_OPT_FLAG_ENCODING_PARAM },
    { "icon_title",        "set SDL iconified window title", OFFSET(icon_title) ,       AV_OPT_TYPE_STRING,     {.str = NULL }, 0,       0,       AV_OPT_FLAG_ENCODING_PARAM },
    { "window_size",       "set SDL window forced size",     OFFSET(window_width),      AV_OPT_TYPE_IMAGE_SIZE, {.str=NULL},    0,       0,       AV_OPT_FLAG_ENCODING_PARAM },
    { "window_fullscreen", "set SDL window fullscreen",      OFFSET(window_fullscreen), AV_OPT_TYPE_INT,        {.i64=0},       INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { NULL },
};

static const AVClass sdl_class = {
    .class_name = "sdl outdev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* Display-only "muxer": consumes rawvideo packets, writes no file. */
AVOutputFormat ff_sdl_muxer = {
    .name           = "sdl",
    .long_name      = NULL_IF_CONFIG_SMALL("SDL output device"),
    .priv_data_size = sizeof(SDLContext),
    .audio_codec    = AV_CODEC_ID_NONE,
    .video_codec    = AV_CODEC_ID_RAWVIDEO,
    .write_header   = sdl_write_header,
    .write_packet   = sdl_write_packet,
    .write_trailer  = sdl_write_trailer,
    .flags          = AVFMT_NOFILE | AVFMT_VARIABLE_FPS | AVFMT_NOTIMESTAMPS,
    .priv_class     = &sdl_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/sndio_common.c
0,0 → 1,120
/*
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <stdint.h>
#include <sndio.h>
 
#include "avdevice.h"
 
#include "sndio_common.h"
 
/* sndio "onmove" callback: the hardware advanced by delta frames;
 * track the hardware position in bytes (frames * channels * bytes/sample). */
static inline void movecb(void *addr, int delta)
{
    SndioData *s = addr;

    s->hwpos += delta * s->channels * s->bps;
}
 
/**
 * Open and start an sndio device for capture or playback.
 *
 * Requests 16-bit signed native-endian PCM at the rate/channel count found
 * in the SndioData, verifies the device accepted exactly those parameters,
 * and fills in the negotiated codec_id, channels, sample_rate, bps and
 * buffer_size. For playback a staging buffer of one device round is
 * allocated.
 *
 * @param s1           format context whose priv_data is a SndioData
 * @param is_output    nonzero for playback (SIO_PLAY), zero for record (SIO_REC)
 * @param audio_device device name handed to sio_open()
 * @return 0 on success, AVERROR(EIO) on any failure
 */
av_cold int ff_sndio_open(AVFormatContext *s1, int is_output,
                          const char *audio_device)
{
    SndioData *s = s1->priv_data;
    struct sio_hdl *hdl;
    struct sio_par par;

    hdl = sio_open(audio_device, is_output ? SIO_PLAY : SIO_REC, 0);
    if (!hdl) {
        av_log(s1, AV_LOG_ERROR, "Could not open sndio device\n");
        return AVERROR(EIO);
    }

    sio_initpar(&par);

    /* 16-bit signed samples in native byte order. */
    par.bits = 16;
    par.sig  = 1;
    par.le   = SIO_LE_NATIVE;

    if (is_output)
        par.pchan = s->channels;
    else
        par.rchan = s->channels;
    par.rate = s->sample_rate;

    if (!sio_setpar(hdl, &par) || !sio_getpar(hdl, &par)) {
        av_log(s1, AV_LOG_ERROR, "Impossible to set sndio parameters, "
               "channels: %d sample rate: %d\n", s->channels, s->sample_rate);
        goto fail;
    }

    /* The device may pick different parameters than requested; reject
     * anything other than an exact match. */
    if (par.bits != 16 || par.sig != 1 ||
        (is_output  && (par.pchan != s->channels)) ||
        (!is_output && (par.rchan != s->channels)) ||
        (par.rate != s->sample_rate)) {
        av_log(s1, AV_LOG_ERROR, "Could not set appropriate sndio parameters, "
               "channels: %d sample rate: %d\n", s->channels, s->sample_rate);
        goto fail;
    }

    /* One device round, in bytes. */
    s->buffer_size = par.round * par.bps *
                     (is_output ? par.pchan : par.rchan);

    /* Playback writes go through a staging buffer of one round. */
    if (is_output) {
        s->buffer = av_malloc(s->buffer_size);
        if (!s->buffer) {
            av_log(s1, AV_LOG_ERROR, "Could not allocate buffer\n");
            goto fail;
        }
    }

    /* Publish the parameters the device actually accepted. */
    s->codec_id    = par.le ? AV_CODEC_ID_PCM_S16LE : AV_CODEC_ID_PCM_S16BE;
    s->channels    = is_output ? par.pchan : par.rchan;
    s->sample_rate = par.rate;
    s->bps         = par.bps;

    /* movecb() keeps hwpos in sync with the hardware pointer. */
    sio_onmove(hdl, movecb, s);

    if (!sio_start(hdl)) {
        av_log(s1, AV_LOG_ERROR, "Could not start sndio\n");
        goto fail;
    }

    s->hdl = hdl;

    return 0;

fail:
    av_freep(&s->buffer);

    if (hdl)
        sio_close(hdl);

    return AVERROR(EIO);
}
 
/* Free the staging buffer and close the device handle, if open. */
int ff_sndio_close(SndioData *s)
{
    av_freep(&s->buffer);

    if (s->hdl)
        sio_close(s->hdl);

    return 0;
}
/contrib/sdk/sources/ffmpeg/libavdevice/sndio_common.h
0,0 → 1,48
/*
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVDEVICE_SNDIO_COMMON_H
#define AVDEVICE_SNDIO_COMMON_H
 
#include <stdint.h>
#include <sndio.h>
 
#include "libavutil/log.h"
#include "avdevice.h"
 
/* State shared by the sndio capture (dec) and playback (enc) devices. */
typedef struct SndioData {
    AVClass *class;
    struct sio_hdl *hdl;     /* sndio device handle */
    enum AVCodecID codec_id; /* negotiated PCM codec (S16LE or S16BE) */
    int64_t hwpos;           /* bytes processed by the hardware (advanced by the onmove callback) */
    int64_t softpos;         /* bytes handed to / taken from the library by us */
    uint8_t *buffer;         /* playback staging buffer (one device round) */
    int bps;                 /* bytes per sample, from sio_par.bps */
    int buffer_size;         /* size of buffer in bytes */
    int buffer_offset;       /* fill level of buffer */
    int channels;
    int sample_rate;
} SndioData;

int ff_sndio_open(AVFormatContext *s1, int is_output, const char *audio_device);
int ff_sndio_close(SndioData *s);
 
#endif /* AVDEVICE_SNDIO_COMMON_H */
/contrib/sdk/sources/ffmpeg/libavdevice/sndio_dec.c
0,0 → 1,118
/*
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <stdint.h>
#include <sndio.h>
 
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
 
#include "sndio_common.h"
 
/**
 * Create the single audio stream, open the sndio device for capture and
 * publish the parameters the device actually accepted.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static av_cold int audio_read_header(AVFormatContext *s1)
{
    SndioData *s = s1->priv_data;
    AVStream *st = avformat_new_stream(s1, NULL);
    int err;

    if (!st)
        return AVERROR(ENOMEM);

    err = ff_sndio_open(s1, 0, s1->filename);
    if (err < 0)
        return err;

    /* take real parameters */
    st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id    = s->codec_id;
    st->codec->sample_rate = s->sample_rate;
    st->codec->channels    = s->channels;

    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    return 0;
}
 
/**
 * Read one buffer's worth of audio from the device and timestamp the
 * packet with an estimate of the capture time of its first sample.
 *
 * @return 0 on success, AVERROR_EOF when the device reports end of data,
 *         or a negative AVERROR code on allocation failure
 */
static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    SndioData *s = s1->priv_data;
    int64_t queued_bytes, now;
    int nread;

    if ((nread = av_new_packet(pkt, s->buffer_size)) < 0)
        return nread;

    nread = sio_read(s->hdl, pkt->data, pkt->size);
    if (nread == 0 || sio_eof(s->hdl)) {
        av_free_packet(pkt);
        return AVERROR_EOF;
    }

    pkt->size   = nread;
    s->softpos += nread;

    /* compute pts of the start of the packet: wall clock now, minus the
     * duration of the bytes that were already buffered in the device
     * (hardware position vs. bytes we have consumed, plus this read) */
    now = av_gettime();
    queued_bytes = nread + s->hwpos - s->softpos;

    pkt->pts = now - ((queued_bytes * 1000000) /
                      (s->bps * s->channels * s->sample_rate));

    return 0;
}
 
/* Close the capture device and release its resources. */
static av_cold int audio_read_close(AVFormatContext *s1)
{
    SndioData *s = s1->priv_data;

    ff_sndio_close(s);

    return 0;
}
 
/* Requested capture parameters; the device may be unable to honor them,
 * in which case ff_sndio_open() fails. */
static const AVOption options[] = {
    { "sample_rate", "", offsetof(SndioData, sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { "channels",    "", offsetof(SndioData, channels),    AV_OPT_TYPE_INT, {.i64 = 2},     1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};

static const AVClass sndio_demuxer_class = {
    .class_name = "sndio indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* AVFMT_NOFILE: the "filename" is an sndio device name. */
AVInputFormat ff_sndio_demuxer = {
    .name           = "sndio",
    .long_name      = NULL_IF_CONFIG_SMALL("sndio audio capture"),
    .priv_data_size = sizeof(SndioData),
    .read_header    = audio_read_header,
    .read_packet    = audio_read_packet,
    .read_close     = audio_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &sndio_demuxer_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/sndio_enc.c
0,0 → 1,92
/*
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include <stdint.h>
#include <sndio.h>
 
#include "avdevice.h"
#include "sndio_common.h"
 
/**
 * Open the sndio device for playback, using the sample rate and channel
 * count of the (single) audio stream.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static av_cold int audio_write_header(AVFormatContext *s1)
{
    SndioData *s = s1->priv_data;
    AVStream *st = s1->streams[0];

    s->sample_rate = st->codec->sample_rate;
    s->channels    = st->codec->channels;

    return ff_sndio_open(s1, 1, s1->filename);
}
 
/**
 * Queue one packet of raw audio for playback. Data is staged through the
 * device-round-sized buffer; each time the buffer fills up it is handed
 * to sndio in one sio_write() call.
 *
 * @return 0 on success, AVERROR(EIO) when the device stops accepting data
 */
static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
{
    SndioData *s = s1->priv_data;
    uint8_t *src = pkt->data;
    int remaining = pkt->size;

    while (remaining > 0) {
        int chunk = FFMIN(s->buffer_size - s->buffer_offset, remaining);

        memcpy(s->buffer + s->buffer_offset, src, chunk);
        src              += chunk;
        remaining        -= chunk;
        s->buffer_offset += chunk;

        /* Flush a full staging buffer to the device. */
        if (s->buffer_offset >= s->buffer_size) {
            int written = sio_write(s->hdl, s->buffer, s->buffer_size);
            if (written == 0 || sio_eof(s->hdl))
                return AVERROR(EIO);
            s->softpos      += written;
            s->buffer_offset = 0;
        }
    }

    return 0;
}
 
/* Best-effort flush of the partially filled staging buffer (return value
 * deliberately ignored during teardown), then close the device. */
static int audio_write_trailer(AVFormatContext *s1)
{
    SndioData *s = s1->priv_data;

    sio_write(s->hdl, s->buffer, s->buffer_offset);

    ff_sndio_close(s);

    return 0;
}
 
/* AVFMT_NOFILE: the "filename" is an sndio device name, not a file. */
AVOutputFormat ff_sndio_muxer = {
    .name           = "sndio",
    .long_name      = NULL_IF_CONFIG_SMALL("sndio audio playback"),
    .priv_data_size = sizeof(SndioData),
    /* XXX: we make the assumption that the soundcard accepts this format */
    /* XXX: find better solution with "preinit" method, needed also in
            other formats */
    .audio_codec    = AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE),
    .video_codec    = AV_CODEC_ID_NONE,
    .write_header   = audio_write_header,
    .write_packet   = audio_write_packet,
    .write_trailer  = audio_write_trailer,
    .flags          = AVFMT_NOFILE,
};
/contrib/sdk/sources/ffmpeg/libavdevice/timefilter.c
0,0 → 1,168
/*
* Delay Locked Loop based time filter
* Copyright (c) 2009 Samalyse
* Copyright (c) 2009 Michael Niedermayer
* Author: Olivier Guilyardi <olivier samalyse com>
* Michael Niedermayer <michaelni gmx at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavutil/common.h"
#include "libavutil/mem.h"
#include "config.h"
#include "timefilter.h"
 
struct TimeFilter {
    // Delay Locked Loop data. These variables refer to mathematical
    // concepts described in: http://www.kokkinizita.net/papers/usingdll.pdf
    double cycle_time;       // filtered time of the current cycle
    double feedback2_factor; // gain of the second feedback path
    double feedback3_factor; // gain of the third feedback path
    double clock_period;     // running estimate of the hardware clock period
    int count;               // samples fed in since creation / last reset
};
 
/* 1 - exp(-x), approximated via a 3rd-order power series of exp(x):
 * exp(x) ~= 1 + x*(1 + x/2*(1 + x/3)), so 1 - exp(-x) ~= 1 - 1/exp(x). */
static double qexpneg(double x)
{
    /* Horner evaluation of the truncated series for exp(x). */
    double series = 1 + x / 3;
    series = 1 + x / 2 * series;
    series = 1 + x * series;
    return 1 - 1 / series;
}
 
/**
 * Allocate a DLL time filter and derive its two loop gains from the
 * requested bandwidth, following the Adriaensen DLL formulation.
 *
 * @return the new filter, or NULL on allocation failure
 */
TimeFilter *ff_timefilter_new(double time_base,
                              double period,
                              double bandwidth)
{
    const double omega = 2 * M_PI * bandwidth * period * time_base;
    TimeFilter *tf = av_mallocz(sizeof(TimeFilter));

    if (!tf)
        return NULL;

    tf->clock_period     = time_base;
    tf->feedback2_factor = qexpneg(M_SQRT2 * omega);
    tf->feedback3_factor = qexpneg(omega * omega) / period;
    return tf;
}
 
/* Free a filter allocated with ff_timefilter_new(). */
void ff_timefilter_destroy(TimeFilter *self)
{
    av_freep(&self);
}
 
/* Forget all history; the next ff_timefilter_update() re-locks the loop. */
void ff_timefilter_reset(TimeFilter *self)
{
    self->count = 0;
}
 
/**
 * Feed one system-clock sample into the DLL and return the filtered time.
 *
 * The very first sample after creation/reset locks the loop directly onto
 * the system clock; subsequent samples advance the predicted cycle time by
 * clock_period * period and correct both the phase (cycle_time) and the
 * frequency (clock_period) proportionally to the prediction error.
 */
double ff_timefilter_update(TimeFilter *self, double system_time, double period)
{
    self->count++;
    if (self->count == 1) {
        /* Initial lock: adopt the system time verbatim. */
        self->cycle_time = system_time;
    } else {
        double loop_error;

        self->cycle_time += self->clock_period * period;
        loop_error = system_time - self->cycle_time;

        /* Phase correction gain is at least 1/count so early samples
         * converge quickly. */
        self->cycle_time   += FFMAX(self->feedback2_factor, 1.0 / self->count) * loop_error;
        self->clock_period += self->feedback3_factor * loop_error;
    }
    return self->cycle_time;
}
 
/* Extrapolate the filtered clock by delta periods from the last update. */
double ff_timefilter_eval(TimeFilter *self, double delta)
{
    return self->cycle_time + self->clock_period * delta;
}
 
#ifdef TEST
#include "libavutil/lfg.h"
#define LFG_MAX ((1LL << 32) - 1)
 
/**
 * Self-test (built under -DTEST): for each synthetic noise/drift level,
 * hill-climbs the DLL's (period, bandwidth) parameters over jittery
 * timestamps and prints the best pair with its squared error.
 * Fixes a typo in the allocation-failure message ("alocate").
 */
int main(void)
{
    AVLFG prng;
    double n0, n1;
#define SAMPLES 1000
    double ideal[SAMPLES];
    double samples[SAMPLES];
    double samplet[SAMPLES];
    for (n0 = 0; n0 < 40; n0 = 2 * n0 + 1) {
        for (n1 = 0; n1 < 10; n1 = 2 * n1 + 1) {
            double best_error = 1000000000;
            double bestpar0 = n0 ? 1 : 100000;
            double bestpar1 = 1;
            int better, i;

            av_lfg_init(&prng, 123);
            /* Build the input: samplet = sampling instants with 0/0.999
             * jitter, ideal = drifted target, samples = ideal plus noise,
             * forced to be monotonically increasing. */
            for (i = 0; i < SAMPLES; i++) {
                samplet[i] = 10 + i + (av_lfg_get(&prng) < LFG_MAX/2 ? 0 : 0.999);
                ideal[i]   = samplet[i] + n1 * i / (1000);
                samples[i] = ideal[i] + n0 * (av_lfg_get(&prng) - LFG_MAX / 2) / (LFG_MAX * 10LL);
                if (i && samples[i] < samples[i-1])
                    samples[i] = samples[i-1] + 0.001;
            }

            /* Hill-climb: try +/-20% around the best pair in 5% steps
             * until no step improves the error. */
            do {
                double par0, par1;
                better = 0;
                for (par0 = bestpar0 * 0.8; par0 <= bestpar0 * 1.21; par0 += bestpar0 * 0.05) {
                    for (par1 = bestpar1 * 0.8; par1 <= bestpar1 * 1.21; par1 += bestpar1 * 0.05) {
                        double error = 0;
                        TimeFilter *tf = ff_timefilter_new(1, par0, par1);
                        if (!tf) {
                            printf("Could not allocate memory for timefilter.\n");
                            exit(1);
                        }
                        for (i = 0; i < SAMPLES; i++) {
                            double filtered;
                            filtered = ff_timefilter_update(tf, samples[i], i ? (samplet[i] - samplet[i-1]) : 1);
                            if (filtered < 0 || filtered > 1000000000)
                                printf("filter is unstable\n");
                            error += (filtered - ideal[i]) * (filtered - ideal[i]);
                        }
                        ff_timefilter_destroy(tf);
                        if (error < best_error) {
                            best_error = error;
                            bestpar0 = par0;
                            bestpar1 = par1;
                            better = 1;
                        }
                    }
                }
            } while (better);
#if 0
            double lastfil = 9;
            TimeFilter *tf = ff_timefilter_new(1, bestpar0, bestpar1);
            for (i = 0; i < SAMPLES; i++) {
                double filtered;
                filtered = ff_timefilter_update(tf, samples[i], 1);
                printf("%f %f %f %f\n", i - samples[i] + 10, filtered - samples[i],
                       samples[FFMAX(i, 1)] - samples[FFMAX(i - 1, 0)], filtered - lastfil);
                lastfil = filtered;
            }
            ff_timefilter_destroy(tf);
#else
            printf(" [%12f %11f %9f]", bestpar0, bestpar1, best_error);
#endif
        }
        printf("\n");
    }
    return 0;
}
#endif
/contrib/sdk/sources/ffmpeg/libavdevice/timefilter.h
0,0 → 1,110
/*
* Delay Locked Loop based time filter prototypes and declarations
* Copyright (c) 2009 Samalyse
* Copyright (c) 2009 Michael Niedermayer
* Author: Olivier Guilyardi <olivier samalyse com>
* Michael Niedermayer <michaelni gmx at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVDEVICE_TIMEFILTER_H
#define AVDEVICE_TIMEFILTER_H
 
/**
* Opaque type representing a time filter state
*
* The purpose of this filter is to provide a way to compute accurate time
* stamps that can be compared to wall clock time, especially when dealing
* with two clocks: the system clock and a hardware device clock, such as
* a soundcard.
*/
typedef struct TimeFilter TimeFilter;
 
 
/**
* Create a new Delay Locked Loop time filter
*
* feedback2_factor and feedback3_factor are the factors used for the
* multiplications that are respectively performed in the second and third
* feedback paths of the loop.
*
* Unless you know what you are doing, you should set these as follow:
*
* o = 2 * M_PI * bandwidth * period_in_seconds
* feedback2_factor = sqrt(2) * o
* feedback3_factor = o * o
*
* Where bandwidth is up to you to choose. Smaller values will filter out more
* of the jitter, but also take a longer time for the loop to settle. A good
* starting point is something between 0.3 and 3 Hz.
*
* @param time_base period of the hardware clock in seconds
* (for example 1.0/44100)
* @param period expected update interval, in input units
 * @param bandwidth filtering bandwidth, in Hz
*
* @return a pointer to a TimeFilter struct, or NULL on error
*
* For more details about these parameters and background concepts please see:
* http://www.kokkinizita.net/papers/usingdll.pdf
*/
TimeFilter * ff_timefilter_new(double clock_period, double feedback2_factor, double feedback3_factor);
 
/**
* Update the filter
*
* This function must be called in real time, at each process cycle.
*
* @param period the device cycle duration in clock_periods. For example, at
* 44.1kHz and a buffer size of 512 frames, period = 512 when clock_period
* was 1.0/44100, or 512/44100 if clock_period was 1.
*
* system_time, in seconds, should be the value of the system clock time,
* at (or as close as possible to) the moment the device hardware interrupt
* occurred (or any other event the device clock raises at the beginning of a
* cycle).
*
* @return the filtered time, in seconds
*/
double ff_timefilter_update(TimeFilter *self, double system_time, double period);
 
/**
* Evaluate the filter at a specified time
*
* @param delta difference between the requested time and the current time
* (last call to ff_timefilter_update).
* @return the filtered time
*/
double ff_timefilter_eval(TimeFilter *self, double delta);
 
/**
* Reset the filter
*
* This function should mainly be called in case of XRUN.
*
* Warning: after calling this, the filter is in an undetermined state until
* the next call to ff_timefilter_update()
*/
void ff_timefilter_reset(TimeFilter *);
 
/**
* Free all resources associated with the filter
*/
void ff_timefilter_destroy(TimeFilter *);
 
#endif /* AVDEVICE_TIMEFILTER_H */
/contrib/sdk/sources/ffmpeg/libavdevice/v4l.c
0,0 → 1,362
/*
* Linux video grab interface
* Copyright (c) 2000,2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "avdevice.h"
 
#undef __STRICT_ANSI__ //workaround due to broken kernel headers
#include "config.h"
#include "libavutil/rational.h"
#include "libavutil/imgutils.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavformat/internal.h"
#include "libavcodec/dsputil.h"
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>
#define _LINUX_TIME_H 1
#include <linux/videodev.h>
#include <time.h>
 
/* Per-instance state for the (deprecated) V4L1 capture device. */
typedef struct {
    AVClass *class;                     /* class for private options */
    int fd;                             /* device file descriptor */
    int frame_format; /* see VIDEO_PALETTE_xxx */
    int use_mmap;                       /* nonzero: capture via mmap, else via read() */
    AVRational time_base;               /* frame period requested by the caller */
    int64_t time_frame;                 /* time of the next frame, in us scaled by time_base.den/num */
    int frame_size;                     /* size of one raw frame in bytes (width*height*depth/8) */
    struct video_capability video_cap;  /* capabilities from VIDIOCGCAP */
    struct video_audio audio_saved;     /* audio state saved at open; re-muted on close */
    struct video_window video_win;      /* capture window (width/height) */
    uint8_t *video_buf;                 /* mmap'ed capture area (mmap mode only) */
    struct video_mbuf gb_buffers;       /* mmap buffer layout from VIDIOCGMBUF */
    struct video_mmap gb_buf;           /* parameters of the in-flight VIDIOCMCAPTURE */
    int gb_frame;                       /* index of the buffer to be synced next */
    int standard;                       /* TV standard (VIDEO_MODE_*), from private option */
} VideoData;
 
/* Mapping between V4L1 palettes and FFmpeg pixel formats, with the
 * per-pixel bit depth used to compute frame sizes.  Entries are tried
 * in declaration order when the device rejects the requested format. */
static const struct {
    int palette;
    int depth;
    enum AVPixelFormat pix_fmt;
} video_formats [] = {
    {.palette = VIDEO_PALETTE_YUV420P, .depth = 12, .pix_fmt = AV_PIX_FMT_YUV420P },
    {.palette = VIDEO_PALETTE_YUV422,  .depth = 16, .pix_fmt = AV_PIX_FMT_YUYV422 },
    {.palette = VIDEO_PALETTE_UYVY,    .depth = 16, .pix_fmt = AV_PIX_FMT_UYVY422 },
    {.palette = VIDEO_PALETTE_YUYV,    .depth = 16, .pix_fmt = AV_PIX_FMT_YUYV422 },
    /* NOTE: v4l uses BGR24, not RGB24 */
    {.palette = VIDEO_PALETTE_RGB24,   .depth = 24, .pix_fmt = AV_PIX_FMT_BGR24   },
    {.palette = VIDEO_PALETTE_RGB565,  .depth = 16, .pix_fmt = AV_PIX_FMT_BGR565  },
    {.palette = VIDEO_PALETTE_GREY,    .depth = 8,  .pix_fmt = AV_PIX_FMT_GRAY8   },
};
 
 
/**
 * Open and configure the V4L1 grab device.
 *
 * Probes capabilities, negotiates a palette/size with the driver, unmutes
 * the audio, and sets up either mmap-based (VIDIOCMCAPTURE) or read()-based
 * capture.  Creates one raw-video stream on @p s1.
 *
 * Fixes vs. the previous revision:
 *  - VIDIOCSWIN was passed the window struct by value instead of a pointer;
 *  - VIDIOCGWIN was given a spurious extra size argument;
 *  - a failed av_image_check_size() leaked the device fd;
 *  - the deprecation warning was missing a trailing newline, and the debug
 *    log misspelled "contrast".
 *
 * @param s1 format context; s1->filename is the device path
 * @param ap legacy parameters carrying time base and requested size
 * @return 0 on success, a negative value on error
 */
static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
    VideoData *s = s1->priv_data;
    AVStream *st;
    int video_fd;
    int desired_palette, desired_depth;
    struct video_tuner tuner;
    struct video_audio audio;
    struct video_picture pict;
    int j;
    int vformat_num = FF_ARRAY_ELEMS(video_formats);

    av_log(s1, AV_LOG_WARNING, "V4L input device is deprecated and will be removed in the next release.\n");

    /* a valid frame rate is required to schedule frame grabbing */
    if (ap->time_base.den <= 0) {
        av_log(s1, AV_LOG_ERROR, "Wrong time base (%d)\n", ap->time_base.den);
        return -1;
    }
    s->time_base = ap->time_base;

    s->video_win.width = ap->width;
    s->video_win.height = ap->height;

    st = avformat_new_stream(s1, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    video_fd = open(s1->filename, O_RDWR);
    if (video_fd < 0) {
        av_log(s1, AV_LOG_ERROR, "%s: %s\n", s1->filename, strerror(errno));
        goto fail;
    }

    if (ioctl(video_fd, VIDIOCGCAP, &s->video_cap) < 0) {
        av_log(s1, AV_LOG_ERROR, "VIDIOCGCAP: %s\n", strerror(errno));
        goto fail;
    }

    if (!(s->video_cap.type & VID_TYPE_CAPTURE)) {
        av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not handle capture\n");
        goto fail;
    }

    /* no values set, autodetect them */
    if (s->video_win.width <= 0 || s->video_win.height <= 0) {
        /* VIDIOCGWIN takes no size argument; the spurious sizeof()
         * previously passed here has been dropped */
        if (ioctl(video_fd, VIDIOCGWIN, &s->video_win) < 0) {
            av_log(s1, AV_LOG_ERROR, "VIDIOCGWIN: %s\n", strerror(errno));
            goto fail;
        }
    }

    /* goto fail (was "return -1") so the device fd is not leaked */
    if(av_image_check_size(s->video_win.width, s->video_win.height, 0, s1) < 0)
        goto fail;

    /* look up the palette/depth matching the requested pixel format, if any */
    desired_palette = -1;
    desired_depth = -1;
    for (j = 0; j < vformat_num; j++) {
        if (ap->pix_fmt == video_formats[j].pix_fmt) {
            desired_palette = video_formats[j].palette;
            desired_depth = video_formats[j].depth;
            break;
        }
    }

    /* set tv standard */
    if (!ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
        tuner.mode = s->standard;
        ioctl(video_fd, VIDIOCSTUNER, &tuner);
    }

    /* unmute audio; save the previous state so close can restore/mute it */
    audio.audio = 0;
    ioctl(video_fd, VIDIOCGAUDIO, &audio);
    memcpy(&s->audio_saved, &audio, sizeof(audio));
    audio.flags &= ~VIDEO_AUDIO_MUTE;
    ioctl(video_fd, VIDIOCSAUDIO, &audio);

    ioctl(video_fd, VIDIOCGPICT, &pict);
    av_dlog(s1, "v4l: colour=%d hue=%d brightness=%d contrast=%d whiteness=%d\n",
            pict.colour, pict.hue, pict.brightness, pict.contrast, pict.whiteness);
    /* try to choose a suitable video format */
    pict.palette = desired_palette;
    pict.depth= desired_depth;
    if (desired_palette == -1 || ioctl(video_fd, VIDIOCSPICT, &pict) < 0) {
        /* requested format rejected (or none requested): try every
         * supported format in table order */
        for (j = 0; j < vformat_num; j++) {
            pict.palette = video_formats[j].palette;
            pict.depth = video_formats[j].depth;
            if (-1 != ioctl(video_fd, VIDIOCSPICT, &pict))
                break;
        }
        if (j >= vformat_num)
            goto fail1;
    }

    if (ioctl(video_fd, VIDIOCGMBUF, &s->gb_buffers) < 0) {
        /* try to use read based access */
        int val;

        s->video_win.x = 0;
        s->video_win.y = 0;
        s->video_win.chromakey = -1;
        s->video_win.flags = 0;

        /* fixed: VIDIOCSWIN needs a pointer; the struct was passed by value */
        if (ioctl(video_fd, VIDIOCSWIN, &s->video_win) < 0) {
            av_log(s1, AV_LOG_ERROR, "VIDIOCSWIN: %s\n", strerror(errno));
            goto fail;
        }

        s->frame_format = pict.palette;

        val = 1;
        if (ioctl(video_fd, VIDIOCCAPTURE, &val) < 0) {
            av_log(s1, AV_LOG_ERROR, "VIDIOCCAPTURE: %s\n", strerror(errno));
            goto fail;
        }

        s->time_frame = av_gettime() * s->time_base.den / s->time_base.num;
        s->use_mmap = 0;
    } else {
        s->video_buf = mmap(0, s->gb_buffers.size, PROT_READ|PROT_WRITE, MAP_SHARED, video_fd, 0);
        if ((unsigned char*)-1 == s->video_buf) {
            s->video_buf = mmap(0, s->gb_buffers.size, PROT_READ|PROT_WRITE, MAP_PRIVATE, video_fd, 0);
            if ((unsigned char*)-1 == s->video_buf) {
                av_log(s1, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
                goto fail;
            }
        }
        s->gb_frame = 0;
        s->time_frame = av_gettime() * s->time_base.den / s->time_base.num;

        /* start to grab the first frame */
        s->gb_buf.frame = s->gb_frame % s->gb_buffers.frames;
        s->gb_buf.height = s->video_win.height;
        s->gb_buf.width = s->video_win.width;
        s->gb_buf.format = pict.palette;

        if (ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf) < 0) {
            if (errno != EAGAIN) {
            fail1:
                av_log(s1, AV_LOG_ERROR, "VIDIOCMCAPTURE: %s\n", strerror(errno));
            } else {
                av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not receive any video signal\n");
            }
            goto fail;
        }
        /* queue the remaining buffers so capture is continuously primed */
        for (j = 1; j < s->gb_buffers.frames; j++) {
            s->gb_buf.frame = j;
            ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
        }
        s->frame_format = s->gb_buf.format;
        s->use_mmap = 1;
    }

    /* derive frame size and pixel format from the negotiated palette */
    for (j = 0; j < vformat_num; j++) {
        if (s->frame_format == video_formats[j].palette) {
            s->frame_size = s->video_win.width * s->video_win.height * video_formats[j].depth / 8;
            st->codec->pix_fmt = video_formats[j].pix_fmt;
            break;
        }
    }

    if (j >= vformat_num)
        goto fail;

    s->fd = video_fd;

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
    st->codec->width = s->video_win.width;
    st->codec->height = s->video_win.height;
    st->codec->time_base = s->time_base;
    st->codec->bit_rate = s->frame_size * 1/av_q2d(st->codec->time_base) * 8;

    return 0;
 fail:
    if (video_fd >= 0)
        close(video_fd);
    return AVERROR(EIO);
}
 
/* Wait for the current mmap buffer to be filled, copy the frame into buf,
 * and immediately re-queue the buffer for the next capture.
 * Returns the frame size in bytes, or a negative AVERROR on failure. */
static int v4l_mm_read_picture(VideoData *s, uint8_t *buf)
{
    uint8_t *src;
    int ret;

    /* block until the driver finished this buffer, retrying on
     * interruption or transient unavailability */
    do {
        ret = ioctl(s->fd, VIDIOCSYNC, &s->gb_frame);
    } while (ret < 0 && (errno == EAGAIN || errno == EINTR));

    src = s->video_buf + s->gb_buffers.offsets[s->gb_frame];
    memcpy(buf, src, s->frame_size);

    /* Setup to capture the next frame */
    s->gb_buf.frame = s->gb_frame;
    if (ioctl(s->fd, VIDIOCMCAPTURE, &s->gb_buf) < 0) {
        if (errno == EAGAIN)
            av_log(NULL, AV_LOG_ERROR, "Cannot Sync\n");
        else
            av_log(NULL, AV_LOG_ERROR, "VIDIOCMCAPTURE: %s\n", strerror(errno));
        return AVERROR(EIO);
    }

    /* This is now the grabbing frame */
    s->gb_frame = (s->gb_frame + 1) % s->gb_buffers.frames;

    return s->frame_size;
}
 
/* Grab one frame, pacing reads to the configured frame rate: sleep until
 * the next frame is due, then fetch it via mmap or read().  The packet pts
 * is the wall-clock time the frame was taken. */
static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoData *s = s1->priv_data;
    int64_t curtime, delay;
    struct timespec ts;

    /* Calculate the time of the next frame */
    s->time_frame += INT64_C(1000000);

    /* wait based on the frame rate */
    for(;;) {
        curtime = av_gettime();
        /* microseconds remaining until the next frame is due */
        delay = s->time_frame * s->time_base.num / s->time_base.den - curtime;
        if (delay <= 0) {
            /* more than one frame period late: skip one frame slot (drop) */
            if (delay < INT64_C(-1000000) * s->time_base.num / s->time_base.den) {
                /* printf("grabbing is %d frames late (dropping)\n", (int) -(delay / 16666)); */
                s->time_frame += INT64_C(1000000);
            }
            break;
        }
        ts.tv_sec = delay / 1000000;
        ts.tv_nsec = (delay % 1000000) * 1000;
        nanosleep(&ts, NULL);
    }

    if (av_new_packet(pkt, s->frame_size) < 0)
        return AVERROR(EIO);

    /* pts in microseconds (stream time base set to 1/1000000 at open) */
    pkt->pts = curtime;

    /* read one frame */
    if (s->use_mmap) {
        return v4l_mm_read_picture(s, pkt->data);
    } else {
        if (read(s->fd, pkt->data, pkt->size) != pkt->size)
            return AVERROR(EIO);
        return s->frame_size;
    }
}
 
/* Release capture resources: unmap the frame buffers (if mmap mode was
 * used), mute the audio, and close the device. */
static int grab_read_close(AVFormatContext *s1)
{
    VideoData *vd = s1->priv_data;

    if (vd->use_mmap)
        munmap(vd->video_buf, vd->gb_buffers.size);

    /* Force-mute on close: the BTTV driver does not report its mute
     * state correctly, so simply restoring the saved state is not enough. */
    vd->audio_saved.flags |= VIDEO_AUDIO_MUTE;
    ioctl(vd->fd, VIDIOCSAUDIO, &vd->audio_saved);

    close(vd->fd);
    return 0;
}
 
/* Private options: "standard" selects the TV standard passed to the tuner
 * (VIDIOCSTUNER); defaults to NTSC. */
static const AVOption options[] = {
    { "standard", "", offsetof(VideoData, standard), AV_OPT_TYPE_INT, {.i64 = VIDEO_MODE_NTSC}, VIDEO_MODE_PAL, VIDEO_MODE_NTSC, AV_OPT_FLAG_DECODING_PARAM, "standard" },
    { "PAL",   "", 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_MODE_PAL},   0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
    { "SECAM", "", 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_MODE_SECAM}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
    { "NTSC",  "", 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_MODE_NTSC},  0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
    { NULL },
};
 
/* AVClass wiring the private options above into the device context. */
static const AVClass v4l_class = {
    .class_name = "V4L indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/* Demuxer definition for the (deprecated) Video4Linux-1 grab device.
 * AVFMT_NOFILE: the device is opened directly from the filename. */
AVInputFormat ff_v4l_demuxer = {
    .name           = "video4linux,v4l",
    .long_name      = NULL_IF_CONFIG_SMALL("Video4Linux device grab"),
    .priv_data_size = sizeof(VideoData),
    .read_header    = grab_read_header,
    .read_packet    = grab_read_packet,
    .read_close     = grab_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &v4l_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/v4l2-common.c
0,0 → 1,96
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "v4l2-common.h"
 
/* Conversion table between FFmpeg pixel formats / codec ids and V4L2 fourccs.
 * A pixel format may map to several V4L2 fourccs (first match wins on
 * forward lookup); compressed formats use AV_PIX_FMT_NONE with a codec id.
 * Terminated by the AV_CODEC_ID_NONE sentinel entry. */
const struct fmt_map avpriv_fmt_conversion_table[] = {
    //ff_fmt              codec_id              v4l2_fmt
    { AV_PIX_FMT_YUV420P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV420  },
    { AV_PIX_FMT_YUV420P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YVU420  },
    { AV_PIX_FMT_YUV422P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV422P },
    { AV_PIX_FMT_YUYV422, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUYV    },
    { AV_PIX_FMT_UYVY422, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_UYVY    },
    { AV_PIX_FMT_YUV411P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV411P },
    { AV_PIX_FMT_YUV410P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV410  },
    { AV_PIX_FMT_YUV410P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YVU410  },
    { AV_PIX_FMT_RGB555LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB555  },
    { AV_PIX_FMT_RGB555BE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB555X },
    { AV_PIX_FMT_RGB565LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB565  },
    { AV_PIX_FMT_RGB565BE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB565X },
    { AV_PIX_FMT_BGR24,   AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_BGR24   },
    { AV_PIX_FMT_RGB24,   AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB24   },
    { AV_PIX_FMT_BGR0,    AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_BGR32   },
    { AV_PIX_FMT_0RGB,    AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB32   },
    { AV_PIX_FMT_GRAY8,   AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_GREY    },
#ifdef V4L2_PIX_FMT_Y16
    { AV_PIX_FMT_GRAY16LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_Y16     },
#endif
    { AV_PIX_FMT_NV12,    AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_NV12    },
    { AV_PIX_FMT_NONE,    AV_CODEC_ID_MJPEG,    V4L2_PIX_FMT_MJPEG   },
    { AV_PIX_FMT_NONE,    AV_CODEC_ID_MJPEG,    V4L2_PIX_FMT_JPEG    },
#ifdef V4L2_PIX_FMT_H264
    { AV_PIX_FMT_NONE,    AV_CODEC_ID_H264,     V4L2_PIX_FMT_H264    },
#endif
#ifdef V4L2_PIX_FMT_CPIA1
    { AV_PIX_FMT_NONE,    AV_CODEC_ID_CPIA,     V4L2_PIX_FMT_CPIA1   },
#endif
    { AV_PIX_FMT_NONE,    AV_CODEC_ID_NONE,     0                    },
};
 
uint32_t avpriv_fmt_ff2v4l(enum AVPixelFormat pix_fmt, enum AVCodecID codec_id)
{
int i;
 
for (i = 0; avpriv_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
if ((codec_id == AV_CODEC_ID_NONE ||
avpriv_fmt_conversion_table[i].codec_id == codec_id) &&
(pix_fmt == AV_PIX_FMT_NONE ||
avpriv_fmt_conversion_table[i].ff_fmt == pix_fmt)) {
return avpriv_fmt_conversion_table[i].v4l2_fmt;
}
}
 
return 0;
}
 
enum AVPixelFormat avpriv_fmt_v4l2ff(uint32_t v4l2_fmt, enum AVCodecID codec_id)
{
int i;
 
for (i = 0; avpriv_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
if (avpriv_fmt_conversion_table[i].v4l2_fmt == v4l2_fmt &&
avpriv_fmt_conversion_table[i].codec_id == codec_id) {
return avpriv_fmt_conversion_table[i].ff_fmt;
}
}
 
return AV_PIX_FMT_NONE;
}
 
enum AVCodecID avpriv_fmt_v4l2codec(uint32_t v4l2_fmt)
{
int i;
 
for (i = 0; avpriv_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
if (avpriv_fmt_conversion_table[i].v4l2_fmt == v4l2_fmt) {
return avpriv_fmt_conversion_table[i].codec_id;
}
}
 
return AV_CODEC_ID_NONE;
}
/contrib/sdk/sources/ffmpeg/libavdevice/v4l2-common.h
0,0 → 1,62
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVDEVICE_V4L2_COMMON_H
#define AVDEVICE_V4L2_COMMON_H
 
#undef __STRICT_ANSI__ //workaround due to broken kernel headers
#include "config.h"
#include "libavformat/internal.h"
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>
#if HAVE_SYS_VIDEOIO_H
#include <sys/videoio.h>
#else
#if HAVE_ASM_TYPES_H
#include <asm/types.h>
#endif
#include <linux/videodev2.h>
#endif
#include "libavutil/atomic.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "avdevice.h"
#include "timefilter.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/time.h"
#include "libavutil/avstring.h"
 
/**
 * One entry of the FFmpeg <-> V4L2 pixel format conversion table.
 */
struct fmt_map {
    enum AVPixelFormat ff_fmt; /**< FFmpeg pixel format (AV_PIX_FMT_NONE for compressed formats) */
    enum AVCodecID codec_id;   /**< codec id (AV_CODEC_ID_RAWVIDEO for raw formats) */
    uint32_t v4l2_fmt;         /**< V4L2_PIX_FMT_* fourcc */
};
 
extern av_export const struct fmt_map avpriv_fmt_conversion_table[];
 
uint32_t avpriv_fmt_ff2v4l(enum AVPixelFormat pix_fmt, enum AVCodecID codec_id);
enum AVPixelFormat avpriv_fmt_v4l2ff(uint32_t v4l2_fmt, enum AVCodecID codec_id);
enum AVCodecID avpriv_fmt_v4l2codec(uint32_t v4l2_fmt);
 
#endif /* AVDEVICE_V4L2_COMMON_H */
/contrib/sdk/sources/ffmpeg/libavdevice/v4l2.c
0,0 → 1,1031
/*
* Copyright (c) 2000,2001 Fabrice Bellard
* Copyright (c) 2006 Luca Abeni
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* Video4Linux2 grab interface
*
* Part of this file is based on the V4L2 video capture example
* (http://linuxtv.org/downloads/v4l-dvb-apis/capture-example.html)
*
* Thanks to Michael Niedermayer for providing the mapping between
* V4L2_PIX_FMT_* and AV_PIX_FMT_*
*/
 
#include "v4l2-common.h"
 
#if CONFIG_LIBV4L2
#include <libv4l2.h>
#endif
 
static const int desired_video_buffers = 256;
 
#define V4L_ALLFORMATS 3
#define V4L_RAWFORMATS 1
#define V4L_COMPFORMATS 2
 
/**
* Return timestamps to the user exactly as returned by the kernel
*/
#define V4L_TS_DEFAULT 0
/**
* Autodetect the kind of timestamps returned by the kernel and convert to
* absolute (wall clock) timestamps.
*/
#define V4L_TS_ABS 1
/**
* Assume kernel timestamps are from the monotonic clock and convert to
* absolute timestamps.
*/
#define V4L_TS_MONO2ABS 2
 
/**
* Once the kind of timestamps returned by the kernel have been detected,
* the value of the timefilter (NULL or not) determines whether a conversion
* takes place.
*/
#define V4L_TS_CONVERT_READY V4L_TS_DEFAULT
 
/* Per-instance state of the V4L2 capture device. */
struct video_data {
    AVClass *class;              /* class for private options */
    int fd;                      /* device file descriptor */
    int frame_format; /* V4L2_PIX_FMT_* */
    int width, height;           /* negotiated frame geometry */
    int frame_size;              /* expected bytes per frame (0 = unknown) */
    int interlaced;              /* set when the driver reports V4L2_FIELD_INTERLACED */
    int top_field_first;
    int ts_mode;                 /* V4L_TS_* timestamp handling mode */
    TimeFilter *timefilter;      /* non-NULL when monotonic->absolute conversion is active */
    int64_t last_time_m;         /* last monotonic time fed to the timefilter */

    int buffers;                 /* number of mmap'ed capture buffers */
    volatile int buffers_queued; /* buffers currently owned by the driver (atomic) */
    void **buf_start;            /* per-buffer mmap base addresses */
    unsigned int *buf_len;       /* per-buffer mmap lengths */
    char *standard;              /* requested TV standard name, or NULL */
    v4l2_std_id std_id;          /* current standard id as reported by the driver */
    int channel;
    char *pixel_format; /**< Set by a private option. */
    int list_format;    /**< Set by a private option. */
    int list_standard;  /**< Set by a private option. */
    char *framerate;    /**< Set by a private option. */

    int use_libv4l2;             /* route syscalls through libv4l2 wrappers */
    /* Syscall wrappers: either the plain libc calls or their libv4l2
     * equivalents, selected in device_open(). */
    int (*open_f)(const char *file, int oflag, ...);
    int (*close_f)(int fd);
    int (*dup_f)(int fd);
    int (*ioctl_f)(int fd, unsigned long int request, ...);
    ssize_t (*read_f)(int fd, void *buffer, size_t n);
    void *(*mmap_f)(void *start, size_t length, int prot, int flags, int fd, int64_t offset);
    int (*munmap_f)(void *_start, size_t length);
};
 
/* Context handed to mmap_release_buffer() so a dequeued frame buffer can be
 * re-queued to the driver when its packet is freed. */
struct buff_data {
    struct video_data *s; /* owning capture context */
    int index;            /* index of the v4l2 buffer to re-queue */
};
 
/* Open the capture device and verify it supports video capture with
 * streaming I/O.  Also installs the syscall wrapper table (plain libc or
 * libv4l2) used by all subsequent device access in this file.
 * Returns the open fd on success, a negative AVERROR code on failure. */
static int device_open(AVFormatContext *ctx)
{
    struct video_data *s = ctx->priv_data;
    struct v4l2_capability cap;
    int fd;
    int ret;
    int flags = O_RDWR;

/* Fill the wrapper-function table with either the libc calls (empty
 * prefix) or the libv4l2 ones (prefix v4l2_). */
#define SET_WRAPPERS(prefix) do {       \
    s->open_f   = prefix ## open;       \
    s->close_f  = prefix ## close;      \
    s->dup_f    = prefix ## dup;        \
    s->ioctl_f  = prefix ## ioctl;      \
    s->read_f   = prefix ## read;       \
    s->mmap_f   = prefix ## mmap;       \
    s->munmap_f = prefix ## munmap;     \
} while (0)

    if (s->use_libv4l2) {
#if CONFIG_LIBV4L2
        SET_WRAPPERS(v4l2_);
#else
        av_log(ctx, AV_LOG_ERROR, "libavdevice is not build with libv4l2 support.\n");
        return AVERROR(EINVAL);
#endif
    } else {
        SET_WRAPPERS();
    }

/* From here on (for the rest of this file), v4l2_* names transparently go
 * through the wrapper table installed above. */
#define v4l2_open   s->open_f
#define v4l2_close  s->close_f
#define v4l2_dup    s->dup_f
#define v4l2_ioctl  s->ioctl_f
#define v4l2_read   s->read_f
#define v4l2_mmap   s->mmap_f
#define v4l2_munmap s->munmap_f

    if (ctx->flags & AVFMT_FLAG_NONBLOCK) {
        flags |= O_NONBLOCK;
    }

    fd = v4l2_open(ctx->filename, flags, 0);
    if (fd < 0) {
        ret = AVERROR(errno);
        av_log(ctx, AV_LOG_ERROR, "Cannot open video device %s: %s\n",
               ctx->filename, av_err2str(ret));
        return ret;
    }

    if (v4l2_ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
        ret = AVERROR(errno);
        av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYCAP): %s\n",
               av_err2str(ret));
        goto fail;
    }

    av_log(ctx, AV_LOG_VERBOSE, "fd:%d capabilities:%x\n",
           fd, cap.capabilities);

    if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
        av_log(ctx, AV_LOG_ERROR, "Not a video capture device.\n");
        ret = AVERROR(ENODEV);
        goto fail;
    }

    /* only mmap streaming I/O is implemented here (see mmap_init()) */
    if (!(cap.capabilities & V4L2_CAP_STREAMING)) {
        av_log(ctx, AV_LOG_ERROR,
               "The device does not support the streaming I/O method.\n");
        ret = AVERROR(ENOSYS);
        goto fail;
    }

    return fd;

fail:
    v4l2_close(fd);
    return ret;
}
 
/* Negotiate frame geometry and pixel format with the driver (VIDIOC_S_FMT).
 * The driver may adjust width/height: the adjusted values are written back
 * through the pointers.  A changed pixel format is reported as an error.
 * Also records whether the driver chose interlaced capture. */
static int device_init(AVFormatContext *ctx, int *width, int *height,
                       uint32_t pix_fmt)
{
    struct video_data *s = ctx->priv_data;
    struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
    int fd = s->fd;
    int err = 0;

    fmt.fmt.pix.width       = *width;
    fmt.fmt.pix.height      = *height;
    fmt.fmt.pix.pixelformat = pix_fmt;
    fmt.fmt.pix.field       = V4L2_FIELD_ANY;

    if (v4l2_ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
        err = AVERROR(errno);

    /* propagate any driver-side geometry adjustment back to the caller */
    if (*width != fmt.fmt.pix.width || *height != fmt.fmt.pix.height) {
        av_log(ctx, AV_LOG_INFO,
               "The V4L2 driver changed the video from %dx%d to %dx%d\n",
               *width, *height, fmt.fmt.pix.width, fmt.fmt.pix.height);
        *width  = fmt.fmt.pix.width;
        *height = fmt.fmt.pix.height;
    }

    if (pix_fmt != fmt.fmt.pix.pixelformat) {
        av_log(ctx, AV_LOG_DEBUG,
               "The V4L2 driver changed the pixel format "
               "from 0x%08X to 0x%08X\n",
               pix_fmt, fmt.fmt.pix.pixelformat);
        err = AVERROR(EINVAL);
    }

    if (fmt.fmt.pix.field == V4L2_FIELD_INTERLACED) {
        av_log(ctx, AV_LOG_DEBUG,
               "The V4L2 driver is using the interlaced mode\n");
        s->interlaced = 1;
    }

    return err;
}
 
/* Return 1 unless the current video standard is NTSC or cannot be queried,
 * in which case return 0.  ('s' is referenced implicitly through the
 * v4l2_ioctl wrapper macro.) */
static int first_field(const struct video_data *s, int fd)
{
    v4l2_std_id std;

    if (v4l2_ioctl(fd, VIDIOC_G_STD, &std) < 0)
        return 0;

    return (std & V4L2_STD_NTSC) ? 0 : 1;
}
 
#if HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE
/* Log every frame size the device supports for the given pixel format:
 * either discrete WxH pairs or {min-max, step} ranges.
 * Note: 's' is used implicitly through the v4l2_ioctl wrapper macro. */
static void list_framesizes(AVFormatContext *ctx, int fd, uint32_t pixelformat)
{
    const struct video_data *s = ctx->priv_data;
    struct v4l2_frmsizeenum vfse = { .pixel_format = pixelformat };

    /* enumerate until the driver rejects the next index */
    while(!v4l2_ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &vfse)) {
        switch (vfse.type) {
        case V4L2_FRMSIZE_TYPE_DISCRETE:
            av_log(ctx, AV_LOG_INFO, " %ux%u",
                   vfse.discrete.width, vfse.discrete.height);
        break;
        case V4L2_FRMSIZE_TYPE_CONTINUOUS:
        case V4L2_FRMSIZE_TYPE_STEPWISE:
            av_log(ctx, AV_LOG_INFO, " {%u-%u, %u}x{%u-%u, %u}",
                   vfse.stepwise.min_width,
                   vfse.stepwise.max_width,
                   vfse.stepwise.step_width,
                   vfse.stepwise.min_height,
                   vfse.stepwise.max_height,
                   vfse.stepwise.step_height);
        }
        vfse.index++;
    }
}
#endif
 
/* Log the pixel formats the device supports, filtered by 'type'
 * (V4L_RAWFORMATS and/or V4L_COMPFORMATS), together with the frame sizes
 * available for each.  's' is used implicitly via the v4l2_ioctl macro. */
static void list_formats(AVFormatContext *ctx, int fd, int type)
{
    const struct video_data *s = ctx->priv_data;
    struct v4l2_fmtdesc vfd = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };

    while(!v4l2_ioctl(fd, VIDIOC_ENUM_FMT, &vfd)) {
        /* map the driver fourcc to an FFmpeg codec/pixel format for display */
        enum AVCodecID codec_id = avpriv_fmt_v4l2codec(vfd.pixelformat);
        enum AVPixelFormat pix_fmt = avpriv_fmt_v4l2ff(vfd.pixelformat, codec_id);

        vfd.index++;

        if (!(vfd.flags & V4L2_FMT_FLAG_COMPRESSED) &&
            type & V4L_RAWFORMATS) {
            const char *fmt_name = av_get_pix_fmt_name(pix_fmt);
            av_log(ctx, AV_LOG_INFO, "Raw       : %9s : %20s :",
                   fmt_name ? fmt_name : "Unsupported",
                   vfd.description);
        } else if (vfd.flags & V4L2_FMT_FLAG_COMPRESSED &&
                   type & V4L_COMPFORMATS) {
            AVCodec *codec = avcodec_find_decoder(codec_id);
            av_log(ctx, AV_LOG_INFO, "Compressed: %9s : %20s :",
                   codec ? codec->name : "Unsupported",
                   vfd.description);
        } else {
            /* format filtered out by 'type': skip it */
            continue;
        }

#ifdef V4L2_FMT_FLAG_EMULATED
        if (vfd.flags & V4L2_FMT_FLAG_EMULATED)
            av_log(ctx, AV_LOG_INFO, " Emulated :");
#endif
#if HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE
        list_framesizes(ctx, fd, vfd.pixelformat);
#endif
        av_log(ctx, AV_LOG_INFO, "\n");
    }
}
 
/* Log all video standards supported by the device (index, id, name).
 * Does nothing when the device reported no standard (std_id == 0). */
static void list_standards(AVFormatContext *ctx)
{
    int ret;
    struct video_data *s = ctx->priv_data;
    /* only .index is set before each ioctl; the driver fills the rest.
     * NOTE(review): the struct is otherwise uninitialized — presumably
     * VIDIOC_ENUMSTD only reads .index; confirm against the V4L2 spec. */
    struct v4l2_standard standard;

    if (s->std_id == 0)
        return;

    for (standard.index = 0; ; standard.index++) {
        if (v4l2_ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
            ret = AVERROR(errno);
            if (ret == AVERROR(EINVAL)) {
                /* EINVAL marks the end of the enumeration */
                break;
            } else {
                av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_ENUMSTD): %s\n", av_err2str(ret));
                return;
            }
        }
        av_log(ctx, AV_LOG_INFO, "%2d, %16"PRIx64", %s\n",
               standard.index, (uint64_t)standard.id, standard.name);
    }
}
 
/**
 * Request capture buffers from the driver and mmap each one into the
 * process.  Fills s->buffers, s->buf_start[] and s->buf_len[].
 *
 * Fix vs. the previous revision: the error paths used to leak the
 * buf_start/buf_len arrays and leave already-mapped buffers mapped; they
 * are now unwound before returning.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int mmap_init(AVFormatContext *ctx)
{
    int i, res;
    struct video_data *s = ctx->priv_data;
    struct v4l2_requestbuffers req = {
        .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
        .count  = desired_video_buffers,
        .memory = V4L2_MEMORY_MMAP
    };

    /* the driver may grant fewer buffers than requested */
    if (v4l2_ioctl(s->fd, VIDIOC_REQBUFS, &req) < 0) {
        res = AVERROR(errno);
        av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_REQBUFS): %s\n", av_err2str(res));
        return res;
    }

    if (req.count < 2) {
        av_log(ctx, AV_LOG_ERROR, "Insufficient buffer memory\n");
        return AVERROR(ENOMEM);
    }
    s->buffers = req.count;
    s->buf_start = av_malloc(sizeof(void *) * s->buffers);
    if (s->buf_start == NULL) {
        av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer pointers\n");
        return AVERROR(ENOMEM);
    }
    s->buf_len = av_malloc(sizeof(unsigned int) * s->buffers);
    if (s->buf_len == NULL) {
        av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer sizes\n");
        av_freep(&s->buf_start);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < req.count; i++) {
        struct v4l2_buffer buf = {
            .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
            .index  = i,
            .memory = V4L2_MEMORY_MMAP
        };
        if (v4l2_ioctl(s->fd, VIDIOC_QUERYBUF, &buf) < 0) {
            res = AVERROR(errno);
            av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYBUF): %s\n", av_err2str(res));
            goto fail;
        }

        s->buf_len[i] = buf.length;
        if (s->frame_size > 0 && s->buf_len[i] < s->frame_size) {
            av_log(ctx, AV_LOG_ERROR,
                   "buf_len[%d] = %d < expected frame size %d\n",
                   i, s->buf_len[i], s->frame_size);
            res = AVERROR(ENOMEM);
            goto fail;
        }
        s->buf_start[i] = v4l2_mmap(NULL, buf.length,
                                    PROT_READ | PROT_WRITE, MAP_SHARED,
                                    s->fd, buf.m.offset);

        if (s->buf_start[i] == MAP_FAILED) {
            res = AVERROR(errno);
            av_log(ctx, AV_LOG_ERROR, "mmap: %s\n", av_err2str(res));
            goto fail;
        }
    }

    return 0;

fail:
    /* unwind: unmap the buffers mapped so far and release the arrays */
    while (--i >= 0)
        v4l2_munmap(s->buf_start[i], s->buf_len[i]);
    av_freep(&s->buf_start);
    av_freep(&s->buf_len);
    s->buffers = 0;
    return res;
}
 
#if FF_API_DESTRUCT_PACKET
/* Placeholder for the deprecated AVPacket.destruct field.  It must never
 * be invoked: buffers are actually released through pkt->buf
 * (mmap_release_buffer), so reaching this is a logic error. */
static void dummy_release_buffer(AVPacket *pkt)
{
    av_assert0(0);
}
#endif
 
/* AVBuffer free callback: re-queue the mmap'ed buffer to the driver and
 * release the buff_data descriptor allocated in mmap_read_frame(). */
static void mmap_release_buffer(void *opaque, uint8_t *data)
{
    struct v4l2_buffer buf = { 0 };
    int res;
    struct buff_data *buf_descriptor = opaque;
    struct video_data *s = buf_descriptor->s;

    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    buf.index = buf_descriptor->index;
    /* the descriptor is owned by this callback */
    av_free(buf_descriptor);

    if (v4l2_ioctl(s->fd, VIDIOC_QBUF, &buf) < 0) {
        res = AVERROR(errno);
        av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF): %s\n",
               av_err2str(res));
    }

    /* the buffer is back with the driver (or at least no longer ours) */
    avpriv_atomic_int_add_and_fetch(&s->buffers_queued, 1);
}
 
#if HAVE_CLOCK_GETTIME && defined(CLOCK_MONOTONIC)
/* Current monotonic-clock time in microseconds (not affected by
 * wall-clock adjustments). */
static int64_t av_gettime_monotonic(void)
{
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);
    return now.tv_nsec / 1000 + (int64_t)now.tv_sec * 1000000;
}
#endif
 
/* Classify, from the first kernel timestamp seen, whether the driver uses
 * absolute (wall clock) or monotonic timestamps, and arm the conversion
 * machinery accordingly (see the V4L_TS_* modes above).
 * Returns 0 on success, a negative AVERROR code if the timestamps cannot
 * be classified. */
static int init_convert_timestamp(AVFormatContext *ctx, int64_t ts)
{
    struct video_data *s = ctx->priv_data;
    int64_t now;

    now = av_gettime();
    /* a timestamp within [-10s, +1s] of the wall clock is taken to be
     * absolute already: no conversion needed */
    if (s->ts_mode == V4L_TS_ABS &&
        ts <= now + 1 * AV_TIME_BASE && ts >= now - 10 * AV_TIME_BASE) {
        av_log(ctx, AV_LOG_INFO, "Detected absolute timestamps\n");
        s->ts_mode = V4L_TS_CONVERT_READY;
        return 0;
    }
#if HAVE_CLOCK_GETTIME && defined(CLOCK_MONOTONIC)
    now = av_gettime_monotonic();
    /* otherwise, close to the monotonic clock (or forced by the user):
     * set up a time filter to map monotonic -> absolute time */
    if (s->ts_mode == V4L_TS_MONO2ABS ||
        (ts <= now + 1 * AV_TIME_BASE && ts >= now - 10 * AV_TIME_BASE)) {
        AVRational tb = {AV_TIME_BASE, 1};
        int64_t period = av_rescale_q(1, tb, ctx->streams[0]->avg_frame_rate);
        av_log(ctx, AV_LOG_INFO, "Detected monotonic timestamps, converting\n");
        /* microseconds instead of seconds, MHz instead of Hz */
        s->timefilter = ff_timefilter_new(1, period, 1.0E-6);
        if (!s->timefilter)
            return AVERROR(ENOMEM);
        s->ts_mode = V4L_TS_CONVERT_READY;
        return 0;
    }
#endif
    av_log(ctx, AV_LOG_ERROR, "Unknown timestamps\n");
    return AVERROR(EIO);
}
 
/* Convert a kernel timestamp to an absolute timestamp in place, performing
 * the one-time mode detection on first use.  When no timefilter is active
 * the timestamp is passed through unchanged. */
static int convert_timestamp(AVFormatContext *ctx, int64_t *ts)
{
    struct video_data *s = ctx->priv_data;

    /* ts_mode != V4L_TS_CONVERT_READY (0): detection still pending */
    if (s->ts_mode) {
        int r = init_convert_timestamp(ctx, *ts);
        if (r < 0)
            return r;
    }
#if HAVE_CLOCK_GETTIME && defined(CLOCK_MONOTONIC)
    if (s->timefilter) {
        /* feed the filter the current wall/monotonic pair, then evaluate
         * it at the frame's monotonic offset */
        int64_t nowa = av_gettime();
        int64_t nowm = av_gettime_monotonic();
        ff_timefilter_update(s->timefilter, nowa, nowm - s->last_time_m);
        s->last_time_m = nowm;
        *ts = ff_timefilter_eval(s->timefilter, *ts - nowm);
    }
#endif
    return 0;
}
 
/* Dequeue one filled buffer from the driver and hand it to the caller as a
 * packet.  Normally the packet references the mmap'ed buffer directly (zero
 * copy) and re-queues it when freed; when queued buffers run low the frame
 * is copied and the buffer re-queued immediately.
 * Returns the buffer length on success, or a negative AVERROR code. */
static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
{
    struct video_data *s = ctx->priv_data;
    struct v4l2_buffer buf = {
        .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
        .memory = V4L2_MEMORY_MMAP
    };
    int res;

    /* FIXME: Some special treatment might be needed in case of loss of signal... */
    while ((res = v4l2_ioctl(s->fd, VIDIOC_DQBUF, &buf)) < 0 && (errno == EINTR));
    if (res < 0) {
        if (errno == EAGAIN) {
            /* non-blocking mode: no frame ready yet */
            pkt->size = 0;
            return AVERROR(EAGAIN);
        }
        res = AVERROR(errno);
        av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_DQBUF): %s\n", av_err2str(res));
        return res;
    }

    if (buf.index >= s->buffers) {
        av_log(ctx, AV_LOG_ERROR, "Invalid buffer index received.\n");
        return AVERROR(EINVAL);
    }
    avpriv_atomic_int_add_and_fetch(&s->buffers_queued, -1);
    // always keep at least one buffer queued
    av_assert0(avpriv_atomic_int_get(&s->buffers_queued) >= 1);

    /* CPIA is a compressed format and we don't know the exact number of bytes
     * used by a frame, so set it here as the driver announces it.
     */
    if (ctx->video_codec_id == AV_CODEC_ID_CPIA)
        s->frame_size = buf.bytesused;

    if (s->frame_size > 0 && buf.bytesused != s->frame_size) {
        av_log(ctx, AV_LOG_ERROR,
               "The v4l2 frame is %d bytes, but %d bytes are expected\n",
               buf.bytesused, s->frame_size);
        return AVERROR_INVALIDDATA;
    }

    /* Image is at s->buff_start[buf.index] */
    if (avpriv_atomic_int_get(&s->buffers_queued) == FFMAX(s->buffers / 8, 1)) {
        /* when we start getting low on queued buffers, fall back on copying data */
        res = av_new_packet(pkt, buf.bytesused);
        if (res < 0) {
            av_log(ctx, AV_LOG_ERROR, "Error allocating a packet.\n");
            /* give the buffer back to the driver before bailing out */
            if (v4l2_ioctl(s->fd, VIDIOC_QBUF, &buf) == 0)
                avpriv_atomic_int_add_and_fetch(&s->buffers_queued, 1);
            return res;
        }
        memcpy(pkt->data, s->buf_start[buf.index], buf.bytesused);

        if (v4l2_ioctl(s->fd, VIDIOC_QBUF, &buf) < 0) {
            res = AVERROR(errno);
            av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF): %s\n", av_err2str(res));
            av_free_packet(pkt);
            return res;
        }
        avpriv_atomic_int_add_and_fetch(&s->buffers_queued, 1);
    } else {
        /* zero-copy path: the packet points straight into the mmap'ed
         * buffer; mmap_release_buffer re-queues it when the packet dies */
        struct buff_data *buf_descriptor;

        pkt->data     = s->buf_start[buf.index];
        pkt->size     = buf.bytesused;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
        pkt->destruct = dummy_release_buffer;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

        buf_descriptor = av_malloc(sizeof(struct buff_data));
        if (buf_descriptor == NULL) {
            /* Something went wrong... Since av_malloc() failed, we cannot even
             * allocate a buffer for memcpying into it
             */
            av_log(ctx, AV_LOG_ERROR, "Failed to allocate a buffer descriptor\n");
            if (v4l2_ioctl(s->fd, VIDIOC_QBUF, &buf) == 0)
                avpriv_atomic_int_add_and_fetch(&s->buffers_queued, 1);

            return AVERROR(ENOMEM);
        }
        buf_descriptor->index = buf.index;
        buf_descriptor->s     = s;

        pkt->buf = av_buffer_create(pkt->data, pkt->size, mmap_release_buffer,
                                    buf_descriptor, 0);
        if (!pkt->buf) {
            av_log(ctx, AV_LOG_ERROR, "Failed to create a buffer\n");
            if (v4l2_ioctl(s->fd, VIDIOC_QBUF, &buf) == 0)
                avpriv_atomic_int_add_and_fetch(&s->buffers_queued, 1);
            av_freep(&buf_descriptor);
            return AVERROR(ENOMEM);
        }
    }
    /* kernel timestamp (tv_sec/tv_usec) -> microseconds, then converted to
     * absolute time if needed */
    pkt->pts = buf.timestamp.tv_sec * INT64_C(1000000) + buf.timestamp.tv_usec;
    convert_timestamp(ctx, &pkt->pts);

    return s->buf_len[buf.index];
}
 
/* Queue every mmap'ed buffer to the driver and start streaming capture.
 * Returns 0 on success, a negative AVERROR code on failure. */
static int mmap_start(AVFormatContext *ctx)
{
    struct video_data *s = ctx->priv_data;
    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    int i, err;

    for (i = 0; i < s->buffers; i++) {
        struct v4l2_buffer buf = {
            .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
            .memory = V4L2_MEMORY_MMAP,
            .index  = i,
        };

        if (v4l2_ioctl(s->fd, VIDIOC_QBUF, &buf) < 0) {
            err = AVERROR(errno);
            av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF): %s\n", av_err2str(err));
            return err;
        }
    }
    /* all buffers are now owned by the driver */
    s->buffers_queued = s->buffers;

    if (v4l2_ioctl(s->fd, VIDIOC_STREAMON, &type) < 0) {
        err = AVERROR(errno);
        av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_STREAMON): %s\n", av_err2str(err));
        return err;
    }

    return 0;
}
 
static void mmap_close(struct video_data *s)
{
enum v4l2_buf_type type;
int i;
 
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
/* We do not check for the result, because we could
* not do anything about it anyway...
*/
v4l2_ioctl(s->fd, VIDIOC_STREAMOFF, &type);
for (i = 0; i < s->buffers; i++) {
v4l2_munmap(s->buf_start[i], s->buf_len[i]);
}
av_free(s->buf_start);
av_free(s->buf_len);
}
 
/**
 * Negotiate TV standard and time-per-frame with the device.
 *
 * Parses the user-requested frame rate, optionally selects a named TV
 * standard, queries/sets the driver streaming parameters, and finally
 * publishes the effective frame rate on stream 0.
 *
 * @param s1 demuxer context; priv_data holds struct video_data
 * @return 0 on success, a negative AVERROR code otherwise
 */
static int v4l2_set_parameters(AVFormatContext *s1)
{
    struct video_data *s = s1->priv_data;
    struct v4l2_standard standard = { 0 };
    struct v4l2_streamparm streamparm = { 0 };
    struct v4l2_fract *tpf;
    AVRational framerate_q = { 0 };
    int i, ret;

    if (s->framerate &&
        (ret = av_parse_video_rate(&framerate_q, s->framerate)) < 0) {
        av_log(s1, AV_LOG_ERROR, "Could not parse framerate '%s'.\n",
               s->framerate);
        return ret;
    }

    if (s->standard) {
        if (s->std_id) {
            ret = 0;
            av_log(s1, AV_LOG_DEBUG, "Setting standard: %s\n", s->standard);
            /* set tv standard: enumerate until the name matches.
             * ENUMSTD fails (typically EINVAL) past the last entry. */
            for (i = 0; ; i++) {
                standard.index = i;
                if (v4l2_ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
                    ret = AVERROR(errno);
                    break;
                }
                if (!av_strcasecmp(standard.name, s->standard))
                    break;
            }
            if (ret < 0) {
                av_log(s1, AV_LOG_ERROR, "Unknown or unsupported standard '%s'\n", s->standard);
                return ret;
            }

            if (v4l2_ioctl(s->fd, VIDIOC_S_STD, &standard.id) < 0) {
                ret = AVERROR(errno);
                av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_S_STD): %s\n", av_err2str(ret));
                return ret;
            }
        } else {
            av_log(s1, AV_LOG_WARNING,
                   "This device does not support any standard\n");
        }
    }

    /* get standard; prefer the frame period advertised by the current
     * standard over the generic streaming parameters when available */
    if (v4l2_ioctl(s->fd, VIDIOC_G_STD, &s->std_id) == 0) {
        tpf = &standard.frameperiod;
        for (i = 0; ; i++) {
            standard.index = i;
            if (v4l2_ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
                ret = AVERROR(errno);
                if (ret == AVERROR(EINVAL)) {
                    /* end of enumeration without a match: fall back to
                     * the driver's streaming parameters below */
                    tpf = &streamparm.parm.capture.timeperframe;
                    break;
                }
                av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_ENUMSTD): %s\n", av_err2str(ret));
                return ret;
            }
            if (standard.id == s->std_id) {
                av_log(s1, AV_LOG_DEBUG,
                       "Current standard: %s, id: %"PRIx64", frameperiod: %d/%d\n",
                       standard.name, (uint64_t)standard.id, tpf->numerator, tpf->denominator);
                break;
            }
        }
    } else {
        /* device has no standard support (e.g. webcams) */
        tpf = &streamparm.parm.capture.timeperframe;
    }

    streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (v4l2_ioctl(s->fd, VIDIOC_G_PARM, &streamparm) < 0) {
        ret = AVERROR(errno);
        av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_PARM): %s\n", av_err2str(ret));
        return ret;
    }

    if (framerate_q.num && framerate_q.den) {
        if (streamparm.parm.capture.capability & V4L2_CAP_TIMEPERFRAME) {
            tpf = &streamparm.parm.capture.timeperframe;

            /* time per frame is the inverse of the requested frame rate */
            av_log(s1, AV_LOG_DEBUG, "Setting time per frame to %d/%d\n",
                   framerate_q.den, framerate_q.num);
            tpf->numerator   = framerate_q.den;
            tpf->denominator = framerate_q.num;

            if (v4l2_ioctl(s->fd, VIDIOC_S_PARM, &streamparm) < 0) {
                ret = AVERROR(errno);
                av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_S_PARM): %s\n", av_err2str(ret));
                return ret;
            }

            /* drivers may clamp the requested rate; report any change */
            if (framerate_q.num != tpf->denominator ||
                framerate_q.den != tpf->numerator) {
                av_log(s1, AV_LOG_INFO,
                       "The driver changed the time per frame from "
                       "%d/%d to %d/%d\n",
                       framerate_q.den, framerate_q.num,
                       tpf->numerator, tpf->denominator);
            }
        } else {
            av_log(s1, AV_LOG_WARNING,
                   "The driver does not allow to change time per frame\n");
        }
    }
    /* publish the effective frame rate (inverse of time per frame) */
    s1->streams[0]->avg_frame_rate.num = tpf->denominator;
    s1->streams[0]->avg_frame_rate.den = tpf->numerator;
    s1->streams[0]->r_frame_rate = s1->streams[0]->avg_frame_rate;

    return 0;
}
 
/**
 * Probe the device for a usable (V4L2 pixel format, codec) combination.
 *
 * First tries the V4L2 format matching @p pix_fmt and the user-forced
 * codec; if the driver rejects it with EINVAL, every entry of the
 * ff<->v4l2 conversion table compatible with the requested codec is
 * tried in turn.
 *
 * @param s1             demuxer context
 * @param pix_fmt        preferred ffmpeg pixel format (may be AV_PIX_FMT_NONE)
 * @param width          in/out frame width, updated by device_init()
 * @param height         in/out frame height, updated by device_init()
 * @param desired_format receives the accepted V4L2 pixel format, 0 if none
 * @param codec_id       receives the codec id matching *desired_format
 * @return >= 0 on success, a negative AVERROR code otherwise
 */
static int device_try_init(AVFormatContext *s1,
                           enum AVPixelFormat pix_fmt,
                           int *width,
                           int *height,
                           uint32_t *desired_format,
                           enum AVCodecID *codec_id)
{
    int ret, i;

    *desired_format = avpriv_fmt_ff2v4l(pix_fmt, s1->video_codec_id);

    if (*desired_format) {
        ret = device_init(s1, width, height, *desired_format);
        if (ret < 0) {
            *desired_format = 0;
            /* only EINVAL means "format unsupported, keep probing";
             * anything else is a hard failure */
            if (ret != AVERROR(EINVAL))
                return ret;
        }
    }

    if (!*desired_format) {
        for (i = 0; avpriv_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
            if (s1->video_codec_id == AV_CODEC_ID_NONE ||
                avpriv_fmt_conversion_table[i].codec_id == s1->video_codec_id) {
                av_log(s1, AV_LOG_DEBUG, "Trying to set codec:%s pix_fmt:%s\n",
                       avcodec_get_name(avpriv_fmt_conversion_table[i].codec_id),
                       (char *)av_x_if_null(av_get_pix_fmt_name(avpriv_fmt_conversion_table[i].ff_fmt), "none"));

                *desired_format = avpriv_fmt_conversion_table[i].v4l2_fmt;
                ret = device_init(s1, width, height, *desired_format);
                if (ret >= 0)
                    break;
                else if (ret != AVERROR(EINVAL))
                    return ret;
                *desired_format = 0;
            }
        }

        if (*desired_format == 0) {
            av_log(s1, AV_LOG_ERROR, "Cannot find a proper format for "
                   "codec '%s' (id %d), pixel format '%s' (id %d)\n",
                   avcodec_get_name(s1->video_codec_id), s1->video_codec_id,
                   (char *)av_x_if_null(av_get_pix_fmt_name(pix_fmt), "none"), pix_fmt);
            ret = AVERROR(EINVAL);
        }
    }

    /* NOTE(review): when no format was found (*desired_format == 0) the
     * assert below looks like it would fire before the EINVAL above is
     * returned — confirm avpriv_fmt_v4l2codec(0) behavior upstream. */
    *codec_id = avpriv_fmt_v4l2codec(*desired_format);
    av_assert0(*codec_id != AV_CODEC_ID_NONE);
    return ret;
}
 
/**
 * Demuxer read_header: open the device, select the input channel,
 * negotiate frame size/format/rate, mmap the capture buffers and start
 * streaming.
 *
 * Fix: the device fd is now closed on every error path after
 * device_open() — it used to leak on the S_INPUT/G_INPUT/ENUMINPUT/
 * G_FMT failures, on the list_formats/list_standards exits and on
 * av_image_check_size()/v4l2_set_parameters() failures. The
 * mmap_init() result is also compared with < 0 explicitly, consistent
 * with every other check in this function.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int v4l2_read_header(AVFormatContext *s1)
{
    struct video_data *s = s1->priv_data;
    AVStream *st;
    int res = 0;
    uint32_t desired_format;
    enum AVCodecID codec_id = AV_CODEC_ID_NONE;
    enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE;
    struct v4l2_input input = { 0 };

    st = avformat_new_stream(s1, NULL);
    if (!st)
        return AVERROR(ENOMEM);

#if CONFIG_LIBV4L2
    /* silence libv4l2 logging. if fopen() fails v4l2_log_file will be NULL
       and errors will get sent to stderr */
    if (s->use_libv4l2)
        v4l2_log_file = fopen("/dev/null", "w");
#endif

    s->fd = device_open(s1);
    if (s->fd < 0)
        return s->fd;

    if (s->channel != -1) {
        /* set video input */
        av_log(s1, AV_LOG_DEBUG, "Selecting input_channel: %d\n", s->channel);
        if (v4l2_ioctl(s->fd, VIDIOC_S_INPUT, &s->channel) < 0) {
            res = AVERROR(errno);
            av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_S_INPUT): %s\n", av_err2str(res));
            goto fail;
        }
    } else {
        /* get current video input */
        if (v4l2_ioctl(s->fd, VIDIOC_G_INPUT, &s->channel) < 0) {
            res = AVERROR(errno);
            av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_INPUT): %s\n", av_err2str(res));
            goto fail;
        }
    }

    /* enum input */
    input.index = s->channel;
    if (v4l2_ioctl(s->fd, VIDIOC_ENUMINPUT, &input) < 0) {
        res = AVERROR(errno);
        av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_ENUMINPUT): %s\n", av_err2str(res));
        goto fail;
    }
    s->std_id = input.std;
    av_log(s1, AV_LOG_DEBUG, "Current input_channel: %d, input_name: %s, input_std: %"PRIx64"\n",
           s->channel, input.name, (uint64_t)input.std);

    if (s->list_format) {
        list_formats(s1, s->fd, s->list_format);
        res = AVERROR_EXIT;
        goto fail;
    }

    if (s->list_standard) {
        list_standards(s1);
        res = AVERROR_EXIT;
        goto fail;
    }

    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    if (s->pixel_format) {
        AVCodec *codec = avcodec_find_decoder_by_name(s->pixel_format);

        if (codec)
            s1->video_codec_id = codec->id;

        pix_fmt = av_get_pix_fmt(s->pixel_format);

        if (pix_fmt == AV_PIX_FMT_NONE && !codec) {
            av_log(s1, AV_LOG_ERROR, "No such input format: %s.\n",
                   s->pixel_format);

            res = AVERROR(EINVAL);
            goto fail;
        }
    }

    if (!s->width && !s->height) {
        struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };

        av_log(s1, AV_LOG_VERBOSE,
               "Querying the device for the current frame size\n");
        if (v4l2_ioctl(s->fd, VIDIOC_G_FMT, &fmt) < 0) {
            res = AVERROR(errno);
            av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_FMT): %s\n", av_err2str(res));
            goto fail;
        }

        s->width  = fmt.fmt.pix.width;
        s->height = fmt.fmt.pix.height;
        av_log(s1, AV_LOG_VERBOSE,
               "Setting frame size to %dx%d\n", s->width, s->height);
    }

    res = device_try_init(s1, pix_fmt, &s->width, &s->height, &desired_format, &codec_id);
    if (res < 0)
        goto fail;

    /* If no pixel_format was specified, the codec_id was not known up
     * until now. Set video_codec_id in the context, as codec_id will
     * not be available outside this function
     */
    if (codec_id != AV_CODEC_ID_NONE && s1->video_codec_id == AV_CODEC_ID_NONE)
        s1->video_codec_id = codec_id;

    if ((res = av_image_check_size(s->width, s->height, 0, s1)) < 0)
        goto fail;

    s->frame_format = desired_format;

    if ((res = v4l2_set_parameters(s1)) < 0)
        goto fail;

    st->codec->pix_fmt = avpriv_fmt_v4l2ff(desired_format, codec_id);
    s->frame_size =
        avpicture_get_size(st->codec->pix_fmt, s->width, s->height);

    if ((res = mmap_init(s1)) < 0 ||
        (res = mmap_start(s1)) < 0)
        goto fail;

    s->top_field_first = first_field(s, s->fd);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = codec_id;
    if (codec_id == AV_CODEC_ID_RAWVIDEO)
        st->codec->codec_tag =
            avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
    else if (codec_id == AV_CODEC_ID_H264) {
        /* the H.264 bytestream needs a parser to find frame boundaries */
        st->need_parsing = AVSTREAM_PARSE_HEADERS;
    }
    if (desired_format == V4L2_PIX_FMT_YVU420)
        st->codec->codec_tag = MKTAG('Y', 'V', '1', '2');
    else if (desired_format == V4L2_PIX_FMT_YVU410)
        st->codec->codec_tag = MKTAG('Y', 'V', 'U', '9');
    st->codec->width  = s->width;
    st->codec->height = s->height;
    st->codec->bit_rate = s->frame_size * av_q2d(st->avg_frame_rate) * 8;

    return 0;

fail:
    /* common error exit: the device was opened above, do not leak it */
    v4l2_close(s->fd);
    return res;
}
 
/**
 * Demuxer read_packet: fetch one captured frame and, for interlaced
 * sources, mirror the field ordering onto the coded frame.
 *
 * @return packet size on success, a negative AVERROR code on failure
 */
static int v4l2_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    struct video_data *s = s1->priv_data;
    AVFrame *coded = s1->streams[0]->codec->coded_frame;
    int ret;

    av_init_packet(pkt);
    ret = mmap_read_frame(s1, pkt);
    if (ret < 0)
        return ret;

    if (coded && s->interlaced) {
        coded->interlaced_frame = 1;
        coded->top_field_first  = s->top_field_first;
    }

    return pkt->size;
}
 
/**
 * Demuxer read_close: tear down mmap'ed buffers and close the device.
 * Warns when packets handed to the caller have not yet re-queued their
 * underlying capture buffers.
 */
static int v4l2_read_close(AVFormatContext *s1)
{
    struct video_data *s = s1->priv_data;
    int queued = avpriv_atomic_int_get(&s->buffers_queued);

    if (queued != s->buffers)
        av_log(s1, AV_LOG_WARNING, "Some buffers are still owned by the caller on "
               "close.\n");

    mmap_close(s);
    v4l2_close(s->fd);

    return 0;
}
 
/* Shorthands for the option table below. */
#define OFFSET(x) offsetof(struct video_data, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM

/* Private options of the v4l2 demuxer; named constants (the entries with
 * AV_OPT_TYPE_CONST) belong to the preceding "unit" option. */
static const AVOption options[] = {
    { "standard",     "set TV standard, used only by analog frame grabber",       OFFSET(standard),     AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0,       DEC },
    { "channel",      "set TV channel, used only by frame grabber",               OFFSET(channel),      AV_OPT_TYPE_INT,    {.i64 = -1 },  -1, INT_MAX, DEC },
    { "video_size",   "set frame size",                                           OFFSET(width),        AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL},  0, 0,   DEC },
    { "pixel_format", "set preferred pixel format",                               OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL},  0, 0,       DEC },
    { "input_format", "set preferred pixel format (for raw video) or codec name", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL},  0, 0,       DEC },
    { "framerate",    "set frame rate",                                           OFFSET(framerate),    AV_OPT_TYPE_STRING, {.str = NULL},  0, 0,       DEC },

    { "list_formats", "list available formats and exit",                          OFFSET(list_format),  AV_OPT_TYPE_INT,    {.i64 = 0 },  0, INT_MAX, DEC, "list_formats" },
    { "all",          "show all available formats",                               OFFSET(list_format),  AV_OPT_TYPE_CONST,  {.i64 = V4L_ALLFORMATS  },    0, INT_MAX, DEC, "list_formats" },
    { "raw",          "show only non-compressed formats",                         OFFSET(list_format),  AV_OPT_TYPE_CONST,  {.i64 = V4L_RAWFORMATS  },    0, INT_MAX, DEC, "list_formats" },
    { "compressed",   "show only compressed formats",                             OFFSET(list_format),  AV_OPT_TYPE_CONST,  {.i64 = V4L_COMPFORMATS },    0, INT_MAX, DEC, "list_formats" },

    { "list_standards", "list supported standards and exit",                      OFFSET(list_standard), AV_OPT_TYPE_INT,   {.i64 = 0 },  0, 1, DEC, "list_standards" },
    { "all",            "show all supported standards",                           OFFSET(list_standard), AV_OPT_TYPE_CONST, {.i64 = 1 },  0, 0, DEC, "list_standards" },

    { "timestamps", "set type of timestamps for grabbed frames",                  OFFSET(ts_mode),      AV_OPT_TYPE_INT,    {.i64 = 0 }, 0, 2, DEC, "timestamps" },
    { "ts",         "set type of timestamps for grabbed frames",                  OFFSET(ts_mode),      AV_OPT_TYPE_INT,    {.i64 = 0 }, 0, 2, DEC, "timestamps" },
    { "default",    "use timestamps from the kernel",                             OFFSET(ts_mode),      AV_OPT_TYPE_CONST,  {.i64 = V4L_TS_DEFAULT  }, 0, 2, DEC, "timestamps" },
    { "abs",        "use absolute timestamps (wall clock)",                       OFFSET(ts_mode),      AV_OPT_TYPE_CONST,  {.i64 = V4L_TS_ABS      }, 0, 2, DEC, "timestamps" },
    { "mono2abs",   "force conversion from monotonic to absolute timestamps",     OFFSET(ts_mode),      AV_OPT_TYPE_CONST,  {.i64 = V4L_TS_MONO2ABS }, 0, 2, DEC, "timestamps" },
    { "use_libv4l2", "use libv4l2 (v4l-utils) convertion functions",              OFFSET(use_libv4l2),  AV_OPT_TYPE_INT,    {.i64 = 0}, 0, 1, DEC },
    { NULL },
};

/* AVClass binding the options above to the private context. */
static const AVClass v4l2_class = {
    .class_name = "V4L2 indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* Demuxer registration; AVFMT_NOFILE: we open the device node ourselves. */
AVInputFormat ff_v4l2_demuxer = {
    .name           = "video4linux2,v4l2",
    .long_name      = NULL_IF_CONFIG_SMALL("Video4Linux2 device grab"),
    .priv_data_size = sizeof(struct video_data),
    .read_header    = v4l2_read_header,
    .read_packet    = v4l2_read_packet,
    .read_close     = v4l2_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &v4l2_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/v4l2enc.c
0,0 → 1,110
/*
* Copyright (c) 2013 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "v4l2-common.h"
#include "avdevice.h"
 
/* Private muxer state: just the output device file descriptor,
 * opened in write_header() and closed in write_trailer(). */
typedef struct {
    int fd;  /* descriptor of the opened V4L2 output device */
} V4L2Context;
 
/**
 * Muxer write_header: validate the stream layout, open the V4L2 output
 * device and program its pixel format/frame size.
 *
 * Fix: the device fd used to leak on every error path after open()
 * (stream validation, unknown pixel format, VIDIOC_G_FMT/S_FMT
 * failures) — write_trailer() is not invoked when write_header()
 * fails. Validation now happens before open(), and ioctl failures
 * close the fd before returning.
 *
 * @return 0 on success, a negative AVERROR code otherwise
 */
static av_cold int write_header(AVFormatContext *s1)
{
    int res = 0, flags = O_RDWR;
    struct v4l2_format fmt = {
        .type = V4L2_BUF_TYPE_VIDEO_OUTPUT
    };
    V4L2Context *s = s1->priv_data;
    AVCodecContext *enc_ctx;
    uint32_t v4l2_pixfmt;

    /* Validate before opening the device so nothing can leak on the
     * common misconfiguration paths. */
    if (s1->nb_streams != 1 ||
        s1->streams[0]->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
        s1->streams[0]->codec->codec_id   != AV_CODEC_ID_RAWVIDEO) {
        av_log(s1, AV_LOG_ERROR,
               "V4L2 output device supports only a single raw video stream\n");
        return AVERROR(EINVAL);
    }

    enc_ctx = s1->streams[0]->codec;

    v4l2_pixfmt = avpriv_fmt_ff2v4l(enc_ctx->pix_fmt, AV_CODEC_ID_RAWVIDEO);
    if (!v4l2_pixfmt) { // XXX: try to force them one by one?
        av_log(s1, AV_LOG_ERROR, "Unknown V4L2 pixel format equivalent for %s\n",
               av_get_pix_fmt_name(enc_ctx->pix_fmt));
        return AVERROR(EINVAL);
    }

    if (s1->flags & AVFMT_FLAG_NONBLOCK)
        flags |= O_NONBLOCK;

    s->fd = open(s1->filename, flags);
    if (s->fd < 0) {
        res = AVERROR(errno);
        av_log(s1, AV_LOG_ERROR, "Unable to open V4L2 device '%s'\n", s1->filename);
        return res;
    }

    /* start from the device's current format, then override geometry */
    if (ioctl(s->fd, VIDIOC_G_FMT, &fmt) < 0) {
        res = AVERROR(errno);
        av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_FMT): %s\n", av_err2str(res));
        goto fail;
    }

    fmt.fmt.pix.width       = enc_ctx->width;
    fmt.fmt.pix.height      = enc_ctx->height;
    fmt.fmt.pix.pixelformat = v4l2_pixfmt;
    fmt.fmt.pix.sizeimage   = av_image_get_buffer_size(enc_ctx->pix_fmt, enc_ctx->width, enc_ctx->height, 1);

    if (ioctl(s->fd, VIDIOC_S_FMT, &fmt) < 0) {
        res = AVERROR(errno);
        av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_S_FMT): %s\n", av_err2str(res));
        goto fail;
    }

    return 0;

fail:
    /* do not leak the descriptor: write_trailer() will not be called */
    close(s->fd);
    s->fd = -1;
    return res;
}
 
/**
 * Muxer write_packet: push one raw frame to the output device.
 *
 * @return 0 on success, a negative AVERROR code when write() fails
 */
static int write_packet(AVFormatContext *s1, AVPacket *pkt)
{
    const V4L2Context *ctx = s1->priv_data;
    ssize_t written = write(ctx->fd, pkt->data, pkt->size);

    return written == -1 ? AVERROR(errno) : 0;
}
 
/**
 * Muxer write_trailer: release the device descriptor opened in
 * write_header().
 */
static int write_trailer(AVFormatContext *s1)
{
    const V4L2Context *ctx = s1->priv_data;

    close(ctx->fd);
    return 0;
}
 
/* Muxer registration: single raw-video stream only, no audio;
 * AVFMT_NOFILE because we open the device node ourselves. */
AVOutputFormat ff_v4l2_muxer = {
    .name           = "v4l2",
    .long_name      = NULL_IF_CONFIG_SMALL("Video4Linux2 output device"),
    .priv_data_size = sizeof(V4L2Context),
    .audio_codec    = AV_CODEC_ID_NONE,
    .video_codec    = AV_CODEC_ID_RAWVIDEO,
    .write_header   = write_header,
    .write_packet   = write_packet,
    .write_trailer  = write_trailer,
    .flags          = AVFMT_NOFILE,
};
/contrib/sdk/sources/ffmpeg/libavdevice/version.h
0,0 → 1,50
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#ifndef AVDEVICE_VERSION_H
#define AVDEVICE_VERSION_H
 
/**
* @file
* @ingroup lavd
* Libavdevice version macros
*/
 
#include "libavutil/avutil.h"
 
#define LIBAVDEVICE_VERSION_MAJOR  55
#define LIBAVDEVICE_VERSION_MINOR   5
#define LIBAVDEVICE_VERSION_MICRO 100

/* Version packed into a single int, comparable with AV_VERSION_INT(). */
#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \
                                               LIBAVDEVICE_VERSION_MINOR, \
                                               LIBAVDEVICE_VERSION_MICRO)
/* Same version as a dotted token, e.g. 55.5.100. */
#define LIBAVDEVICE_VERSION     AV_VERSION(LIBAVDEVICE_VERSION_MAJOR, \
                                           LIBAVDEVICE_VERSION_MINOR, \
                                           LIBAVDEVICE_VERSION_MICRO)
/* Kept for code that still tests LIBAVDEVICE_BUILD. */
#define LIBAVDEVICE_BUILD       LIBAVDEVICE_VERSION_INT

/* Identification string, e.g. "Lavd55.5.100". */
#define LIBAVDEVICE_IDENT       "Lavd" AV_STRINGIFY(LIBAVDEVICE_VERSION)
 
/**
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
* the public API and may change, break or disappear at any time.
*/
 
#endif /* AVDEVICE_VERSION_H */
/contrib/sdk/sources/ffmpeg/libavdevice/vfwcap.c
0,0 → 1,483
/*
* VFW capture interface
* Copyright (c) 2006-2008 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
#include "libavformat/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include <windows.h>
#include <vfw.h>
#include "avdevice.h"
 
/* Defines for VFW missing from MinGW.
 * Remove this when MinGW incorporates them. */
#define HWND_MESSAGE ((HWND)-3)   /* message-only window parent (WinUser.h value) */

/* End of missing MinGW defines */

/* Private demuxer state, shared with the VFW capture callback. */
struct vfw_ctx {
    const AVClass *class;
    HWND hwnd;               /* capture window created by capCreateCaptureWindow() */
    HANDLE mutex;            /* guards pktl/curbufsize shared with the capture thread */
    HANDLE event;            /* signaled by the callback when a packet is queued */
    AVPacketList *pktl;      /* FIFO of captured frames; head is oldest */
    unsigned int curbufsize; /* total bytes currently queued in pktl */
    unsigned int frame_num;  /* running frame counter used by the drop heuristic */
    char *video_size;        /**< A string describing video size, set by a private option. */
    char *framerate;         /**< Set by a private option. */
};
 
/**
 * Map a VFW BITMAPINFOHEADER compression/bit-depth pair to an ffmpeg
 * raw pixel format.
 *
 * @return the matching AV_PIX_FMT_*, or AV_PIX_FMT_NONE if unknown
 */
static enum AVPixelFormat vfw_pixfmt(DWORD biCompression, WORD biBitCount)
{
    if (biCompression == MKTAG('U', 'Y', 'V', 'Y'))
        return AV_PIX_FMT_UYVY422;
    if (biCompression == MKTAG('Y', 'U', 'Y', '2'))
        return AV_PIX_FMT_YUYV422;
    if (biCompression == MKTAG('I', '4', '2', '0'))
        return AV_PIX_FMT_YUV420P;

    if (biCompression == BI_RGB) {
        /* 1-8 bpp variants are untested */
        switch (biBitCount) {
        case  1: return AV_PIX_FMT_MONOWHITE;
        case  4: return AV_PIX_FMT_RGB4;
        case  8: return AV_PIX_FMT_RGB8;
        case 16: return AV_PIX_FMT_RGB555;
        case 24: return AV_PIX_FMT_BGR24;
        case 32: return AV_PIX_FMT_RGB32;
        }
    }

    return AV_PIX_FMT_NONE;
}
 
/**
 * Map a VFW compression FOURCC to a compressed ffmpeg codec id.
 *
 * @return the matching AV_CODEC_ID_*, or AV_CODEC_ID_NONE if unknown
 */
static enum AVCodecID vfw_codecid(DWORD biCompression)
{
    if (biCompression == MKTAG('d', 'v', 's', 'd'))
        return AV_CODEC_ID_DVVIDEO;
    if (biCompression == MKTAG('M', 'J', 'P', 'G') ||
        biCompression == MKTAG('m', 'j', 'p', 'g'))
        return AV_CODEC_ID_MJPEG;
    return AV_CODEC_ID_NONE;
}
 
/* Log one struct member at debug level: dstruct(ctx, ptr, member, fmt). */
#define dstruct(pctx, sname, var, type) \
    av_log(pctx, AV_LOG_DEBUG, #var":\t%"type"\n", sname->var)

/* Dump every CAPTUREPARMS field at debug log level. */
static void dump_captureparms(AVFormatContext *s, CAPTUREPARMS *cparms)
{
    av_log(s, AV_LOG_DEBUG, "CAPTUREPARMS\n");
    dstruct(s, cparms, dwRequestMicroSecPerFrame, "lu");
    dstruct(s, cparms, fMakeUserHitOKToCapture, "d");
    dstruct(s, cparms, wPercentDropForError, "u");
    dstruct(s, cparms, fYield, "d");
    dstruct(s, cparms, dwIndexSize, "lu");
    dstruct(s, cparms, wChunkGranularity, "u");
    dstruct(s, cparms, fUsingDOSMemory, "d");
    dstruct(s, cparms, wNumVideoRequested, "u");
    dstruct(s, cparms, fCaptureAudio, "d");
    dstruct(s, cparms, wNumAudioRequested, "u");
    dstruct(s, cparms, vKeyAbort, "u");
    dstruct(s, cparms, fAbortLeftMouse, "d");
    dstruct(s, cparms, fAbortRightMouse, "d");
    dstruct(s, cparms, fLimitEnabled, "d");
    dstruct(s, cparms, wTimeLimit, "u");
    dstruct(s, cparms, fMCIControl, "d");
    dstruct(s, cparms, fStepMCIDevice, "d");
    dstruct(s, cparms, dwMCIStartTime, "lu");
    dstruct(s, cparms, dwMCIStopTime, "lu");
    dstruct(s, cparms, fStepCaptureAt2x, "d");
    dstruct(s, cparms, wStepCaptureAverageFrames, "u");
    dstruct(s, cparms, dwAudioBufferSize, "lu");
    dstruct(s, cparms, fDisableWriteCache, "d");
    dstruct(s, cparms, AVStreamMaster, "u");
}
 
/* Dump a VIDEOHDR at debug level; compiled to a no-op unless DEBUG is
 * defined, since this runs once per captured frame. */
static void dump_videohdr(AVFormatContext *s, VIDEOHDR *vhdr)
{
#ifdef DEBUG
    av_log(s, AV_LOG_DEBUG, "VIDEOHDR\n");
    dstruct(s, vhdr, lpData, "p");
    dstruct(s, vhdr, dwBufferLength, "lu");
    dstruct(s, vhdr, dwBytesUsed, "lu");
    dstruct(s, vhdr, dwTimeCaptured, "lu");
    dstruct(s, vhdr, dwUser, "lu");
    dstruct(s, vhdr, dwFlags, "lu");
    dstruct(s, vhdr, dwReserved[0], "lu");
    dstruct(s, vhdr, dwReserved[1], "lu");
    dstruct(s, vhdr, dwReserved[2], "lu");
    dstruct(s, vhdr, dwReserved[3], "lu");
#endif
}
 
/* Dump a BITMAPINFOHEADER at debug level, including the compression
 * FOURCC rendered as four characters. */
static void dump_bih(AVFormatContext *s, BITMAPINFOHEADER *bih)
{
    av_log(s, AV_LOG_DEBUG, "BITMAPINFOHEADER\n");
    dstruct(s, bih, biSize, "lu");
    dstruct(s, bih, biWidth, "ld");
    dstruct(s, bih, biHeight, "ld");
    dstruct(s, bih, biPlanes, "d");
    dstruct(s, bih, biBitCount, "d");
    dstruct(s, bih, biCompression, "lu");
    av_log(s, AV_LOG_DEBUG, "    biCompression:\t\"%.4s\"\n",
           (char*) &bih->biCompression);
    dstruct(s, bih, biSizeImage, "lu");
    dstruct(s, bih, biXPelsPerMeter, "lu");
    dstruct(s, bih, biYPelsPerMeter, "lu");
    dstruct(s, bih, biClrUsed, "lu");
    dstruct(s, bih, biClrImportant, "lu");
}
 
/**
 * Real-time drop heuristic: decide whether the current frame should be
 * discarded based on how full the packet buffer is. Cycling through the
 * threshold table drops an increasing share of frames as the buffer
 * fills up.
 *
 * @return 1 to drop the frame, 0 to keep it
 */
static int shall_we_drop(AVFormatContext *s)
{
    struct vfw_ctx *ctx = s->priv_data;
    static const uint8_t dropscore[] = { 62, 75, 87, 100 };
    const int ndropscores = FF_ARRAY_ELEMS(dropscore);
    unsigned int fullness = (ctx->curbufsize * 100) / s->max_picture_buffer;
    unsigned int slot = ++ctx->frame_num % ndropscores;

    if (dropscore[slot] > fullness)
        return 0;

    av_log(s, AV_LOG_ERROR,
           "real-time buffer %d%% full! frame dropped!\n", fullness);
    return 1;
}
 
/**
 * VFW capture callback, invoked on the capture window's background
 * thread once per grabbed frame.
 *
 * Copies the frame into a fresh AVPacket, appends it to the shared
 * packet list under ctx->mutex and signals ctx->event so a blocked
 * vfw_read_packet() wakes up.
 *
 * @return TRUE when the frame was queued, FALSE when it was dropped or
 *         allocation failed
 */
static LRESULT CALLBACK videostream_cb(HWND hwnd, LPVIDEOHDR vdhdr)
{
    AVFormatContext *s;
    struct vfw_ctx *ctx;
    AVPacketList **ppktl, *pktl_next;

    /* the owning AVFormatContext was stashed in the window user data
     * by vfw_read_header() */
    s = (AVFormatContext *) GetWindowLongPtr(hwnd, GWLP_USERDATA);
    ctx = s->priv_data;

    dump_videohdr(s, vdhdr);

    if(shall_we_drop(s))
        return FALSE;

    WaitForSingleObject(ctx->mutex, INFINITE);

    pktl_next = av_mallocz(sizeof(AVPacketList));
    if(!pktl_next)
        goto fail;

    if(av_new_packet(&pktl_next->pkt, vdhdr->dwBytesUsed) < 0) {
        av_free(pktl_next);
        goto fail;
    }

    /* dwTimeCaptured matches the 1/1000 time base set in vfw_read_header() */
    pktl_next->pkt.pts = vdhdr->dwTimeCaptured;
    memcpy(pktl_next->pkt.data, vdhdr->lpData, vdhdr->dwBytesUsed);

    /* append at the tail so frames stay in capture order */
    for(ppktl = &ctx->pktl ; *ppktl ; ppktl = &(*ppktl)->next);
    *ppktl = pktl_next;

    ctx->curbufsize += vdhdr->dwBytesUsed;

    SetEvent(ctx->event);
    ReleaseMutex(ctx->mutex);

    return TRUE;
fail:
    ReleaseMutex(ctx->mutex);
    return FALSE;
}
 
/**
 * Demuxer read_close: unregister the callback, disconnect the driver,
 * destroy the window and synchronization handles, and drain any packets
 * still sitting in the queue.
 */
static int vfw_read_close(AVFormatContext *s)
{
    struct vfw_ctx *ctx = s->priv_data;
    AVPacketList *node, *next;

    if (ctx->hwnd) {
        /* stop the callback before disconnecting so no new frames
         * arrive while tearing down */
        SendMessage(ctx->hwnd, WM_CAP_SET_CALLBACK_VIDEOSTREAM, 0, 0);
        SendMessage(ctx->hwnd, WM_CAP_DRIVER_DISCONNECT, 0, 0);
        DestroyWindow(ctx->hwnd);
    }
    if (ctx->mutex)
        CloseHandle(ctx->mutex);
    if (ctx->event)
        CloseHandle(ctx->event);

    /* free packets queued by the callback but never consumed */
    for (node = ctx->pktl; node; node = next) {
        next = node->next;
        av_destruct_packet(&node->pkt);
        av_free(node);
    }

    return 0;
}
 
/**
 * Open and configure a VFW capture driver.
 *
 * The special filename "list" enumerates drivers 0-9 and exits;
 * otherwise the filename is parsed as a driver number. Sets up the
 * capture window, negotiates video format, frame rate and sequence
 * parameters, creates the mutex/event pair shared with the capture
 * callback and starts the capture sequence.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int vfw_read_header(AVFormatContext *s)
{
    struct vfw_ctx *ctx = s->priv_data;
    AVCodecContext *codec;
    AVStream *st;
    int devnum;
    int bisize;
    BITMAPINFO *bi = NULL;
    CAPTUREPARMS cparms;
    DWORD biCompression;
    WORD biBitCount;
    int ret;
    AVRational framerate_q;

    /* "list" filename: print the available drivers and bail out */
    if (!strcmp(s->filename, "list")) {
        for (devnum = 0; devnum <= 9; devnum++) {
            char driver_name[256];
            char driver_ver[256];
            ret = capGetDriverDescription(devnum,
                                          driver_name, sizeof(driver_name),
                                          driver_ver, sizeof(driver_ver));
            if (ret) {
                av_log(s, AV_LOG_INFO, "Driver %d\n", devnum);
                av_log(s, AV_LOG_INFO, " %s\n", driver_name);
                av_log(s, AV_LOG_INFO, " %s\n", driver_ver);
            }
        }
        return AVERROR(EIO);
    }

    /* message-only window: invisible, exists just to receive callbacks */
    ctx->hwnd = capCreateCaptureWindow(NULL, 0, 0, 0, 0, 0, HWND_MESSAGE, 0);
    if(!ctx->hwnd) {
        av_log(s, AV_LOG_ERROR, "Could not create capture window.\n");
        return AVERROR(EIO);
    }

    /* If atoi fails, devnum==0 and the default device is used */
    devnum = atoi(s->filename);

    ret = SendMessage(ctx->hwnd, WM_CAP_DRIVER_CONNECT, devnum, 0);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not connect to device.\n");
        DestroyWindow(ctx->hwnd);
        return AVERROR(ENODEV);
    }

    /* disable overlay/preview: frames go through the callback instead */
    SendMessage(ctx->hwnd, WM_CAP_SET_OVERLAY, 0, 0);
    SendMessage(ctx->hwnd, WM_CAP_SET_PREVIEW, 0, 0);

    ret = SendMessage(ctx->hwnd, WM_CAP_SET_CALLBACK_VIDEOSTREAM, 0,
                      (LPARAM) videostream_cb);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not set video stream callback.\n");
        goto fail;
    }

    /* let videostream_cb() find this AVFormatContext again */
    SetWindowLongPtr(ctx->hwnd, GWLP_USERDATA, (LONG_PTR) s);

    st = avformat_new_stream(s, NULL);
    if(!st) {
        vfw_read_close(s);
        return AVERROR(ENOMEM);
    }

    /* Set video format */
    bisize = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, 0, 0);
    if(!bisize)
        goto fail;
    bi = av_malloc(bisize);
    if(!bi) {
        vfw_read_close(s);
        return AVERROR(ENOMEM);
    }
    ret = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, bisize, (LPARAM) bi);
    if(!ret)
        goto fail;

    dump_bih(s, &bi->bmiHeader);

    /* NOTE(review): on failure the specific code from
     * av_parse_video_rate() is discarded; "fail" returns AVERROR(EIO) */
    ret = av_parse_video_rate(&framerate_q, ctx->framerate);
    if (ret < 0) {
        av_log(s, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", ctx->framerate);
        goto fail;
    }

    if (ctx->video_size) {
        ret = av_parse_video_size(&bi->bmiHeader.biWidth, &bi->bmiHeader.biHeight, ctx->video_size);
        if (ret < 0) {
            av_log(s, AV_LOG_ERROR, "Couldn't parse video size.\n");
            goto fail;
        }
    }

    if (0) {
        /* For testing yet unsupported compressions
         * Copy these values from user-supplied verbose information */
        bi->bmiHeader.biWidth       = 320;
        bi->bmiHeader.biHeight      = 240;
        bi->bmiHeader.biPlanes      = 1;
        bi->bmiHeader.biBitCount    = 12;
        bi->bmiHeader.biCompression = MKTAG('I','4','2','0');
        bi->bmiHeader.biSizeImage   = 115200;
        dump_bih(s, &bi->bmiHeader);
    }

    ret = SendMessage(ctx->hwnd, WM_CAP_SET_VIDEOFORMAT, bisize, (LPARAM) bi);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not set Video Format.\n");
        goto fail;
    }

    biCompression = bi->bmiHeader.biCompression;
    biBitCount = bi->bmiHeader.biBitCount;

    /* Set sequence setup */
    ret = SendMessage(ctx->hwnd, WM_CAP_GET_SEQUENCE_SETUP, sizeof(cparms),
                      (LPARAM) &cparms);
    if(!ret)
        goto fail;

    dump_captureparms(s, &cparms);

    cparms.fYield = 1; // Spawn a background thread
    cparms.dwRequestMicroSecPerFrame =
                               (framerate_q.den*1000000) / framerate_q.num;
    cparms.fAbortLeftMouse = 0;
    cparms.fAbortRightMouse = 0;
    cparms.fCaptureAudio = 0;
    cparms.vKeyAbort = 0;

    ret = SendMessage(ctx->hwnd, WM_CAP_SET_SEQUENCE_SETUP, sizeof(cparms),
                      (LPARAM) &cparms);
    if(!ret)
        goto fail;

    codec = st->codec;
    codec->time_base = av_inv_q(framerate_q);
    codec->codec_type = AVMEDIA_TYPE_VIDEO;
    codec->width  = bi->bmiHeader.biWidth;
    codec->height = bi->bmiHeader.biHeight;
    codec->pix_fmt = vfw_pixfmt(biCompression, biBitCount);
    if(codec->pix_fmt == AV_PIX_FMT_NONE) {
        /* not a raw format we know: fall back to a compressed codec id */
        codec->codec_id = vfw_codecid(biCompression);
        if(codec->codec_id == AV_CODEC_ID_NONE) {
            av_log(s, AV_LOG_ERROR, "Unknown compression type. "
                   "Please report verbose (-v 9) debug information.\n");
            vfw_read_close(s);
            return AVERROR_PATCHWELCOME;
        }
        codec->bits_per_coded_sample = biBitCount;
    } else {
        codec->codec_id = AV_CODEC_ID_RAWVIDEO;
        if(biCompression == BI_RGB) {
            codec->bits_per_coded_sample = biBitCount;
            /* DIBs are stored bottom-up; tell downstream via extradata */
            codec->extradata = av_malloc(9 + FF_INPUT_BUFFER_PADDING_SIZE);
            if (codec->extradata) {
                codec->extradata_size = 9;
                memcpy(codec->extradata, "BottomUp", 9);
            }
        }
    }

    av_freep(&bi);

    /* dwTimeCaptured pts values are in this 1/1000 time base */
    avpriv_set_pts_info(st, 32, 1, 1000);

    ctx->mutex = CreateMutex(NULL, 0, NULL);
    if(!ctx->mutex) {
        av_log(s, AV_LOG_ERROR, "Could not create Mutex.\n" );
        goto fail;
    }
    ctx->event = CreateEvent(NULL, 1, 0, NULL);
    if(!ctx->event) {
        av_log(s, AV_LOG_ERROR, "Could not create Event.\n" );
        goto fail;
    }

    ret = SendMessage(ctx->hwnd, WM_CAP_SEQUENCE_NOFILE, 0, 0);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not start capture sequence.\n" );
        goto fail;
    }

    return 0;

fail:
    av_freep(&bi);
    vfw_read_close(s);
    return AVERROR(EIO);
}
 
/**
 * Demuxer read_packet: pop the oldest queued frame, blocking on
 * ctx->event unless AVFMT_FLAG_NONBLOCK is set.
 *
 * @return packet size on success, AVERROR(EAGAIN) in non-blocking mode
 *         when the queue is empty
 */
static int vfw_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    struct vfw_ctx *ctx = s->priv_data;
    AVPacketList *head;

    for (;;) {
        WaitForSingleObject(ctx->mutex, INFINITE);
        head = ctx->pktl;
        if (head) {
            /* detach the oldest packet from the list */
            *pkt = head->pkt;
            ctx->pktl = head->next;
            av_free(head);
        }
        ResetEvent(ctx->event);
        ReleaseMutex(ctx->mutex);

        if (head)
            break;
        if (s->flags & AVFMT_FLAG_NONBLOCK)
            return AVERROR(EAGAIN);
        /* wait for the capture callback to queue a new packet */
        WaitForSingleObject(ctx->event, INFINITE);
    }

    ctx->curbufsize -= pkt->size;

    return pkt->size;
}
 
/* Shorthands for the option table below. */
#define OFFSET(x) offsetof(struct vfw_ctx, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
/* Private options of the vfwcap demuxer. */
static const AVOption options[] = {
    { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
    { "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = "ntsc"}, 0, 0, DEC },
    { NULL },
};

/* AVClass binding the options above to the private context. */
static const AVClass vfw_class = {
    .class_name = "VFW indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* Demuxer registration; AVFMT_NOFILE: the device is opened via VFW,
 * not through the generic I/O layer. */
AVInputFormat ff_vfwcap_demuxer = {
    .name           = "vfwcap",
    .long_name      = NULL_IF_CONFIG_SMALL("VfW video capture"),
    .priv_data_size = sizeof(struct vfw_ctx),
    .read_header    = vfw_read_header,
    .read_packet    = vfw_read_packet,
    .read_close     = vfw_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &vfw_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/x11grab.c
0,0 → 1,648
/*
* X11 video grab interface
*
* This file is part of FFmpeg.
*
* FFmpeg integration:
* Copyright (C) 2006 Clemens Fruhwirth <clemens@endorphin.org>
* Edouard Gomez <ed.gomez@free.fr>
*
* This file contains code from grab.c:
* Copyright (c) 2000-2001 Fabrice Bellard
*
* This file contains code from the xvidcap project:
* Copyright (C) 1997-1998 Rasca, Berlin
* 2003-2004 Karl H. Beckers, Frankfurt
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* X11 frame device demuxer
* @author Clemens Fruhwirth <clemens@endorphin.org>
* @author Edouard Gomez <ed.gomez@free.fr>
*/
 
#include "config.h"
#include "libavformat/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/time.h"
#include <time.h>
#include <X11/cursorfont.h>
#include <X11/X.h>
#include <X11/Xlib.h>
#include <X11/Xlibint.h>
#include <X11/Xproto.h>
#include <X11/Xutil.h>
#include <sys/shm.h>
#include <X11/extensions/shape.h>
#include <X11/extensions/XShm.h>
#include <X11/extensions/Xfixes.h>
#include "avdevice.h"
 
/**
 * X11 Device Demuxer context
 */
struct x11grab {
    const AVClass *class;    /**< Class for private options. */
    int frame_size;          /**< Size in bytes of a grabbed frame */
    AVRational time_base;    /**< Time base */
    int64_t time_frame;      /**< Current time */

    int width;               /**< Width of the grab frame */
    int height;              /**< Height of the grab frame */
    int x_off;               /**< Horizontal top-left corner coordinate */
    int y_off;               /**< Vertical top-left corner coordinate */

    Display *dpy;            /**< X11 display from which x11grab grabs frames */
    XImage *image;           /**< X11 image holding the grab */
    int use_shm;             /**< !0 when using XShm extension */
    XShmSegmentInfo shminfo; /**< When using XShm, keeps track of XShm infos */
    int draw_mouse;          /**< Set by a private option. */
    int follow_mouse;        /**< Set by a private option. */
    int show_region;         /**< set by a private option. */
    AVRational framerate;    /**< Set by a private option. */
    int palette_changed;     /**< flag paired with palette[] below — presumably set when the colormap must be re-read; confirm in grab code */
    uint32_t palette[256];   /**< colormap cache — presumably for paletted (8bpp) visuals; confirm in grab code */

    Cursor c;                /**< X cursor handle — likely used by the draw_mouse option; confirm in grab code */
    Window region_win;       /**< This is used by show_region option. */
};
 
#define REGION_WIN_BORDER 3

/**
 * Paint the dashed border of the grab-region outline window.
 *
 * @param s x11grab context
 */
static void
x11grab_draw_region_win(struct x11grab *s)
{
    Display *display = s->dpy;
    Window win      = s->region_win;
    int screen_num  = DefaultScreen(display);
    GC pen;

    pen = XCreateGC(display, win, 0, 0);
    XSetForeground(display, pen, WhitePixel(display, screen_num));
    XSetBackground(display, pen, BlackPixel(display, screen_num));
    XSetLineAttributes(display, pen, REGION_WIN_BORDER, LineDoubleDash, 0, 0);
    /* Rectangle inset by one pixel on each side of the bordered window. */
    XDrawRectangle(display, win, pen, 1, 1,
                   s->width  + 2 * REGION_WIN_BORDER - 3,
                   s->height + 2 * REGION_WIN_BORDER - 3);
    XFreeGC(display, pen);
}
 
/**
 * Create and map the outline window that visualizes the grab region.
 *
 * The window is override-redirect (so the window manager leaves it
 * alone) and its bounding shape has the grab area punched out, leaving
 * only the border frame visible on screen.
 *
 * @param s x11grab context
 */
static void
x11grab_region_win_init(struct x11grab *s)
{
    Display *display = s->dpy;
    int screen_num   = DefaultScreen(display);
    XSetWindowAttributes swa;
    XRectangle hole;

    swa.override_redirect = True;
    s->region_win = XCreateWindow(display, RootWindow(display, screen_num),
                                  s->x_off - REGION_WIN_BORDER,
                                  s->y_off - REGION_WIN_BORDER,
                                  s->width  + 2 * REGION_WIN_BORDER,
                                  s->height + 2 * REGION_WIN_BORDER,
                                  0, CopyFromParent,
                                  InputOutput, CopyFromParent,
                                  CWOverrideRedirect, &swa);

    /* Subtract the interior so only the border remains opaque. */
    hole.x      = 0;
    hole.y      = 0;
    hole.width  = s->width;
    hole.height = s->height;
    XShapeCombineRectangles(display, s->region_win,
                            ShapeBounding, REGION_WIN_BORDER, REGION_WIN_BORDER,
                            &hole, 1, ShapeSubtract, 0);

    XMapWindow(display, s->region_win);
    XSelectInput(display, s->region_win, ExposureMask | StructureNotifyMask);
    x11grab_draw_region_win(s);
}
 
/**
 * Initialize the x11 grab device demuxer (public device demuxer API).
 *
 * Parses the filename as "display[+x,y[nomouse]]", opens the display,
 * creates the capture XImage (via MIT-SHM when available) and sets up
 * the single rawvideo stream.
 *
 * @param s1 Context from avformat core
 * @return <ul>
 *         <li>AVERROR(ENOMEM) no memory left</li>
 *         <li>AVERROR(EIO) other failure case</li>
 *         <li>0 success</li>
 *         </ul>
 */
static int
x11grab_read_header(AVFormatContext *s1)
{
    struct x11grab *x11grab = s1->priv_data;
    Display *dpy;
    AVStream *st = NULL;
    enum AVPixelFormat input_pixfmt;
    XImage *image;
    int x_off = 0;
    int y_off = 0;
    int screen;
    int use_shm;
    char *dpyname, *offset;
    int ret = 0;
    Colormap color_map;
    XColor color[256];
    int i;

    dpyname = av_strdup(s1->filename);
    if (!dpyname) {
        /* BUGFIX: previously fell through with ret == 0, reporting
         * success on an allocation failure. */
        ret = AVERROR(ENOMEM);
        goto out;
    }

    /* Optional "+x,y[nomouse]" suffix selects the grab origin. */
    offset = strchr(dpyname, '+');
    if (offset) {
        sscanf(offset, "%d,%d", &x_off, &y_off);
        if (strstr(offset, "nomouse")) {
            av_log(s1, AV_LOG_WARNING,
                   "'nomouse' specification in argument is deprecated: "
                   "use 'draw_mouse' option with value 0 instead\n");
            x11grab->draw_mouse = 0;
        }
        *offset = 0;
    }

    av_log(s1, AV_LOG_INFO, "device: %s -> display: %s x: %d y: %d width: %d height: %d\n",
           s1->filename, dpyname, x_off, y_off, x11grab->width, x11grab->height);

    dpy = XOpenDisplay(dpyname);
    av_freep(&dpyname);
    if (!dpy) {
        av_log(s1, AV_LOG_ERROR, "Could not open X display.\n");
        ret = AVERROR(EIO);
        goto out;
    }

    st = avformat_new_stream(s1, NULL);
    if (!st) {
        ret = AVERROR(ENOMEM);
        goto out;
    }
    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    screen = DefaultScreen(dpy);

    if (x11grab->follow_mouse) {
        int screen_w, screen_h;
        int dummy_coord;
        unsigned int dummy_mask;
        Window w;

        screen_w = DisplayWidth(dpy, screen);
        screen_h = DisplayHeight(dpy, screen);
        /* BUGFIX: the throw-away outputs used to be written into 'ret',
         * clobbering the function's return value on success, and passed
         * an int* where XQueryPointer expects unsigned int* for the
         * modifier-mask return. */
        XQueryPointer(dpy, RootWindow(dpy, screen), &w, &w, &x_off, &y_off,
                      &dummy_coord, &dummy_coord, &dummy_mask);
        /* Center the grab region on the pointer, clamped to the screen. */
        x_off -= x11grab->width / 2;
        y_off -= x11grab->height / 2;
        x_off = FFMIN(FFMAX(x_off, 0), screen_w - x11grab->width);
        y_off = FFMIN(FFMAX(y_off, 0), screen_h - x11grab->height);
        av_log(s1, AV_LOG_INFO, "followmouse is enabled, resetting grabbing region to x: %d y: %d\n", x_off, y_off);
    }

    use_shm = XShmQueryExtension(dpy);
    av_log(s1, AV_LOG_INFO, "shared memory extension%s found\n", use_shm ? "" : " not");

    if (use_shm) {
        int scr = XDefaultScreen(dpy);
        image = XShmCreateImage(dpy,
                                DefaultVisual(dpy, scr),
                                DefaultDepth(dpy, scr),
                                ZPixmap,
                                NULL,
                                &x11grab->shminfo,
                                x11grab->width, x11grab->height);
        if (!image) {
            /* BUGFIX: a NULL image was previously dereferenced below. */
            av_log(s1, AV_LOG_ERROR, "Fatal: Can't create XShm image!\n");
            ret = AVERROR(ENOMEM);
            goto out;
        }
        x11grab->shminfo.shmid = shmget(IPC_PRIVATE,
                                        image->bytes_per_line * image->height,
                                        IPC_CREAT|0777);
        if (x11grab->shminfo.shmid == -1) {
            av_log(s1, AV_LOG_ERROR, "Fatal: Can't get shared memory!\n");
            ret = AVERROR(ENOMEM);
            goto out;
        }
        x11grab->shminfo.shmaddr = image->data = shmat(x11grab->shminfo.shmid, 0, 0);
        x11grab->shminfo.readOnly = False;

        if (!XShmAttach(dpy, &x11grab->shminfo)) {
            av_log(s1, AV_LOG_ERROR, "Fatal: Failed to attach shared memory!\n");
            /* NOTE(review): the shm segment is not cleaned up on this
             * path; callers abort open so the kernel reclaims it on
             * process exit. */
            ret = AVERROR(EIO);
            goto out;
        }
    } else {
        image = XGetImage(dpy, RootWindow(dpy, screen),
                          x_off, y_off,
                          x11grab->width, x11grab->height,
                          AllPlanes, ZPixmap);
        if (!image) {
            /* BUGFIX: a NULL image was previously dereferenced below. */
            av_log(s1, AV_LOG_ERROR, "Fatal: XGetImage failed!\n");
            ret = AVERROR(EIO);
            goto out;
        }
    }

    /* Map the X visual's pixel layout onto an FFmpeg pixel format. */
    switch (image->bits_per_pixel) {
    case 8:
        av_log(s1, AV_LOG_DEBUG, "8 bit palette\n");
        input_pixfmt = AV_PIX_FMT_PAL8;
        /* Snapshot the server colormap into an RGB palette; it is sent
         * to the decoder as packet side data on the next read. */
        color_map = DefaultColormap(dpy, screen);
        for (i = 0; i < 256; ++i)
            color[i].pixel = i;
        XQueryColors(dpy, color_map, color, 256);
        for (i = 0; i < 256; ++i)
            x11grab->palette[i] = (color[i].red   & 0xFF00) << 8 |
                                  (color[i].green & 0xFF00)      |
                                  (color[i].blue  & 0xFF00) >> 8;
        x11grab->palette_changed = 1;
        break;
    case 16:
        if (image->red_mask   == 0xf800 &&
            image->green_mask == 0x07e0 &&
            image->blue_mask  == 0x001f) {
            av_log(s1, AV_LOG_DEBUG, "16 bit RGB565\n");
            input_pixfmt = AV_PIX_FMT_RGB565;
        } else if (image->red_mask   == 0x7c00 &&
                   image->green_mask == 0x03e0 &&
                   image->blue_mask  == 0x001f) {
            av_log(s1, AV_LOG_DEBUG, "16 bit RGB555\n");
            input_pixfmt = AV_PIX_FMT_RGB555;
        } else {
            av_log(s1, AV_LOG_ERROR, "RGB ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel);
            av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask);
            ret = AVERROR(EIO);
            goto out;
        }
        break;
    case 24:
        if (image->red_mask   == 0xff0000 &&
            image->green_mask == 0x00ff00 &&
            image->blue_mask  == 0x0000ff) {
            input_pixfmt = AV_PIX_FMT_BGR24;
        } else if (image->red_mask   == 0x0000ff &&
                   image->green_mask == 0x00ff00 &&
                   image->blue_mask  == 0xff0000) {
            input_pixfmt = AV_PIX_FMT_RGB24;
        } else {
            av_log(s1, AV_LOG_ERROR, "rgb ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel);
            av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask);
            ret = AVERROR(EIO);
            goto out;
        }
        break;
    case 32:
        input_pixfmt = AV_PIX_FMT_0RGB32;
        break;
    default:
        av_log(s1, AV_LOG_ERROR, "image depth %i not supported ... aborting\n", image->bits_per_pixel);
        ret = AVERROR(EINVAL);
        goto out;
    }

    x11grab->frame_size = x11grab->width * x11grab->height * image->bits_per_pixel / 8;
    x11grab->dpy        = dpy;
    x11grab->time_base  = av_inv_q(x11grab->framerate);
    x11grab->time_frame = av_gettime() / av_q2d(x11grab->time_base);
    x11grab->x_off      = x_off;
    x11grab->y_off      = y_off;
    x11grab->image      = image;
    x11grab->use_shm    = use_shm;

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_RAWVIDEO;
    st->codec->width      = x11grab->width;
    st->codec->height     = x11grab->height;
    st->codec->pix_fmt    = input_pixfmt;
    st->codec->time_base  = x11grab->time_base;
    st->codec->bit_rate   = x11grab->frame_size * 1 / av_q2d(x11grab->time_base) * 8;

out:
    /* dpyname is NULL on every path reaching here (either av_strdup
     * failed or it was released with av_freep right after XOpenDisplay),
     * so the former redundant av_free(dpyname) was dropped. */
    return ret;
}
 
/**
 * Paint a mouse pointer in an X11 image.
 *
 * Fetches the current cursor image through the XFixes extension and
 * alpha-blends it into the grabbed frame at the pointer position,
 * clipped to the grabbing rectangle. Only 24- and 32-bpp frames are
 * handled.
 *
 * @param image image to paint the mouse pointer to
 * @param s context used to retrieve original grabbing rectangle
 *          coordinates
 */
static void
paint_mouse_pointer(XImage *image, struct x11grab *s)
{
    int x_off = s->x_off;
    int y_off = s->y_off;
    int width = s->width;
    int height = s->height;
    Display *dpy = s->dpy;
    XFixesCursorImage *xcim;
    int x, y;
    int line, column;
    int to_line, to_column;
    int pixstride = image->bits_per_pixel >> 3; /* bytes per pixel */
    /* Warning: in its insanity, xlib provides unsigned image data through a
     * char* pointer, so we have to make it uint8_t to make things not break.
     * Anyone who performs further investigation of the xlib API likely risks
     * permanent brain damage. */
    uint8_t *pix = image->data;
    Window w;
    XSetWindowAttributes attr;

    /* Code doesn't currently support 16-bit or PAL8 */
    if (image->bits_per_pixel != 24 && image->bits_per_pixel != 32)
        return;

    /* Install a standard left-pointer cursor on the root window (created
     * once and cached in s->c) before querying the cursor image. */
    if (!s->c)
        s->c = XCreateFontCursor(dpy, XC_left_ptr);
    w = DefaultRootWindow(dpy);
    attr.cursor = s->c;
    XChangeWindowAttributes(dpy, w, CWCursor, &attr);

    xcim = XFixesGetCursorImage(dpy);

    /* Top-left corner of the cursor bitmap in screen coordinates
     * (xhot/yhot is the hotspot offset inside the bitmap). */
    x = xcim->x - xcim->xhot;
    y = xcim->y - xcim->yhot;

    /* Clip the blit rectangle against the grabbing region. */
    to_line = FFMIN((y + xcim->height), (height + y_off));
    to_column = FFMIN((x + xcim->width), (width + x_off));

    for (line = FFMAX(y, y_off); line < to_line; line++) {
        for (column = FFMAX(x, x_off); column < to_column; column++) {
            int xcim_addr = (line - y) * xcim->width + column - x;
            int image_addr = ((line - y_off) * width + column - x_off) * pixstride;
            /* Extract the four 8-bit channels of the cursor pixel; the
             * shift order matches the frame's channel layout below. */
            int r = (uint8_t)(xcim->pixels[xcim_addr] >> 0);
            int g = (uint8_t)(xcim->pixels[xcim_addr] >> 8);
            int b = (uint8_t)(xcim->pixels[xcim_addr] >> 16);
            int a = (uint8_t)(xcim->pixels[xcim_addr] >> 24);

            if (a == 255) {
                /* Fully opaque cursor pixel: plain overwrite. */
                pix[image_addr+0] = r;
                pix[image_addr+1] = g;
                pix[image_addr+2] = b;
            } else if (a) {
                /* pixel values from XFixesGetCursorImage come premultiplied by alpha,
                 * so blend as: out = cursor + frame * (255 - a) / 255 (rounded) */
                pix[image_addr+0] = r + (pix[image_addr+0]*(255-a) + 255/2) / 255;
                pix[image_addr+1] = g + (pix[image_addr+1]*(255-a) + 255/2) / 255;
                pix[image_addr+2] = b + (pix[image_addr+2]*(255-a) + 255/2) / 255;
            }
        }
    }

    XFree(xcim);
    xcim = NULL;
}
 
 
/**
 * Read new data in the image structure.
 *
 * Issues a raw X protocol GetImage request by hand (via Xlib-internal
 * macros) and reads the reply directly into the caller-owned
 * image->data buffer, avoiding the fresh XImage allocation that
 * XGetImage would perform on every frame.
 *
 * @param dpy X11 display to grab from
 * @param d Drawable to read pixels from (here: the root window)
 * @param image Image where the grab will be put
 * @param x Top-Left grabbing rectangle horizontal coordinate
 * @param y Top-Left grabbing rectangle vertical coordinate
 * @return 0 if error, !0 if successful
 */
static int
xget_zpixmap(Display *dpy, Drawable d, XImage *image, int x, int y)
{
    xGetImageReply rep;
    xGetImageReq *req;
    long nbytes;

    if (!image) {
        return 0;
    }

    /* LockDisplay/GetReq are Xlib-internal macros (Xlibint.h): lock the
     * display and allocate a GetImage request in the output buffer. */
    LockDisplay(dpy);
    GetReq(GetImage, req);

    /* First set up the standard stuff in the request */
    req->drawable = d;
    req->x = x;
    req->y = y;
    req->width = image->width;
    req->height = image->height;
    req->planeMask = (unsigned int)AllPlanes;
    req->format = ZPixmap;

    if (!_XReply(dpy, (xReply *)&rep, 0, xFalse) || !rep.length) {
        UnlockDisplay(dpy);
        SyncHandle();
        return 0;
    }

    /* Reply length is expressed in 4-byte units. */
    nbytes = (long)rep.length << 2;
    _XReadPad(dpy, image->data, nbytes);

    UnlockDisplay(dpy);
    SyncHandle();
    return 1;
}
 
/**
 * Grab a frame from x11 (public device demuxer API).
 *
 * Sleeps until the next frame is due, optionally moves the grab region
 * to follow the mouse, keeps the region outline window up to date,
 * grabs the screen contents into the persistent XImage (the packet
 * data points into that buffer, it is not copied per call) and
 * optionally paints the mouse pointer on top.
 *
 * @param s1 Context from avformat core
 * @param pkt Packet holding the grabbed frame
 * @return frame size in bytes
 */
static int
x11grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    struct x11grab *s = s1->priv_data;
    Display *dpy = s->dpy;
    XImage *image = s->image;
    int x_off = s->x_off;
    int y_off = s->y_off;

    int screen;
    Window root;
    int follow_mouse = s->follow_mouse;

    int64_t curtime, delay;
    struct timespec ts;

    /* Calculate the time of the next frame */
    s->time_frame += INT64_C(1000000);

    /* wait based on the frame rate: sleep until time_frame is reached,
     * and drop a tick if we are more than one frame interval behind.
     * curtime is always set before the loop exits. */
    for (;;) {
        curtime = av_gettime();
        delay = s->time_frame * av_q2d(s->time_base) - curtime;
        if (delay <= 0) {
            if (delay < INT64_C(-1000000) * av_q2d(s->time_base)) {
                s->time_frame += INT64_C(1000000);
            }
            break;
        }
        ts.tv_sec = delay / 1000000;
        ts.tv_nsec = (delay % 1000000) * 1000;
        nanosleep(&ts, NULL);
    }

    /* The packet wraps the long-lived image buffer; pts is wall clock
     * in microseconds (matching the 1/1000000 stream time base). */
    av_init_packet(pkt);
    pkt->data = image->data;
    pkt->size = s->frame_size;
    pkt->pts = curtime;
    if (s->palette_changed) {
        /* PAL8 mode: ship the colormap snapshot as palette side data.
         * (s starts with an AVClass pointer, so it is a valid log ctx.) */
        uint8_t *pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
                                               AVPALETTE_SIZE);
        if (!pal) {
            av_log(s, AV_LOG_ERROR, "Cannot append palette to packet\n");
        } else {
            memcpy(pal, s->palette, AVPALETTE_SIZE);
            s->palette_changed = 0;
        }
    }

    screen = DefaultScreen(dpy);
    root = RootWindow(dpy, screen);
    if (follow_mouse) {
        int screen_w, screen_h;
        int pointer_x, pointer_y, _;
        Window w;

        screen_w = DisplayWidth(dpy, screen);
        screen_h = DisplayHeight(dpy, screen);
        XQueryPointer(dpy, root, &w, &w, &pointer_x, &pointer_y, &_, &_, &_);
        if (follow_mouse == -1) {
            // follow the mouse, put it at center of grabbing region
            x_off += pointer_x - s->width / 2 - x_off;
            y_off += pointer_y - s->height / 2 - y_off;
        } else {
            // follow the mouse, but only move the grabbing region when mouse
            // reaches within certain pixels (follow_mouse) to the edge.
            if (pointer_x > x_off + s->width - follow_mouse) {
                x_off += pointer_x - (x_off + s->width - follow_mouse);
            } else if (pointer_x < x_off + follow_mouse)
                x_off -= (x_off + follow_mouse) - pointer_x;
            if (pointer_y > y_off + s->height - follow_mouse) {
                y_off += pointer_y - (y_off + s->height - follow_mouse);
            } else if (pointer_y < y_off + follow_mouse)
                y_off -= (y_off + follow_mouse) - pointer_y;
        }
        // adjust grabbing region position if it goes out of screen.
        s->x_off = x_off = FFMIN(FFMAX(x_off, 0), screen_w - s->width);
        s->y_off = y_off = FFMIN(FFMAX(y_off, 0), screen_h - s->height);

        if (s->show_region && s->region_win)
            XMoveWindow(dpy, s->region_win,
                        s->x_off - REGION_WIN_BORDER,
                        s->y_off - REGION_WIN_BORDER);
    }

    if (s->show_region) {
        if (s->region_win) {
            XEvent evt;
            // drain pending expose/configure events; redraw if any arrived.
            for (evt.type = NoEventMask; XCheckMaskEvent(dpy, ExposureMask | StructureNotifyMask, &evt); );
            if (evt.type)
                x11grab_draw_region_win(s);
        } else {
            /* Lazily create the outline window on the first frame. */
            x11grab_region_win_init(s);
        }
    }

    /* Grab into the existing image: XShm when available, otherwise a
     * hand-rolled GetImage request (see xget_zpixmap). */
    if (s->use_shm) {
        if (!XShmGetImage(dpy, root, image, x_off, y_off, AllPlanes)) {
            av_log(s1, AV_LOG_INFO, "XShmGetImage() failed\n");
        }
    } else {
        if (!xget_zpixmap(dpy, root, image, x_off, y_off)) {
            av_log(s1, AV_LOG_INFO, "XGetZPixmap() failed\n");
        }
    }

    if (s->draw_mouse) {
        paint_mouse_pointer(image, s);
    }

    return s->frame_size;
}
 
/**
 * Close x11 frame grabber (public device demuxer API).
 *
 * Releases, in order: the MIT-SHM segment (if one was attached), the
 * capture XImage, the region outline window and the display connection.
 *
 * @param s1 Context from avformat core
 * @return 0 success, !0 failure
 */
static int
x11grab_read_close(AVFormatContext *s1)
{
    struct x11grab *g = s1->priv_data;

    /* Detach cleanly from shared mem */
    if (g->use_shm) {
        XShmDetach(g->dpy, &g->shminfo);
        shmdt(g->shminfo.shmaddr);
        shmctl(g->shminfo.shmid, IPC_RMID, NULL);
    }

    /* Destroy X11 image */
    if (g->image) {
        XDestroyImage(g->image);
        g->image = NULL;
    }

    if (g->region_win)
        XDestroyWindow(g->dpy, g->region_win);

    /* Free X11 display */
    XCloseDisplay(g->dpy);
    return 0;
}
 
#define OFFSET(x) offsetof(struct x11grab, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
/* Private demuxer options, exposed through x11_class below. */
static const AVOption options[] = {
    { "draw_mouse", "draw the mouse pointer", OFFSET(draw_mouse), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, DEC },

    /* follow_mouse: -1 ("centered") keeps the pointer centered in the
     * region; a positive N moves the region once the pointer comes
     * within N pixels of the edge (see x11grab_read_packet). */
    { "follow_mouse", "move the grabbing region when the mouse pointer reaches within specified amount of pixels to the edge of region",
      OFFSET(follow_mouse), AV_OPT_TYPE_INT, {.i64 = 0}, -1, INT_MAX, DEC, "follow_mouse" },
    { "centered", "keep the mouse pointer at the center of grabbing region when following",
      0, AV_OPT_TYPE_CONST, {.i64 = -1}, INT_MIN, INT_MAX, DEC, "follow_mouse" },

    { "framerate", "set video frame rate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "ntsc"}, 0, 0, DEC },
    { "show_region", "show the grabbing region", OFFSET(show_region), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, DEC },
    /* video_size writes width and height (adjacent struct fields). */
    { "video_size", "set video frame size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = "vga"}, 0, 0, DEC },
    { NULL },
};
 
/* AVClass binding the private option table above to the demuxer. */
static const AVClass x11_class = {
    .class_name = "X11grab indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/** x11 grabber device demuxer declaration */
AVInputFormat ff_x11grab_demuxer = {
    .name           = "x11grab",
    .long_name      = NULL_IF_CONFIG_SMALL("X11grab"),
    .priv_data_size = sizeof(struct x11grab),
    .read_header    = x11grab_read_header,
    .read_packet    = x11grab_read_packet,
    .read_close     = x11grab_read_close,
    .flags          = AVFMT_NOFILE, /* grabs from the display, no input file */
    .priv_class     = &x11_class,
};
/contrib/sdk/sources/ffmpeg/libavdevice/xv.c
0,0 → 1,217
/*
* Copyright (c) 2013 Jeff Moguillansky
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
 
/**
* @file
* XVideo output device
*
* TODO:
* - add support to more formats
* - add support to window id specification
*/
 
#include <X11/Xlib.h>
#include <X11/extensions/Xv.h>
#include <X11/extensions/XShm.h>
#include <X11/extensions/Xvlib.h>
#include <sys/shm.h>
 
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avdevice.h"
 
/**
 * XVideo output device context.
 */
typedef struct {
    AVClass *class;                   // class for private options
    GC gc;                            // graphics context used by XvShmPutImage

    Window window;                    // destination window
    char *window_title;               // option; defaults to the filename when unset
    int window_width, window_height;  // option "window_size"; defaults to the video size
    int window_x, window_y;           // options: window position

    Display* display;                 // X11 display connection
    char *display_name;               // option; NULL selects the default display

    XvImage* yuv_image;               // shared-memory image handed to the Xv port
    int image_width, image_height;    // video frame dimensions
    XShmSegmentInfo yuv_shminfo;      // MIT-SHM segment backing yuv_image
    int xv_port;                      // XVideo port (first adaptor's base_id)
} XVContext;
 
/**
 * Open the XVideo output: create the destination window, pick an
 * XVideo port, verify it supports planar I420 and allocate a
 * shared-memory XvImage for the frames.
 *
 * @param s avformat muxer context; must carry exactly one yuv420p
 *          rawvideo stream
 * @return 0 on success, a negative AVERROR code on failure
 */
static int xv_write_header(AVFormatContext *s)
{
    XVContext *xv = s->priv_data;
    unsigned int num_adaptors;
    XvAdaptorInfo *ai;
    XvImageFormatValues *fv;
    int num_formats = 0, j;
    AVCodecContext *encctx = s->streams[0]->codec;

    if (   s->nb_streams > 1
        || encctx->codec_type != AVMEDIA_TYPE_VIDEO
        || encctx->codec_id   != AV_CODEC_ID_RAWVIDEO) {
        av_log(s, AV_LOG_ERROR, "Only supports one rawvideo stream\n");
        return AVERROR(EINVAL);
    }

    xv->display = XOpenDisplay(xv->display_name);
    if (!xv->display) {
        av_log(s, AV_LOG_ERROR, "Could not open the X11 display '%s'\n", xv->display_name);
        return AVERROR(EINVAL);
    }

    xv->image_width  = encctx->width;
    xv->image_height = encctx->height;
    /* Default the window to the video size when no forced size is set.
     * (The redundant second assignment of image_width/height that used
     * to sit further down was dropped.) */
    if (!xv->window_width && !xv->window_height) {
        xv->window_width  = encctx->width;
        xv->window_height = encctx->height;
    }
    xv->window = XCreateSimpleWindow(xv->display, DefaultRootWindow(xv->display),
                                     xv->window_x, xv->window_y,
                                     xv->window_width, xv->window_height,
                                     0, 0, 0);
    if (!xv->window_title) {
        if (!(xv->window_title = av_strdup(s->filename)))
            return AVERROR(ENOMEM);
    }
    XStoreName(xv->display, xv->window, xv->window_title);
    XMapWindow(xv->display, xv->window);

    if (XvQueryAdaptors(xv->display, DefaultRootWindow(xv->display), &num_adaptors, &ai) != Success)
        return AVERROR_EXTERNAL;
    /* BUGFIX: ai[0] used to be dereferenced without checking that at
     * least one XVideo adaptor exists. */
    if (!num_adaptors) {
        av_log(s, AV_LOG_ERROR, "No X-Video adaptors present\n");
        return AVERROR(ENODEV);
    }
    xv->xv_port = ai[0].base_id;
    XvFreeAdaptorInfo(ai); /* BUGFIX: the adaptor list was leaked before */

    if (encctx->pix_fmt != AV_PIX_FMT_YUV420P) {
        av_log(s, AV_LOG_ERROR,
               "Unsupported pixel format '%s', only yuv420p is currently supported\n",
               av_get_pix_fmt_name(encctx->pix_fmt));
        return AVERROR_PATCHWELCOME;
    }

    /* Make sure the chosen port accepts the planar I420 FourCC. */
    fv = XvListImageFormats(xv->display, xv->xv_port, &num_formats);
    if (!fv)
        return AVERROR_EXTERNAL;
    for (j = 0; j < num_formats; j++) {
        if (fv[j].id == MKTAG('I','4','2','0')) {
            break;
        }
    }
    XFree(fv);

    if (j >= num_formats) {
        av_log(s, AV_LOG_ERROR,
               "Device does not support pixel format yuv420p, aborting\n");
        return AVERROR(EINVAL);
    }

    xv->gc = XCreateGC(xv->display, xv->window, 0, 0);
    xv->yuv_image = XvShmCreateImage(xv->display, xv->xv_port,
                                     MKTAG('I','4','2','0'), 0,
                                     xv->image_width, xv->image_height, &xv->yuv_shminfo);
    xv->yuv_shminfo.shmid = shmget(IPC_PRIVATE, xv->yuv_image->data_size,
                                   IPC_CREAT | 0777);
    /* BUGFIX: shmget/shmat failures were previously ignored, leading to
     * writes through an invalid mapping in xv_write_packet. */
    if (xv->yuv_shminfo.shmid == -1)
        return AVERROR(ENOMEM);
    xv->yuv_shminfo.shmaddr = (char *)shmat(xv->yuv_shminfo.shmid, 0, 0);
    if (xv->yuv_shminfo.shmaddr == (char *)-1)
        return AVERROR(ENOMEM);
    xv->yuv_image->data = xv->yuv_shminfo.shmaddr;
    xv->yuv_shminfo.readOnly = False;

    XShmAttach(xv->display, &xv->yuv_shminfo);
    XSync(xv->display, False);
    /* Mark the segment for removal once all attachments are gone. */
    shmctl(xv->yuv_shminfo.shmid, IPC_RMID, 0);

    return 0;
}
 
/**
 * Display one rawvideo packet (public muxer API).
 *
 * Copies the three yuv420p planes from the packet into the
 * shared-memory XvImage line by line (honoring the image's per-plane
 * pitches) and asks the Xv port to scale the image to the current
 * window size.
 *
 * @param s avformat muxer context
 * @param pkt packet containing one yuv420p frame
 * @return 0 on success, AVERROR_EXTERNAL if the Xv blit fails
 */
static int xv_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    XVContext *xv = s->priv_data;
    XvImage *img = xv->yuv_image;
    XWindowAttributes window_attrs;
    AVPicture pict;
    AVCodecContext *ctx = s->streams[0]->codec;
    int y, h;

    /* Chroma planes are half height in yuv420p. */
    h = img->height / 2;

    /* Map the packed packet data onto per-plane pointers/linesizes. */
    avpicture_fill(&pict, pkt->data, ctx->pix_fmt, ctx->width, ctx->height);
    /* Luma plane, row by row (source linesize may differ from pitch). */
    for (y = 0; y < img->height; y++) {
        memcpy(&img->data[img->offsets[0] + (y * img->pitches[0])],
               &pict.data[0][y * pict.linesize[0]], img->pitches[0]);
    }

    /* U and V planes. */
    for (y = 0; y < h; ++y) {
        memcpy(&img->data[img->offsets[1] + (y * img->pitches[1])],
               &pict.data[1][y * pict.linesize[1]], img->pitches[1]);
        memcpy(&img->data[img->offsets[2] + (y * img->pitches[2])],
               &pict.data[2][y * pict.linesize[2]], img->pitches[2]);
    }

    /* Blit and scale to whatever size the window currently has. */
    XGetWindowAttributes(xv->display, xv->window, &window_attrs);
    if (XvShmPutImage(xv->display, xv->xv_port, xv->window, xv->gc,
                      xv->yuv_image, 0, 0, xv->image_width, xv->image_height, 0, 0,
                      window_attrs.width, window_attrs.height, True) != Success) {
        av_log(s, AV_LOG_ERROR, "Could not copy image to XV shared memory buffer\n");
        return AVERROR_EXTERNAL;
    }
    return 0;
}
 
/**
 * Tear down the XVideo output device (shared memory, image, display).
 *
 * @param s avformat muxer context
 * @return 0
 */
static int xv_write_trailer(AVFormatContext *s)
{
    XVContext *xv = s->priv_data;

    /* BUGFIX: handles used to be released unconditionally, which
     * dereferences NULL (display) or detaches an unattached SHM
     * segment if xv_write_header() failed part-way. priv_data is
     * zero-initialized by lavf, so these checks are reliable. */
    if (xv->display) {
        if (xv->yuv_image) {
            XShmDetach(xv->display, &xv->yuv_shminfo);
            shmdt(xv->yuv_image->data);
            XFree(xv->yuv_image);
            xv->yuv_image = NULL;
        }
        XCloseDisplay(xv->display);
        xv->display = NULL;
    }
    return 0;
}
 
#define OFFSET(x) offsetof(XVContext, x)
/* Muxer private options; all are encoding parameters. */
static const AVOption options[] = {
    { "display_name", "set display name",         OFFSET(display_name), AV_OPT_TYPE_STRING,     {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    /* window_size writes window_width and window_height (adjacent fields). */
    { "window_size",  "set window forced size",   OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "window_title", "set window title",         OFFSET(window_title), AV_OPT_TYPE_STRING,     {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "window_x",     "set window x offset",      OFFSET(window_x),     AV_OPT_TYPE_INT,        {.i64 = 0 }, -INT_MAX, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "window_y",     "set window y offset",      OFFSET(window_y),     AV_OPT_TYPE_INT,        {.i64 = 0 }, -INT_MAX, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { NULL }

};
 
/* AVClass binding the private option table above to the muxer. */
static const AVClass xv_class = {
    .class_name = "xvideo outdev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
 
/** XVideo output device muxer declaration */
AVOutputFormat ff_xv_muxer = {
    .name           = "xv",
    .long_name      = NULL_IF_CONFIG_SMALL("XV (XVideo) output device"),
    .priv_data_size = sizeof(XVContext),
    .audio_codec    = AV_CODEC_ID_NONE,
    .video_codec    = AV_CODEC_ID_RAWVIDEO,
    .write_header   = xv_write_header,
    .write_packet   = xv_write_packet,
    .write_trailer  = xv_write_trailer,
    /* renders to a window: no output file, no timestamps enforced */
    .flags          = AVFMT_NOFILE | AVFMT_VARIABLE_FPS | AVFMT_NOTIMESTAMPS,
    .priv_class     = &xv_class,
};