Subversion Repositories Kolibri OS

Compare Revisions

Rev 6144 → Rev 6143

/contrib/media/fplay/decoder.c
File deleted
/contrib/media/fplay/Makefile
1,3 → 1,4
 
NAME= Fplay
 
FASM= fasm.exe
26,7 → 27,6
 
SOURCES = opendial.asm \
audio.c \
decoder.c \
fplay.c \
vaapi.c \
video.c \
/contrib/media/fplay/fplay.c
22,6 → 22,8
 
uint32_t win_width, win_height;
 
int have_sound = 0;
 
uint8_t *decoder_buffer;
extern int resampler_size;
extern int sample_rate;
32,7 → 34,9
 
int threads_running = DECODER_THREAD;
 
extern double audio_base;
 
 
int main( int argc, char *argv[])
{
static vst_t vst;
66,6 → 70,7
 
vst.fCtx->flags |= AVFMT_FLAG_GENPTS;
 
// Retrieve stream information
if(avformat_find_stream_info(vst.fCtx, NULL) < 0)
{
printf("Cannot find streams\n\r");
84,6 → 89,7
 
stream_duration = vst.fCtx->duration;
 
// Find the first video stream
vst.vStream = -1;
vst.aStream = -1;
 
102,6 → 108,7
vst.aStream < 0)
{
vst.aStream = i;
vst.audio_time_base = vst.fCtx->streams[i]->time_base;
if(stream_duration == 0)
stream_duration = vst.fCtx->streams[i]->duration;
}
115,6 → 122,12
 
// __asm__ __volatile__("int3");
 
// Get a pointer to the codec context for the video stream
vst.vCtx = vst.fCtx->streams[vst.vStream]->codec;
vst.aCtx = vst.fCtx->streams[vst.aStream]->codec;
 
vst.vCodec = avcodec_find_decoder(vst.vCtx->codec_id);
 
INIT_LIST_HEAD(&vst.input_list);
INIT_LIST_HEAD(&vst.output_list);
mutex_init(&vst.q_video.lock);
124,14 → 137,37
mutex_init(&vst.input_lock);
mutex_init(&vst.output_lock);
 
vst.vCtx = vst.fCtx->streams[vst.vStream]->codec;
vst.aCtx = vst.fCtx->streams[vst.aStream]->codec;
if(vst.vCodec == NULL)
{
printf("Unsupported codec with id %d for input stream %d\n",
vst.vCtx->codec_id, vst.vStream);
return -1;
}
 
if(init_video_decoder(&vst) != 0 )
vst.Frame = av_frame_alloc();
if(vst.Frame == NULL)
{
printf("Cannot alloc video frame\n");
return -1;
};
 
vst.aCtx->request_channel_layout = AV_CH_LAYOUT_STEREO;
if(fplay_init_context(&vst))
return -1;
 
if(avcodec_open2(vst.vCtx, vst.vCodec, NULL) < 0)
{
printf("Error while opening codec for input stream %d\n",
vst.vStream);
return -1; // Could not open codec
};
 
if (vst.aCtx->channels > 0)
vst.aCtx->request_channels = FFMIN(2, vst.aCtx->channels);
else
vst.aCtx->request_channels = 2;
 
vst.aCodec = avcodec_find_decoder(vst.aCtx->codec_id);
 
if(vst.aCodec)
{
if(avcodec_open2(vst.aCtx, vst.aCodec, NULL) >= 0 )
162,11 → 198,11
astream.count = 0;
astream.buffer = (char *)av_mallocz(192000*3);
if( astream.buffer != NULL )
vst.has_sound = 1;
have_sound = 1;
else
av_free(decoder_buffer);
}
if( vst.has_sound == 0)
if( have_sound == 0)
{
printf("Not enough memory for audio buffers\n");
}
193,11 → 229,9
if(astream.lock.handle)
mutex_destroy(&astream.lock);
 
fini_video_decoder(&vst);
mutex_destroy(&vst.q_video.lock);
mutex_destroy(&vst.q_audio.lock);
mutex_destroy(&vst.decoder_lock);
 
return 0;
}
 
213,14 → 247,14
if(packet.stream_index == vst->vStream)
put_packet(&vst->q_video, &packet);
else if( (packet.stream_index == vst->aStream) &&
(vst->has_sound != 0) )
(have_sound != 0) )
{
put_packet(&vst->q_audio, &packet);
if(vst->audio_timer_valid == 0 &&
packet.pts != AV_NOPTS_VALUE )
if(audio_base == -1.0)
{
vst->audio_timer_base = get_audio_base(vst) * packet.pts;
vst->audio_timer_valid = 1;
if (packet.pts != AV_NOPTS_VALUE)
audio_base = get_audio_base(vst) * packet.pts;
// printf("audio base %f\n", audio_base);
};
}
else av_free_packet(&packet);
/contrib/media/fplay/fplay.h
110,22 → 110,6
int put_packet(queue_t *q, AVPacket *pkt);
int get_packet(queue_t *q, AVPacket *pkt);
 
struct decoder
{
const char *name;
enum AVCodecID codec_id;
enum AVPixelFormat pix_fmt;
int width;
int height;
AVFrame *Frame;
vframe_t *active_frame;
void *hwctx;
int is_hw:1;
int frame_reorder:1;
int nframes;
vframe_t vframes[16];
};
 
struct vstate
{
AVFormatContext *fCtx; /* format context */
138,7 → 122,7
int vStream; /* video stream index */
int aStream; /* audio stream index */
AVRational video_time_base;
double audio_timer_base;
AVRational audio_time_base;
 
queue_t q_video; /* video packets queue */
queue_t q_audio; /* audio packets queue */
151,13 → 135,18
struct list_head input_list;
struct list_head output_list;
 
struct decoder *decoder;
AVFrame *Frame;
 
vframe_t *decoder_frame;
volatile int frames_count;
int has_sound:1;
int audio_timer_valid:1;
void *hwCtx; /* hardware context */
int hwdec:1; /* hardware decoder */
int blit_bitmap:1; /* hardware RGBA blitter */
int blit_texture:1; /* hardware RGBA blit and scale */
int blit_planar:1; /* hardware YUV blit and scale */
int frame_reorder:1;
int nframes;
vframe_t vframes[16];
};
 
 
184,8 → 173,6
int video_thread(void *param);
void flush_video(vst_t* vst);
 
int init_video_decoder(vst_t *vst);
void fini_video_decoder(vst_t *vst);
void decoder(vst_t *vst);
int decode_video(vst_t* vst);
int decode_audio(AVCodecContext *ctx, queue_t *qa);
204,13 → 191,8
return (double)av_q2d(vst->fCtx->streams[vst->aStream]->time_base)*1000;
};
 
struct decoder* va_init_decoder(vst_t *vst);
void va_create_planar(vst_t *vst, vframe_t *vframe);
 
int init_fontlib();
char *get_moviefile();
 
#define ENTER() printf("enter %s\n",__FUNCTION__)
#define LEAVE() printf("leave %s\n",__FUNCTION__)
#define FAIL() printf("fail %s\n",__FUNCTION__)
 
/contrib/media/fplay/vaapi.c
160,7 → 160,22
return "<unknown>";
}
 
VADisplay va_open_display(void)
{
VADisplay va_dpy;
 
drm_fd = get_service("DISPLAY");
if (drm_fd == 0)
return NULL;
 
va_dpy = vaGetDisplayDRM(drm_fd);
if (va_dpy)
return va_dpy;
 
drm_fd = 0;
return NULL;
};
 
void *vaapi_init(VADisplay display)
{
struct vaapi_context *vaapi;
328,7 → 343,7
 
printf("vaCreateSurfaces %dx%d\n",picture_width,picture_height);
status = vaCreateSurfaces(vaapi->display, VA_RT_FORMAT_YUV420, picture_width, picture_height,
v_surface_id,vst->decoder->nframes,NULL,0);
v_surface_id,vst->nframes,NULL,0);
if (!vaapi_check_status(status, "vaCreateSurfaces()"))
{
FAIL();
363,7 → 378,7
status = vaCreateContext(vaapi->display, config_id,
picture_width, picture_height,
VA_PROGRESSIVE,
v_surface_id, vst->decoder->nframes,
v_surface_id, vst->nframes,
&context_id);
if (!vaapi_check_status(status, "vaCreateContext()"))
{
384,7 → 399,7
vst_t *vst = (vst_t*)avctx->opaque;
VAProfile profile = VAProfileNone;
 
ENTER();
 
for (int i = 0; fmt[i] != PIX_FMT_NONE; i++)
{
enum AVCodecID codec = avctx->codec_id;
434,7 → 449,7
vst_t *vst = (vst_t*)avctx->opaque;
void *surface;
 
surface = (void *)(uintptr_t)v_surface_id[vst->decoder->active_frame->index];
surface = (void *)(uintptr_t)v_surface_id[vst->decoder_frame->index];
 
pic->data[3] = surface;
 
451,8 → 466,73
 
struct vaapi_context va_context_storage;
 
int fplay_init_context(vst_t *vst)
{
AVCodecContext *vCtx = vst->vCtx;
 
vst->nframes = 4;
 
if(va_check_codec_support(vCtx->codec_id))
{
VADisplay dpy;
 
dpy = va_open_display();
vst->hwCtx = vaapi_init(dpy);
 
if(vst->hwCtx != NULL)
{
if(vCtx->codec_id == AV_CODEC_ID_H264)
vst->nframes = 16;
 
for(int i = 0; i < vst->nframes; i++)
{
vframe_t *vframe = &vst->vframes[i];
 
vframe->format = AV_PIX_FMT_NONE;
vframe->is_hw_pic = 1;
vframe->index = i;
vframe->pts = 0;
vframe->ready = 0;
list_add_tail(&vframe->list, &vst->input_list);
};
 
vst->hwdec = 1;
vst->frame_reorder = 1;
vCtx->opaque = vst;
vCtx->thread_count = 1;
vCtx->get_format = get_format;
vCtx->get_buffer2 = get_buffer2;
return 0;
};
};
 
vst->hwdec = 0;
 
for(int i = 0; i < vst->nframes; i++)
{
vframe_t *vframe;
int ret;
 
vframe = &vst->vframes[i];
 
ret = avpicture_alloc(&vframe->picture, vst->vCtx->pix_fmt,
vst->vCtx->width, vst->vCtx->height);
if ( ret != 0 )
{
printf("Cannot alloc video buffer\n\r");
return ret;
};
vframe->format = vst->vCtx->pix_fmt;
vframe->index = i;
vframe->pts = 0;
vframe->ready = 0;
list_add_tail(&vframe->list, &vst->input_list);
};
 
return 0;
}
 
 
#define EGL_TEXTURE_Y_U_V_WL 0x31D7
#define EGL_TEXTURE_Y_UV_WL 0x31D8
#define EGL_TEXTURE_Y_XUXV_WL 0x31D9
568,6 → 648,7
vaimage.offsets[2],vaimage.pitches[2]);
if(planar != NULL)
{
printf("create planar image\n",planar);
vframe->planar = planar;
vframe->format = AV_PIX_FMT_NV12;
};
576,78 → 657,3
vaDestroyImage(vaapi->display, vaimage.image_id);
 
}
 
struct decoder* va_init_decoder(vst_t *vst)
{
AVCodecContext *vCtx = vst->vCtx;
struct decoder *decoder;
VADisplay dpy;
 
drm_fd = get_service("DISPLAY");
if (drm_fd == 0)
return NULL;
 
dpy = vaGetDisplayDRM(drm_fd);
if (dpy == NULL)
goto err_0;
 
decoder = calloc(1, sizeof(struct decoder));
if(decoder == NULL)
goto err_0;
 
decoder->hwctx = vaapi_init(dpy);
if(decoder->hwctx == NULL)
goto err_1;
 
decoder->Frame = av_frame_alloc();
if(decoder->Frame == NULL)
goto err_1;
 
if(vCtx->codec_id == AV_CODEC_ID_H264)
decoder->nframes = 16;
else
decoder->nframes = 4;
 
for(int i = 0; i < decoder->nframes; i++)
{
vframe_t *vframe = &decoder->vframes[i];
 
vframe->format = AV_PIX_FMT_NONE;
vframe->is_hw_pic = 1;
vframe->index = i;
vframe->pts = 0;
vframe->ready = 0;
list_add_tail(&vframe->list, &vst->input_list);
};
 
vCtx->opaque = vst;
vCtx->thread_count = 1;
vCtx->get_format = get_format;
vCtx->get_buffer2 = get_buffer2;
 
if(avcodec_open2(vst->vCtx, vst->vCodec, NULL) < 0)
{
printf("Error while opening codec for input stream %d\n",
vst->vStream);
goto err_2;
};
 
decoder->name = vst->vCodec->name;
decoder->codec_id = vCtx->codec_id;
decoder->pix_fmt = vCtx->pix_fmt;
decoder->width = vCtx->width;
decoder->height = vCtx->height;
decoder->is_hw = 1;
decoder->frame_reorder = 1;
 
return decoder;
 
err_2:
av_frame_free(&decoder->Frame);
err_1:
free(decoder);
vaTerminate(dpy);
err_0:
drm_fd = 0;
return NULL;
}
/contrib/media/fplay/video.c
40,7 → 40,7
list_for_each_entry_safe(vframe, tmp, &vst->output_list, list)
list_move_tail(&vframe->list, &vst->input_list);
 
list_for_each_entry(vframe, &vst->input_list, list)
list_for_each_entry(vframe, &vst->output_list, list)
{
vframe->pts = 0;
vframe->ready = 0;
52,7 → 52,113
};
 
 
static vframe_t *get_input_frame(vst_t *vst)
{
vframe_t *vframe = NULL;
 
mutex_lock(&vst->input_lock);
if(!list_empty(&vst->input_list))
{
vframe = list_first_entry(&vst->input_list, vframe_t, list);
list_del(&vframe->list);
}
mutex_unlock(&vst->input_lock);
 
return vframe;
}
 
static void put_output_frame(vst_t *vst, vframe_t *vframe)
{
mutex_lock(&vst->output_lock);
if(list_empty(&vst->output_list))
list_add_tail(&vframe->list, &vst->output_list);
else
{
vframe_t *cur;
 
cur = list_first_entry(&vst->output_list,vframe_t,list);
if(vframe->pts < cur->pts)
list_add_tail(&vframe->list, &vst->output_list);
else
{
list_for_each_entry_reverse(cur,&vst->output_list,list)
{
if(vframe->pts > cur->pts)
{
list_add(&vframe->list, &cur->list);
break;
};
};
};
};
vst->frames_count++;
mutex_unlock(&vst->output_lock);
};
 
int decode_video(vst_t* vst)
{
double pts;
AVPacket pkt;
 
int frameFinished;
 
if(vst->decoder_frame == NULL)
vst->decoder_frame = get_input_frame(vst);
 
if(vst->decoder_frame == NULL)
return -1;
 
if( get_packet(&vst->q_video, &pkt) == 0 )
return 0;
 
frameFinished = 0;
 
mutex_lock(&vst->gpu_lock);
 
if(avcodec_decode_video2(vst->vCtx, vst->Frame, &frameFinished, &pkt) <= 0)
printf("video decoder error\n");
 
if(frameFinished)
{
vframe_t *vframe = vst->decoder_frame;;
AVPicture *dst_pic;
 
if(vst->hwdec)
pts = pkt.pts;
else
pts = av_frame_get_best_effort_timestamp(vst->Frame);
 
pts*= av_q2d(vst->video_time_base);
 
dst_pic = &vframe->picture;
 
if(vframe->is_hw_pic == 0)
av_image_copy(dst_pic->data, dst_pic->linesize,
(const uint8_t**)vst->Frame->data,
vst->Frame->linesize, vst->vCtx->pix_fmt, vst->vCtx->width, vst->vCtx->height);
else
va_create_planar(vst, vframe);
 
vframe->pts = pts*1000.0;
vframe->pkt_pts = pkt.pts*av_q2d(vst->video_time_base)*1000.0;
vframe->ready = 1;
 
put_output_frame(vst, vframe);
 
// printf("decoded index: %d pts: %f pkt_pts %f pkt_dts %f\n",
// vst->dfx, vst->vframe[vst->dfx].pts,
// vst->vframe[vst->dfx].pkt_pts, vst->vframe[vst->dfx].pkt_dts);
 
vst->decoder_frame = NULL;
};
av_frame_unref(vst->Frame);
mutex_unlock(&vst->gpu_lock);
 
av_free_packet(&pkt);
 
return 1;
}
 
extern volatile enum player_state player_state;
extern volatile enum player_state decoder_state;
extern volatile enum player_state sound_state;
376,18 → 482,6
 
__sync_and_and_fetch(&threads_running,~VIDEO_THREAD);
 
{
vframe_t *vframe, *tmp;
flush_video(vst);
 
list_for_each_entry_safe(vframe, tmp, &vst->output_list, list)
{
list_del(&vframe->list);
if(vframe->planar != NULL)
pxDestroyPlanar(vframe->planar);
}
}
 
destroy_render(main_render);
fini_winlib();
player_state = CLOSED;
643,6 → 737,7
CAPTION_HEIGHT+render->rcvideo.t,
render->rcvideo.r, render->rcvideo.b,0,0);
mutex_unlock(&render->vst->gpu_lock);
 
}
};
 
675,7 → 770,7
dst_height = render->rcvideo.b;
};
 
if(vframe->is_hw_pic)
if(vst->hwdec)
{
render_hw_planar(render, vframe);
return;
683,7 → 778,7
 
picture = &vframe->picture;
 
format = render->ctx_format;
format = render->vst->hwdec == 0 ? render->ctx_format : AV_PIX_FMT_BGRA;
cvt_ctx = sws_getCachedContext(cvt_ctx, render->ctx_width, render->ctx_height, format,
dst_width, dst_height, AV_PIX_FMT_BGRA,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
/contrib/media/fplay/audio.c
29,7 → 29,10
int sample_rate;
 
static uint32_t samples_written = 0;
double audio_base = -1.0;
 
double get_audio_base();
 
int init_audio(int format)
{
int err;
125,6 → 128,14
int data_size = av_samples_get_buffer_size(&plane_size, ctx->channels,
aFrame->nb_samples,
ctx->sample_fmt, 1);
 
// if(audio_base == -1.0)
// {
// if (pkt.pts != AV_NOPTS_VALUE)
// audio_base = get_audio_base() * pkt.pts;
// printf("audio base %f\n", audio_base);
// };
 
pkt_tmp.data += len;
pkt_tmp.size -= len;
 
243,7 → 254,6
 
int audio_thread(void *param)
{
vst_t *vst = param;
SND_EVENT evnt;
 
int buffsize;
296,10 → 306,11
memcpy(astream.buffer, astream.buffer+buffsize*2, astream.count);
mutex_unlock(&astream.lock);
 
SetTimeBase(hBuff, vst->audio_timer_base);
SetTimeBase(hBuff, audio_base);
 
case PAUSE_2_PLAY:
GetTimeStamp(hBuff, &last_time_stamp);
// printf("last audio time stamp %f\n", last_time_stamp);
 
if((err = PlayBuffer(hBuff, 0)) !=0 )
{
309,6 → 320,7
active = 1;
sync_audio(hBuff, buffsize);
sound_state = PLAY;
// printf("render: set audio latency to %f\n", audio_delta);
 
/* breakthrough */
 
373,7 → 385,7
if( active )
{
ResetBuffer(hBuff, SND_RESET_ALL);
vst->audio_timer_valid = 0;
audio_base = -1.0;
active = 0;
}
sound_state = STOP;
/contrib/media/fplay/winlib/window.c
38,7 → 38,6
void blit_panel(panel_t *panel);
void update_panel_size(window_t *win);
void update_caption_size(window_t *win);
int init_fontlib();
 
//#include "timer.h"
ctrl_t *win_get_child(window_t *win, int x, int y)
/contrib/media/fplay/winlib/button.c
16,7 → 16,6
 
static int button_proc(ctrl_t *btn, uint32_t msg, uint32_t arg1, uint32_t arg2);
static int spinbtn_proc(ctrl_t *btn, uint32_t msg, uint32_t arg1, uint32_t arg2);
void panel_set_layout(ctrl_t *panel, int layout);
 
ctrl_t *create_control(size_t size, int id, int x, int y,
int w, int h, ctrl_t *parent)
/contrib/media/fplay/winlib/caption.c
25,8 → 25,6
extern uint32_t main_cursor;
 
void update_caption_size(window_t *win);
void window_update_layout(window_t *win);
int draw_text_ext(void *pixmap, uint32_t pitch, int face, char *text, rect_t *rc, int color);
 
int caption_proc(ctrl_t *ctrl, uint32_t msg, uint32_t arg1, uint32_t arg2);
 
/contrib/media/fplay/winlib/control.h
203,7 → 203,6
ctrl_t *get_child(ctrl_t *ctrl, int x, int y);
 
ctrl_t *capture_mouse(ctrl_t *newm);
void release_mouse(void);
 
void blit_raw(ctx_t *ctx, void *raw, int x, int y, int w, int h, int pitch);