Details | Last modification | View Log | RSS feed
Rev | Author | Line No. | Line |
---|---|---|---|
4349 | Serge | 1 | /* |
2 | * Copyright (c) 2011 Michael Niedermayer |
||
3 | * |
||
4 | * This file is part of FFmpeg. |
||
5 | * |
||
6 | * FFmpeg is free software; you can redistribute it and/or modify |
||
7 | * it under the terms of the GNU General Public License as published by |
||
8 | * the Free Software Foundation; either version 2 of the License, or |
||
9 | * (at your option) any later version. |
||
10 | * |
||
11 | * FFmpeg is distributed in the hope that it will be useful, |
||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
||
14 | * GNU General Public License for more details. |
||
15 | * |
||
16 | * You should have received a copy of the GNU General Public License |
||
17 | * along with FFmpeg; if not, write to the Free Software |
||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
||
19 | * |
||
20 | * Parts of this file have been stolen from mplayer |
||
21 | */ |
||
22 | |||
23 | /** |
||
24 | * @file |
||
25 | */ |
||
26 | |||
27 | #include "avfilter.h" |
||
28 | #include "video.h" |
||
29 | #include "formats.h" |
||
30 | #include "internal.h" |
||
31 | #include "libavutil/avassert.h" |
||
32 | #include "libavutil/pixdesc.h" |
||
33 | #include "libavutil/intreadwrite.h" |
||
34 | #include "libavutil/imgutils.h" |
||
35 | #include "libavutil/opt.h" |
||
36 | |||
37 | #include "libmpcodecs/vf.h" |
||
38 | #include "libmpcodecs/img_format.h" |
||
39 | #include "libmpcodecs/cpudetect.h" |
||
40 | #include "libmpcodecs/av_helpers.h" |
||
41 | #include "libmpcodecs/libvo/fastmemcpy.h" |
||
42 | |||
43 | #include "libswscale/swscale.h" |
||
44 | |||
45 | |||
46 | //FIXME maybe link the orig in |
||
47 | //XXX: identical pix_fmt must be following with each others |
||
//FIXME maybe link the orig in
//XXX: identical pix_fmt must be following with each others
/* Translation table between libmpcodecs IMGFMT_* codes and libavutil
 * AV_PIX_FMT_* values.  Scanned linearly; terminated by the
 * {0, AV_PIX_FMT_NONE} sentinel.  Several IMGFMTs map to the same
 * pix_fmt (and vice versa); code that deduplicates relies on identical
 * pix_fmts being adjacent (see the XXX above).  Note the deliberate
 * red/blue naming swap between the two APIs for the packed
 * 16/15/12-bit RGB formats, as shown by the entries themselves. */
static const struct {
    int fmt;                    /* libmpcodecs IMGFMT_* value        */
    enum AVPixelFormat pix_fmt; /* equivalent libavutil pixel format */
} conversion_map[] = {
    {IMGFMT_ARGB, AV_PIX_FMT_ARGB},
    {IMGFMT_BGRA, AV_PIX_FMT_BGRA},
    {IMGFMT_BGR24, AV_PIX_FMT_BGR24},
    {IMGFMT_BGR16BE, AV_PIX_FMT_RGB565BE},
    {IMGFMT_BGR16LE, AV_PIX_FMT_RGB565LE},
    {IMGFMT_BGR15BE, AV_PIX_FMT_RGB555BE},
    {IMGFMT_BGR15LE, AV_PIX_FMT_RGB555LE},
    {IMGFMT_BGR12BE, AV_PIX_FMT_RGB444BE},
    {IMGFMT_BGR12LE, AV_PIX_FMT_RGB444LE},
    {IMGFMT_BGR8, AV_PIX_FMT_RGB8},
    {IMGFMT_BGR4, AV_PIX_FMT_RGB4},
    {IMGFMT_BGR1, AV_PIX_FMT_MONOBLACK},
    {IMGFMT_RGB1, AV_PIX_FMT_MONOBLACK},
    {IMGFMT_RG4B, AV_PIX_FMT_BGR4_BYTE},
    {IMGFMT_BG4B, AV_PIX_FMT_RGB4_BYTE},
    {IMGFMT_RGB48LE, AV_PIX_FMT_RGB48LE},
    {IMGFMT_RGB48BE, AV_PIX_FMT_RGB48BE},
    {IMGFMT_ABGR, AV_PIX_FMT_ABGR},
    {IMGFMT_RGBA, AV_PIX_FMT_RGBA},
    {IMGFMT_RGB24, AV_PIX_FMT_RGB24},
    {IMGFMT_RGB16BE, AV_PIX_FMT_BGR565BE},
    {IMGFMT_RGB16LE, AV_PIX_FMT_BGR565LE},
    {IMGFMT_RGB15BE, AV_PIX_FMT_BGR555BE},
    {IMGFMT_RGB15LE, AV_PIX_FMT_BGR555LE},
    {IMGFMT_RGB12BE, AV_PIX_FMT_BGR444BE},
    {IMGFMT_RGB12LE, AV_PIX_FMT_BGR444LE},
    {IMGFMT_RGB8, AV_PIX_FMT_BGR8},
    {IMGFMT_RGB4, AV_PIX_FMT_BGR4},
    {IMGFMT_BGR8, AV_PIX_FMT_PAL8},
    {IMGFMT_YUY2, AV_PIX_FMT_YUYV422},
    {IMGFMT_UYVY, AV_PIX_FMT_UYVY422},
    {IMGFMT_NV12, AV_PIX_FMT_NV12},
    {IMGFMT_NV21, AV_PIX_FMT_NV21},
    {IMGFMT_Y800, AV_PIX_FMT_GRAY8},
    {IMGFMT_Y8, AV_PIX_FMT_GRAY8},
    {IMGFMT_YVU9, AV_PIX_FMT_YUV410P},
    {IMGFMT_IF09, AV_PIX_FMT_YUV410P},
    {IMGFMT_YV12, AV_PIX_FMT_YUV420P},
    {IMGFMT_I420, AV_PIX_FMT_YUV420P},
    {IMGFMT_IYUV, AV_PIX_FMT_YUV420P},
    {IMGFMT_411P, AV_PIX_FMT_YUV411P},
    {IMGFMT_422P, AV_PIX_FMT_YUV422P},
    {IMGFMT_444P, AV_PIX_FMT_YUV444P},
    {IMGFMT_440P, AV_PIX_FMT_YUV440P},

    {IMGFMT_420A, AV_PIX_FMT_YUVA420P},

    {IMGFMT_420P16_LE, AV_PIX_FMT_YUV420P16LE},
    {IMGFMT_420P16_BE, AV_PIX_FMT_YUV420P16BE},
    {IMGFMT_422P16_LE, AV_PIX_FMT_YUV422P16LE},
    {IMGFMT_422P16_BE, AV_PIX_FMT_YUV422P16BE},
    {IMGFMT_444P16_LE, AV_PIX_FMT_YUV444P16LE},
    {IMGFMT_444P16_BE, AV_PIX_FMT_YUV444P16BE},

    // YUVJ are YUV formats that use the full Y range and not just
    // 16 - 235 (see colorspaces.txt).
    // Currently they are all treated the same way.
    {IMGFMT_YV12, AV_PIX_FMT_YUVJ420P},
    {IMGFMT_422P, AV_PIX_FMT_YUVJ422P},
    {IMGFMT_444P, AV_PIX_FMT_YUVJ444P},
    {IMGFMT_440P, AV_PIX_FMT_YUVJ440P},

    {IMGFMT_XVMC_MOCO_MPEG2, AV_PIX_FMT_XVMC_MPEG2_MC},
    {IMGFMT_XVMC_IDCT_MPEG2, AV_PIX_FMT_XVMC_MPEG2_IDCT},
    {IMGFMT_VDPAU_MPEG1, AV_PIX_FMT_VDPAU_MPEG1},
    {IMGFMT_VDPAU_MPEG2, AV_PIX_FMT_VDPAU_MPEG2},
    {IMGFMT_VDPAU_H264, AV_PIX_FMT_VDPAU_H264},
    {IMGFMT_VDPAU_WMV3, AV_PIX_FMT_VDPAU_WMV3},
    {IMGFMT_VDPAU_VC1, AV_PIX_FMT_VDPAU_VC1},
    {IMGFMT_VDPAU_MPEG4, AV_PIX_FMT_VDPAU_MPEG4},
    {0, AV_PIX_FMT_NONE} /* sentinel */
};
||
124 | |||
/* vf_info descriptors of the libmpcodecs filters compiled into this
 * wrapper; each is defined in the corresponding libmpcodecs source. */
extern const vf_info_t ff_vf_info_eq2;
extern const vf_info_t ff_vf_info_eq;
extern const vf_info_t ff_vf_info_fspp;
extern const vf_info_t ff_vf_info_ilpack;
extern const vf_info_t ff_vf_info_pp7;
extern const vf_info_t ff_vf_info_softpulldown;
extern const vf_info_t ff_vf_info_uspp;


/* NULL-terminated registry of the wrapped filters; init() searches this
 * by name for the value of the "filter" option. */
static const vf_info_t* const filters[]={
    &ff_vf_info_eq2,
    &ff_vf_info_eq,
    &ff_vf_info_fspp,
    &ff_vf_info_ilpack,
    &ff_vf_info_pp7,
    &ff_vf_info_softpulldown,
    &ff_vf_info_uspp,

    NULL
};
||
145 | |||
146 | /* |
||
147 | Unsupported filters |
||
148 | 1bpp |
||
149 | ass |
||
150 | bmovl |
||
151 | crop |
||
152 | dvbscale |
||
153 | flip |
||
154 | expand |
||
155 | format |
||
156 | halfpack |
||
157 | lavc |
||
158 | lavcdeint |
||
159 | noformat |
||
160 | pp |
||
161 | scale |
||
162 | tfields |
||
163 | vo |
||
164 | yadif |
||
165 | zrmjpeg |
||
166 | */ |
||
167 | |||
CpuCaps ff_gCpuCaps; //FIXME initialize this so optims work

/**
 * Translate a libmpcodecs IMGFMT_* code to the matching AVPixelFormat.
 *
 * @param mp libmpcodecs IMGFMT_* value
 * @return the first matching AV_PIX_FMT_* from conversion_map, or
 *         AV_PIX_FMT_NONE if the format is not in the table
 */
enum AVPixelFormat ff_mp2ff_pix_fmt(int mp){
    int i;
    // linear scan; stops at the match or at the {0, AV_PIX_FMT_NONE} sentinel
    for(i=0; conversion_map[i].fmt && mp != conversion_map[i].fmt; i++)
        ;
    return mp == conversion_map[i].fmt ? conversion_map[i].pix_fmt : AV_PIX_FMT_NONE;
}
||
176 | |||
/* Private context of the "mp" wrapper filter. */
typedef struct {
    const AVClass *class;          // AVOptions class pointer (must be first)
    vf_instance_t vf;              // the wrapped MPlayer filter instance
    vf_instance_t next_vf;         // dummy downstream instance that collects the output
    AVFilterContext *avfctx;       // back-pointer to the owning filter context
    int frame_returned;            // frames pushed downstream during the current request_frame()
    char *filter;                  // "name=args" string from the "filter" option
    enum AVPixelFormat in_pix_fmt; // pixel format of the most recent input frame
} MPContext;
||
186 | |||
#define OFFSET(x) offsetof(MPContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
/* The single user option: the libmpcodecs filter name, optionally
 * followed by '=' or ':' and its parameter string (parsed in init()). */
static const AVOption mp_options[] = {
    { "filter", "set MPlayer filter name and parameters", OFFSET(filter), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(mp);
||
195 | |||
/* MPlayer-style message callback used by the wrapped libmpcodecs code.
 * The module/level pair is not translated yet (see FIXME); everything
 * is forwarded to av_vlog() at AV_LOG_DEBUG verbosity with no context. */
void ff_mp_msg(int mod, int lev, const char *format, ... ){
    va_list va;
    va_start(va, format);
    //FIXME convert lev/mod
    av_vlog(NULL, AV_LOG_DEBUG, format, va);
    va_end(va);
}
||
203 | |||
/* Message-level query hook for libmpcodecs.  The wrapper performs no
 * per-module/per-level filtering, so it unconditionally reports
 * "enabled" with the fixed non-zero value 123. */
int ff_mp_msg_test(int mod, int lev){
    (void)mod; /* module id is ignored */
    (void)lev; /* verbosity level is ignored */
    return 123;
}
||
207 | |||
/* libmpcodecs hook for initialising libavcodec.  Inside FFmpeg this is
 * a deliberate no-op: registration is 1. unneeded and 2. a bit impolite
 * to do from here. */
void ff_init_avcodec(void)
{
}
||
212 | |||
213 | //Exact copy of vf.c |
||
214 | void ff_vf_clone_mpi_attributes(mp_image_t* dst, mp_image_t* src){ |
||
215 | dst->pict_type= src->pict_type; |
||
216 | dst->fields = src->fields; |
||
217 | dst->qscale_type= src->qscale_type; |
||
218 | if(dst->width == src->width && dst->height == src->height){ |
||
219 | dst->qstride= src->qstride; |
||
220 | dst->qscale= src->qscale; |
||
221 | } |
||
222 | } |
||
223 | |||
//Exact copy of vf.c
/* Forward a drawn slice to the next filter, or, when the next filter
 * has no draw_slice() callback, blit the slice into the destination
 * image previously stored in vf->dmpi.
 * src/stride describe the slice planes, (x,y) is its position in the
 * destination, w/h its size in pixels. */
void ff_vf_next_draw_slice(struct vf_instance *vf,unsigned char** src, int * stride,int w, int h, int x, int y){
    if (vf->next->draw_slice) {
        vf->next->draw_slice(vf->next,src,stride,w,h,x,y);
        return;
    }
    if (!vf->dmpi) {
        // nothing to blit into; the downstream filter forgot to store dmpi
        ff_mp_msg(MSGT_VFILTER,MSGL_ERR,"draw_slice: dmpi not stored by vf_%s\n", vf->info->name);
        return;
    }
    if (!(vf->dmpi->flags & MP_IMGFLAG_PLANAR)) {
        // packed layout: a single plane, bpp/8 bytes per pixel
        memcpy_pic(vf->dmpi->planes[0]+y*vf->dmpi->stride[0]+vf->dmpi->bpp/8*x,
            src[0], vf->dmpi->bpp/8*w, h, vf->dmpi->stride[0], stride[0]);
        return;
    }
    // planar layout: luma plane plus two chroma planes whose offsets and
    // extents are scaled down by the chroma shift factors
    memcpy_pic(vf->dmpi->planes[0]+y*vf->dmpi->stride[0]+x, src[0],
        w, h, vf->dmpi->stride[0], stride[0]);
    memcpy_pic(vf->dmpi->planes[1]+(y>>vf->dmpi->chroma_y_shift)*vf->dmpi->stride[1]+(x>>vf->dmpi->chroma_x_shift),
        src[1], w>>vf->dmpi->chroma_x_shift, h>>vf->dmpi->chroma_y_shift, vf->dmpi->stride[1], stride[1]);
    memcpy_pic(vf->dmpi->planes[2]+(y>>vf->dmpi->chroma_y_shift)*vf->dmpi->stride[2]+(x>>vf->dmpi->chroma_x_shift),
        src[2], w>>vf->dmpi->chroma_x_shift, h>>vf->dmpi->chroma_y_shift, vf->dmpi->stride[2], stride[2]);
}
||
246 | |||
//Exact copy of vf.c
/* Clear the rectangle (x0,y0,w,h) of an mp_image to "black":
 * luma 0 / chroma 128 for YUV, zero bytes otherwise.  Planar and
 * packed layouts are handled separately. */
void ff_vf_mpi_clear(mp_image_t* mpi,int x0,int y0,int w,int h){
    int y;
    if(mpi->flags&MP_IMGFLAG_PLANAR){
        y0&=~1;h+=h&1; // round to even row pairs (chroma is subsampled)
        if(x0==0 && w==mpi->width){
            // full width clear:
            memset(mpi->planes[0]+mpi->stride[0]*y0,0,mpi->stride[0]*h);
            memset(mpi->planes[1]+mpi->stride[1]*(y0>>mpi->chroma_y_shift),128,mpi->stride[1]*(h>>mpi->chroma_y_shift));
            memset(mpi->planes[2]+mpi->stride[2]*(y0>>mpi->chroma_y_shift),128,mpi->stride[2]*(h>>mpi->chroma_y_shift));
        } else
        for(y=y0;y<y0+h;y+=2){
            // partial-width clear, two luma rows per iteration
            memset(mpi->planes[0]+x0+mpi->stride[0]*y,0,w);
            memset(mpi->planes[0]+x0+mpi->stride[0]*(y+1),0,w);
            memset(mpi->planes[1]+(x0>>mpi->chroma_x_shift)+mpi->stride[1]*(y>>mpi->chroma_y_shift),128,(w>>mpi->chroma_x_shift));
            memset(mpi->planes[2]+(x0>>mpi->chroma_x_shift)+mpi->stride[2]*(y>>mpi->chroma_y_shift),128,(w>>mpi->chroma_x_shift));
        }
        return;
    }
    // packed:
    for(y=y0;y<y0+h;y++){
        unsigned char* dst=mpi->planes[0]+mpi->stride[0]*y+(mpi->bpp>>3)*x0;
        if(mpi->flags&MP_IMGFLAG_YUV){
            // packed YUV: fill 32-bit words with the Y=0/C=128 pattern,
            // picking the byte order matching endianness and component order
            unsigned int* p=(unsigned int*) dst;
            int size=(mpi->bpp>>3)*w/4;
            int i;
#if HAVE_BIGENDIAN
#define CLEAR_PACKEDYUV_PATTERN 0x00800080
#define CLEAR_PACKEDYUV_PATTERN_SWAPPED 0x80008000
#else
#define CLEAR_PACKEDYUV_PATTERN 0x80008000
#define CLEAR_PACKEDYUV_PATTERN_SWAPPED 0x00800080
#endif
            if(mpi->flags&MP_IMGFLAG_SWAPPED){
                for(i=0;i<size-3;i+=4) p[i]=p[i+1]=p[i+2]=p[i+3]=CLEAR_PACKEDYUV_PATTERN_SWAPPED;
                for(;i<size;i++) p[i]=CLEAR_PACKEDYUV_PATTERN_SWAPPED;
            } else {
                for(i=0;i<size-3;i+=4) p[i]=p[i+1]=p[i+2]=p[i+3]=CLEAR_PACKEDYUV_PATTERN;
                for(;i<size;i++) p[i]=CLEAR_PACKEDYUV_PATTERN;
            }
        } else
            memset(dst,0,(mpi->bpp>>3)*w);
    }
}
||
291 | |||
/* Terminal query_format stub for the dummy downstream instance:
 * report every format as supported.  Both arguments are ignored. */
int ff_vf_next_query_format(struct vf_instance *vf, unsigned int fmt){
    (void)vf;
    (void)fmt;
    return 1;
}
||
295 | |||
296 | //used by delogo |
||
297 | unsigned int ff_vf_match_csp(vf_instance_t** vfp,const unsigned int* list,unsigned int preferred){ |
||
298 | return preferred; |
||
299 | } |
||
300 | |||
/* Allocate or reuse an mp_image for a wrapped filter, mimicking
 * MPlayer's vf_get_image().  mp_imgtype selects the buffer class
 * (EXPORT/STATIC/TEMP/IP/IPB/NUMBERED; the high 16 bits carry the
 * requested buffer number), mp_imgflag the access/stride requirements.
 * w/h may be -1 to mean "the filter's configured size".
 * Returns NULL if the request cannot be satisfied. */
mp_image_t* ff_vf_get_image(vf_instance_t* vf, unsigned int outfmt, int mp_imgtype, int mp_imgflag, int w, int h){
    // recover the wrapper context: vf is always the embedded next_vf member
    MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, next_vf));
    mp_image_t* mpi=NULL;
    int w2;
    int number = mp_imgtype >> 16; // explicit buffer index for NUMBERED images

    av_assert0(vf->next == NULL); // all existing filters call this just on next

    //vf_dint needs these as it calls ff_vf_get_image() before configuring the output
    if(vf->w==0 && w>0) vf->w=w;
    if(vf->h==0 && h>0) vf->h=h;

    av_assert0(w == -1 || w >= vf->w);
    av_assert0(h == -1 || h >= vf->h);
    av_assert0(vf->w > 0);
    av_assert0(vf->h > 0);

    av_log(m->avfctx, AV_LOG_DEBUG, "get_image: %d:%d, vf: %d:%d\n", w,h,vf->w,vf->h);

    if (w == -1) w = vf->w;
    if (h == -1) h = vf->h;

    // round the width up to a multiple of 16 if aligned strides are acceptable
    w2=(mp_imgflag&MP_IMGFLAG_ACCEPT_ALIGNED_STRIDE)?((w+15)&(~15)):w;

    // Note: we should call libvo first to check if it supports direct rendering
    // and if not, then fallback to software buffers:
    switch(mp_imgtype & 0xff){
    case MP_IMGTYPE_EXPORT:
        // image only exports foreign plane pointers; lazily create the shell
        if(!vf->imgctx.export_images[0]) vf->imgctx.export_images[0]=ff_new_mp_image(w2,h);
        mpi=vf->imgctx.export_images[0];
        break;
    case MP_IMGTYPE_STATIC:
        // single buffer whose content persists between frames
        if(!vf->imgctx.static_images[0]) vf->imgctx.static_images[0]=ff_new_mp_image(w2,h);
        mpi=vf->imgctx.static_images[0];
        break;
    case MP_IMGTYPE_TEMP:
        // scratch buffer, content may be discarded after the frame
        if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=ff_new_mp_image(w2,h);
        mpi=vf->imgctx.temp_images[0];
        break;
    case MP_IMGTYPE_IPB:
        if(!(mp_imgflag&MP_IMGFLAG_READABLE)){ // B frame:
            if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=ff_new_mp_image(w2,h);
            mpi=vf->imgctx.temp_images[0];
            break;
        }
        // readable IPB request falls through to the IP double buffer
    case MP_IMGTYPE_IP:
        // two static buffers used alternately (reference/current)
        if(!vf->imgctx.static_images[vf->imgctx.static_idx]) vf->imgctx.static_images[vf->imgctx.static_idx]=ff_new_mp_image(w2,h);
        mpi=vf->imgctx.static_images[vf->imgctx.static_idx];
        vf->imgctx.static_idx^=1;
        break;
    case MP_IMGTYPE_NUMBERED:
        if (number == -1) {
            // no explicit slot requested: pick the first unused one
            int i;
            for (i = 0; i < NUM_NUMBERED_MPI; i++)
                if (!vf->imgctx.numbered_images[i] || !vf->imgctx.numbered_images[i]->usage_count)
                    break;
            number = i;
        }
        if (number < 0 || number >= NUM_NUMBERED_MPI) return NULL;
        if (!vf->imgctx.numbered_images[number]) vf->imgctx.numbered_images[number] = ff_new_mp_image(w2,h);
        mpi = vf->imgctx.numbered_images[number];
        mpi->number = number;
        break;
    }
    if(mpi){
        mpi->type=mp_imgtype;
        mpi->w=vf->w; mpi->h=vf->h;
        // keep buffer allocation status & color flags only:
        //    mpi->flags&=~(MP_IMGFLAG_PRESERVE|MP_IMGFLAG_READABLE|MP_IMGFLAG_DIRECT);
        mpi->flags&=MP_IMGFLAG_ALLOCATED|MP_IMGFLAG_TYPE_DISPLAYED|MP_IMGFLAGMASK_COLORS;
        // accept restrictions, draw_slice and palette flags only:
        mpi->flags|=mp_imgflag&(MP_IMGFLAGMASK_RESTRICTIONS|MP_IMGFLAG_DRAW_CALLBACK|MP_IMGFLAG_RGB_PALETTE);
        if(!vf->draw_slice) mpi->flags&=~MP_IMGFLAG_DRAW_CALLBACK;
        if(mpi->width!=w2 || mpi->height!=h){
            // buffer dimensions changed since the image was created
//          printf("vf.c: MPI parameters changed!  %dx%d -> %dx%d   \n", mpi->width,mpi->height,w2,h);
            if(mpi->flags&MP_IMGFLAG_ALLOCATED){
                if(mpi->width<w2 || mpi->height<h){
                    // need to re-allocate buffer memory:
                    av_free(mpi->planes[0]);
                    mpi->flags&=~MP_IMGFLAG_ALLOCATED;
                    ff_mp_msg(MSGT_VFILTER,MSGL_V,"vf.c: have to REALLOCATE buffer memory :(\n");
                }
//          } else {
            } {
                mpi->width=w2; mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift;
                mpi->height=h; mpi->chroma_height=(h + (1<<mpi->chroma_y_shift) - 1)>>mpi->chroma_y_shift;
            }
        }
        if(!mpi->bpp) ff_mp_image_setfmt(mpi,outfmt);
        if(!(mpi->flags&MP_IMGFLAG_ALLOCATED) && mpi->type>MP_IMGTYPE_EXPORT){

            av_assert0(!vf->get_image);
            // check libvo first!
            if(vf->get_image) vf->get_image(vf,mpi);

            if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
                // non-direct and not yet allocated image. allocate it!
                if (!mpi->bpp) { // no way we can allocate this
                    ff_mp_msg(MSGT_DECVIDEO, MSGL_FATAL,
                           "ff_vf_get_image: Tried to allocate a format that can not be allocated!\n");
                    return NULL;
                }

                // check if codec prefer aligned stride:
                if(mp_imgflag&MP_IMGFLAG_PREFER_ALIGNED_STRIDE){
                    // planar YUV wants the luma stride aligned so the chroma
                    // stride (luma >> shift) stays 8-byte aligned
                    int align=(mpi->flags&MP_IMGFLAG_PLANAR &&
                               mpi->flags&MP_IMGFLAG_YUV) ?
                               (8<<mpi->chroma_x_shift)-1 : 15; // -- maybe FIXME
                    w2=((w+align)&(~align));
                    if(mpi->width!=w2){
#if 0
                        // we have to change width... check if we CAN co it:
                        int flags=vf->query_format(vf,outfmt); // should not fail
                        if(!(flags&3)) ff_mp_msg(MSGT_DECVIDEO,MSGL_WARN,"??? ff_vf_get_image{vf->query_format(outfmt)} failed!\n");
//                      printf("query -> 0x%X    \n",flags);
                        if(flags&VFCAP_ACCEPT_STRIDE){
#endif
                            mpi->width=w2;
                            mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift;
//                      }
                    }
                }

                ff_mp_image_alloc_planes(mpi);
//              printf("clearing img!\n");
                ff_vf_mpi_clear(mpi,0,0,mpi->width,mpi->height);
            }
        }
        av_assert0(!vf->start_slice);
        if(mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)
            if(vf->start_slice) vf->start_slice(vf,mpi);
        if(!(mpi->flags&MP_IMGFLAG_TYPE_DISPLAYED)){
            // one-time debug dump of the image parameters
            ff_mp_msg(MSGT_DECVIDEO,MSGL_V,"*** [%s] %s%s mp_image_t, %dx%dx%dbpp %s %s, %d bytes\n",
                  "NULL"/*vf->info->name*/,
                  (mpi->type==MP_IMGTYPE_EXPORT)?"Exporting":
                  ((mpi->flags&MP_IMGFLAG_DIRECT)?"Direct Rendering":"Allocating"),
                  (mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)?" (slices)":"",
                  mpi->width,mpi->height,mpi->bpp,
                  (mpi->flags&MP_IMGFLAG_YUV)?"YUV":((mpi->flags&MP_IMGFLAG_SWAPPED)?"BGR":"RGB"),
                  (mpi->flags&MP_IMGFLAG_PLANAR)?"planar":"packed",
                  mpi->bpp*mpi->width*mpi->height/8);
            ff_mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"(imgfmt: %x, planes: %p,%p,%p strides: %d,%d,%d, chroma: %dx%d, shift: h:%d,v:%d)\n",
                  mpi->imgfmt, mpi->planes[0], mpi->planes[1], mpi->planes[2],
                  mpi->stride[0], mpi->stride[1], mpi->stride[2],
                  mpi->chroma_width, mpi->chroma_height, mpi->chroma_x_shift, mpi->chroma_y_shift);
            mpi->flags|=MP_IMGFLAG_TYPE_DISPLAYED;
        }

        mpi->qscale = NULL;
        mpi->usage_count++;
    }
//    printf("\rVF_MPI: %p %p %p %d %d %d    \n",
//          mpi->planes[0],mpi->planes[1],mpi->planes[2],
//          mpi->stride[0],mpi->stride[1],mpi->stride[2]);
    return mpi;
}
||
457 | |||
/* put_image hook installed on the wrapped filter (see init()): wraps the
 * produced mp_image into an AVFrame and pushes it on the wrapper's
 * output link.  Returns 1 on success, 0 on allocation failure. */
int ff_vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts){
    // recover the wrapper context from the embedded vf member
    MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
    AVFilterLink *outlink = m->avfctx->outputs[0];
    AVFrame *picref = av_frame_alloc();
    int i;

    av_assert0(vf->next);

    av_log(m->avfctx, AV_LOG_DEBUG, "ff_vf_next_put_image\n");

    if (!picref)
        goto fail;

    picref->width  = mpi->w;
    picref->height = mpi->h;

    picref->type = AVMEDIA_TYPE_VIDEO;

    // map the mp image format back to an AVPixelFormat
    for(i=0; conversion_map[i].fmt && mpi->imgfmt != conversion_map[i].fmt; i++);
    picref->format = conversion_map[i].pix_fmt;

    // prefer the original input pix_fmt when it maps to the same IMGFMT;
    // several AVPixelFormats share one IMGFMT (e.g. YUV420P vs YUVJ420P)
    for(i=0; conversion_map[i].fmt && m->in_pix_fmt != conversion_map[i].pix_fmt; i++);
    if (mpi->imgfmt == conversion_map[i].fmt)
        picref->format = conversion_map[i].pix_fmt;

    memcpy(picref->linesize, mpi->stride, FFMIN(sizeof(picref->linesize), sizeof(mpi->stride)));

    for(i=0; i<4 && mpi->stride[i]; i++){
        picref->data[i] = mpi->planes[i];
    }

    // inverse of the pts conversion performed in filter_frame()
    if(pts != MP_NOPTS_VALUE)
        picref->pts= pts * av_q2d(outlink->time_base);

    if(1) { // mp buffers are currently unsupported in libavfilter, we thus must copy
        AVFrame *tofree = picref;
        picref = av_frame_clone(picref);
        av_frame_free(&tofree);
    }

    ff_filter_frame(outlink, picref);
    m->frame_returned++; // lets request_frame() know a frame was delivered

    return 1;
fail:
    av_frame_free(&picref);
    return 0;
}
||
506 | |||
/* config hook for the dummy downstream instance: store the negotiated
 * output size on the next instance; config_outprops() later copies it
 * to the outgoing AVFilterLink.  Always succeeds (returns 1).
 * The #if 0 section below is the original vf.c negotiation logic,
 * intentionally disabled in this wrapper. */
int ff_vf_next_config(struct vf_instance *vf,
        int width, int height, int d_width, int d_height,
        unsigned int voflags, unsigned int outfmt){

    av_assert0(width>0 && height>0);
    vf->next->w = width; vf->next->h = height;

    return 1;
#if 0
    int flags=vf->next->query_format(vf->next,outfmt);
    if(!flags){
        // hmm. colorspace mismatch!!!
        //this is fatal for us ATM
        return 0;
    }
    ff_mp_msg(MSGT_VFILTER,MSGL_V,"REQ: flags=0x%X  req=0x%X  \n",flags,vf->default_reqs);
    miss=vf->default_reqs - (flags&vf->default_reqs);
    if(miss&VFCAP_ACCEPT_STRIDE){
        // vf requires stride support but vf->next doesn't support it!
        // let's insert the 'expand' filter, it does the job for us:
        vf_instance_t* vf2=vf_open_filter(vf->next,"expand",NULL);
        if(!vf2) return 0; // shouldn't happen!
        vf->next=vf2;
    }
    vf->next->w = width; vf->next->h = height;
    return 1;
#endif
}
||
535 | |||
536 | int ff_vf_next_control(struct vf_instance *vf, int request, void* data){ |
||
537 | MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf)); |
||
538 | av_log(m->avfctx, AV_LOG_DEBUG, "Received control %d\n", request); |
||
539 | return 0; |
||
540 | } |
||
541 | |||
542 | static int vf_default_query_format(struct vf_instance *vf, unsigned int fmt){ |
||
543 | MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf)); |
||
544 | int i; |
||
545 | av_log(m->avfctx, AV_LOG_DEBUG, "query %X\n", fmt); |
||
546 | |||
547 | for(i=0; conversion_map[i].fmt; i++){ |
||
548 | if(fmt==conversion_map[i].fmt) |
||
549 | return 1; //we suport all |
||
550 | } |
||
551 | return 0; |
||
552 | } |
||
553 | |||
554 | |||
/* Filter init: mirror the host CPU capabilities into the global CpuCaps
 * that libmpcodecs reads, parse the "filter" option into a name plus
 * argument string, look the name up in the registry and open the
 * wrapped filter instance.
 * Returns 0 on success, a negative AVERROR code on bad parameters or
 * if vf_open() fails. */
static av_cold int init(AVFilterContext *ctx)
{
    MPContext *m = ctx->priv;
    int cpu_flags = av_get_cpu_flags();
    char name[256];
    const char *args;
    int i;

    // translate av_get_cpu_flags() into the MPlayer-style capability struct
    ff_gCpuCaps.hasMMX = cpu_flags & AV_CPU_FLAG_MMX;
    ff_gCpuCaps.hasMMX2 = cpu_flags & AV_CPU_FLAG_MMX2;
    ff_gCpuCaps.hasSSE = cpu_flags & AV_CPU_FLAG_SSE;
    ff_gCpuCaps.hasSSE2 = cpu_flags & AV_CPU_FLAG_SSE2;
    ff_gCpuCaps.hasSSE3 = cpu_flags & AV_CPU_FLAG_SSE3;
    ff_gCpuCaps.hasSSSE3 = cpu_flags & AV_CPU_FLAG_SSSE3;
    ff_gCpuCaps.hasSSE4 = cpu_flags & AV_CPU_FLAG_SSE4;
    ff_gCpuCaps.hasSSE42 = cpu_flags & AV_CPU_FLAG_SSE42;
    ff_gCpuCaps.hasAVX = cpu_flags & AV_CPU_FLAG_AVX;
    ff_gCpuCaps.has3DNow = cpu_flags & AV_CPU_FLAG_3DNOW;
    ff_gCpuCaps.has3DNowExt = cpu_flags & AV_CPU_FLAG_3DNOWEXT;

    m->avfctx= ctx;

    // the filter name is everything up to the first ':' or '='
    args = m->filter;
    if(!args || 1!=sscanf(args, "%255[^:=]", name)){
        av_log(ctx, AV_LOG_ERROR, "Invalid parameter.\n");
        return AVERROR(EINVAL);
    }
    args += strlen(name);
    if (args[0] == '=')
        args++;

    // linear search of the wrapped-filter registry (NULL terminated)
    for(i=0; ;i++){
        if(!filters[i] || !strcmp(name, filters[i]->name))
            break;
    }

    if(!filters[i]){
        av_log(ctx, AV_LOG_ERROR, "Unknown filter %s\n", name);
        return AVERROR(EINVAL);
    }

    av_log(ctx, AV_LOG_WARNING,
           "'%s' is a wrapped MPlayer filter (libmpcodecs). This filter may be removed\n"
           "once it has been ported to a native libavfilter.\n", name);

    memset(&m->vf,0,sizeof(m->vf));
    m->vf.info= filters[i];

    // wire the instance to the dummy next_vf and to the wrapper callbacks
    m->vf.next        = &m->next_vf;
    m->vf.put_image   = ff_vf_next_put_image;
    m->vf.config      = ff_vf_next_config;
    m->vf.query_format= vf_default_query_format;
    m->vf.control     = ff_vf_next_control;
    m->vf.default_caps=VFCAP_ACCEPT_STRIDE;
    m->vf.default_reqs=0;
    if(m->vf.info->opts)
        av_log(ctx, AV_LOG_ERROR, "opts / m_struct_set is unsupported\n");
#if 0
    // original MPlayer option handling, not supported by the wrapper:
    if(vf->info->opts) { // vf_vo get some special argument
      const m_struct_t* st = vf->info->opts;
      void* vf_priv = m_struct_alloc(st);
      int n;
      for(n = 0 ; args && args[2*n] ; n++)
        m_struct_set(st,vf_priv,args[2*n],args[2*n+1]);
      vf->priv = vf_priv;
      args = NULL;
    } else // Otherwise we should have the '_oldargs_'
      if(args && !strcmp(args[0],"_oldargs_"))
        args = (char**)args[1];
      else
        args = NULL;
#endif
    if(m->vf.info->vf_open(&m->vf, (char*)args)<=0){
        av_log(ctx, AV_LOG_ERROR, "vf_open() of %s with arg=%s failed\n", name, args);
        return -1;
    }

    return 0;
}
||
634 | |||
635 | static av_cold void uninit(AVFilterContext *ctx) |
||
636 | { |
||
637 | MPContext *m = ctx->priv; |
||
638 | vf_instance_t *vf = &m->vf; |
||
639 | |||
640 | while(vf){ |
||
641 | vf_instance_t *next = vf->next; |
||
642 | if(vf->uninit) |
||
643 | vf->uninit(vf); |
||
644 | ff_free_mp_image(vf->imgctx.static_images[0]); |
||
645 | ff_free_mp_image(vf->imgctx.static_images[1]); |
||
646 | ff_free_mp_image(vf->imgctx.temp_images[0]); |
||
647 | ff_free_mp_image(vf->imgctx.export_images[0]); |
||
648 | vf = next; |
||
649 | } |
||
650 | } |
||
651 | |||
/* Build the list of supported pixel formats by probing the wrapped
 * filter's query_format() with every entry of conversion_map.  Because
 * several IMGFMTs map to the same AVPixelFormat and identical pix_fmts
 * are adjacent in the table (see the XXX on conversion_map), comparing
 * against the previously-added pix_fmt suffices to avoid duplicates. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *avfmts=NULL;
    MPContext *m = ctx->priv;
    enum AVPixelFormat lastpixfmt = AV_PIX_FMT_NONE;
    int i;

    for(i=0; conversion_map[i].fmt; i++){
        av_log(ctx, AV_LOG_DEBUG, "query: %X\n", conversion_map[i].fmt);
        if(m->vf.query_format(&m->vf, conversion_map[i].fmt)){
            av_log(ctx, AV_LOG_DEBUG, "supported,adding\n");
            if (conversion_map[i].pix_fmt != lastpixfmt) {
                ff_add_format(&avfmts, conversion_map[i].pix_fmt);
                lastpixfmt = conversion_map[i].pix_fmt;
            }
        }
    }

    if (!avfmts)
        return -1;

    //We assume all allowed input formats are also allowed output formats
    ff_set_common_formats(ctx, avfmts);
    return 0;
}
||
677 | |||
/* Input link configuration: translate the negotiated AVPixelFormat back
 * to its IMGFMT code, record the original geometry, and run the wrapped
 * filter's config() with the input dimensions.
 * Returns -1 if the wrapped filter rejects the configuration. */
static int config_inprops(AVFilterLink *inlink)
{
    MPContext *m = inlink->dst->priv;
    int i;
    // find the conversion_map entry matching the link's pixel format
    for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++);

    // the format must be in the table (query_formats guaranteed that)
    // and the link must have valid dimensions
    av_assert0(conversion_map[i].fmt && inlink->w && inlink->h);

    m->vf.fmt.have_configured = 1;
    m->vf.fmt.orig_height = inlink->h;
    m->vf.fmt.orig_width = inlink->w;
    m->vf.fmt.orig_fmt = conversion_map[i].fmt;

    // display dimensions are passed equal to the storage dimensions
    if(m->vf.config(&m->vf, inlink->w, inlink->h, inlink->w, inlink->h, 0, conversion_map[i].fmt)<=0)
        return -1;

    return 0;
}
||
696 | |||
/* Output link configuration: the wrapped chain stored its output size
 * on next_vf (see ff_vf_next_config()); propagate it to the link. */
static int config_outprops(AVFilterLink *outlink)
{
    MPContext *m = outlink->src->priv;

    outlink->w = m->next_vf.w;
    outlink->h = m->next_vf.h;

    return 0;
}
||
706 | |||
/* Output request callback: keep pulling frames from our input until the
 * wrapped filter has pushed at least one frame downstream
 * (ff_vf_next_put_image() increments frame_returned) or the upstream
 * request fails.  A wrapped filter may consume several input frames
 * before producing output (e.g. frame-rate changers). */
static int request_frame(AVFilterLink *outlink)
{
    MPContext *m = outlink->src->priv;
    int ret;

    av_log(m->avfctx, AV_LOG_DEBUG, "mp request_frame\n");

    // the loop body always executes at least once, so ret is always set
    for(m->frame_returned=0; !m->frame_returned;){
        ret=ff_request_frame(outlink->src->inputs[0]);
        if(ret<0)
            break;
    }

    av_log(m->avfctx, AV_LOG_DEBUG, "mp request_frame ret=%d\n", ret);
    return ret;
}
||
723 | |||
724 | static int filter_frame(AVFilterLink *inlink, AVFrame *inpic) |
||
725 | { |
||
726 | MPContext *m = inlink->dst->priv; |
||
727 | int i; |
||
728 | double pts= MP_NOPTS_VALUE; |
||
729 | mp_image_t* mpi = ff_new_mp_image(inpic->width, inpic->height); |
||
730 | |||
731 | if(inpic->pts != AV_NOPTS_VALUE) |
||
732 | pts= inpic->pts / av_q2d(inlink->time_base); |
||
733 | |||
734 | for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++); |
||
735 | ff_mp_image_setfmt(mpi,conversion_map[i].fmt); |
||
736 | m->in_pix_fmt = inlink->format; |
||
737 | |||
738 | memcpy(mpi->planes, inpic->data, FFMIN(sizeof(inpic->data) , sizeof(mpi->planes))); |
||
739 | memcpy(mpi->stride, inpic->linesize, FFMIN(sizeof(inpic->linesize), sizeof(mpi->stride))); |
||
740 | |||
741 | if (inpic->interlaced_frame) |
||
742 | mpi->fields |= MP_IMGFIELD_INTERLACED; |
||
743 | if (inpic->top_field_first) |
||
744 | mpi->fields |= MP_IMGFIELD_TOP_FIRST; |
||
745 | if (inpic->repeat_pict) |
||
746 | mpi->fields |= MP_IMGFIELD_REPEAT_FIRST; |
||
747 | |||
748 | // mpi->flags|=MP_IMGFLAG_ALLOCATED; ? |
||
749 | mpi->flags |= MP_IMGFLAG_READABLE; |
||
750 | if(!av_frame_is_writable(inpic)) |
||
751 | mpi->flags |= MP_IMGFLAG_PRESERVE; |
||
752 | if(m->vf.put_image(&m->vf, mpi, pts) == 0){ |
||
753 | av_log(m->avfctx, AV_LOG_DEBUG, "put_image() says skip\n"); |
||
754 | }else{ |
||
755 | av_frame_free(&inpic); |
||
756 | } |
||
757 | ff_free_mp_image(mpi); |
||
758 | return 0; |
||
759 | } |
||
760 | |||
/* Single video input; frames are converted to mp_image form and handed
 * to the wrapped filter in filter_frame(). */
static const AVFilterPad mp_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_inprops,
    },
    { NULL }
};
||
770 | |||
/* Single video output; dimensions come from the wrapped chain via
 * config_outprops(), frames are pulled through request_frame(). */
static const AVFilterPad mp_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .config_props  = config_outprops,
    },
    { NULL }
};
||
780 | |||
/* Definition of the "mp" wrapper filter. */
AVFilter avfilter_vf_mp = {
    .name          = "mp",
    .description   = NULL_IF_CONFIG_SMALL("Apply a libmpcodecs filter to the input video."),
    .init          = init,
    .uninit        = uninit,
    .priv_size     = sizeof(MPContext),
    .query_formats = query_formats,
    .inputs        = mp_inputs,
    .outputs       = mp_outputs,
    .priv_class    = &mp_class,
};