Subversion Repositories Kolibri OS

Rev

Go to most recent revision | Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
4349 Serge 1
/*
2
 * The simplest mpeg encoder (well, it was the simplest!)
3
 * Copyright (c) 2000,2001 Fabrice Bellard
4
 * Copyright (c) 2002-2004 Michael Niedermayer 
5
 *
6
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer 
7
 *
8
 * This file is part of FFmpeg.
9
 *
10
 * FFmpeg is free software; you can redistribute it and/or
11
 * modify it under the terms of the GNU Lesser General Public
12
 * License as published by the Free Software Foundation; either
13
 * version 2.1 of the License, or (at your option) any later version.
14
 *
15
 * FFmpeg is distributed in the hope that it will be useful,
16
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18
 * Lesser General Public License for more details.
19
 *
20
 * You should have received a copy of the GNU Lesser General Public
21
 * License along with FFmpeg; if not, write to the Free Software
22
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23
 */
24
 
25
/**
26
 * @file
27
 * The simplest mpeg encoder (well, it was the simplest!).
28
 */
29
 
30
#include "libavutil/attributes.h"
31
#include "libavutil/avassert.h"
32
#include "libavutil/imgutils.h"
33
#include "avcodec.h"
34
#include "dsputil.h"
35
#include "h264chroma.h"
36
#include "internal.h"
37
#include "mathops.h"
38
#include "mpegvideo.h"
39
#include "mjpegenc.h"
40
#include "msmpeg4.h"
41
#include "xvmc_internal.h"
42
#include "thread.h"
43
#include 
44
 
45
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46
                                   int16_t *block, int n, int qscale);
47
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48
                                   int16_t *block, int n, int qscale);
49
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50
                                   int16_t *block, int n, int qscale);
51
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52
                                   int16_t *block, int n, int qscale);
53
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54
                                   int16_t *block, int n, int qscale);
55
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56
                                  int16_t *block, int n, int qscale);
57
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58
                                  int16_t *block, int n, int qscale);
59
 
60
static const uint8_t ff_default_chroma_qscale_table[32] = {
61
//   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
62
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
63
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
64
};
65
 
66
const uint8_t ff_mpeg1_dc_scale_table[128] = {
67
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
68
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
69
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76
};
77
 
78
static const uint8_t mpeg2_dc_scale_table1[128] = {
79
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
80
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
81
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88
};
89
 
90
static const uint8_t mpeg2_dc_scale_table2[128] = {
91
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
92
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
96
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100
};
101
 
102
static const uint8_t mpeg2_dc_scale_table3[128] = {
103
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
104
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
105
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
106
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
108
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
109
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112
};
113
 
114
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
115
    ff_mpeg1_dc_scale_table,
116
    mpeg2_dc_scale_table1,
117
    mpeg2_dc_scale_table2,
118
    mpeg2_dc_scale_table3,
119
};
120
 
121
const enum AVPixelFormat ff_pixfmt_list_420[] = {
122
    AV_PIX_FMT_YUV420P,
123
    AV_PIX_FMT_NONE
124
};
125
 
126
static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
127
                              int (*mv)[2][4][2],
128
                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
129
{
130
    MpegEncContext *s = opaque;
131
 
132
    s->mv_dir     = mv_dir;
133
    s->mv_type    = mv_type;
134
    s->mb_intra   = mb_intra;
135
    s->mb_skipped = mb_skipped;
136
    s->mb_x       = mb_x;
137
    s->mb_y       = mb_y;
138
    memcpy(s->mv, mv, sizeof(*mv));
139
 
140
    ff_init_block_index(s);
141
    ff_update_block_index(s);
142
 
143
    s->dsp.clear_blocks(s->block[0]);
144
 
145
    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y *  16                       * s->linesize)   + s->mb_x *  16;
146
    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
147
    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
148
 
149
    if (ref)
150
        av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
151
    ff_MPV_decode_mb(s, s->block);
152
}
153
 
154
/* init common dct for both encoder and decoder */
155
av_cold int ff_dct_common_init(MpegEncContext *s)
156
{
157
    ff_dsputil_init(&s->dsp, s->avctx);
158
    ff_h264chroma_init(&s->h264chroma, 8); //for lowres
159
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
160
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
161
 
162
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
163
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
164
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
165
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
166
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
167
    if (s->flags & CODEC_FLAG_BITEXACT)
168
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
169
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
170
 
171
    if (ARCH_ALPHA)
172
        ff_MPV_common_init_axp(s);
173
    if (ARCH_ARM)
174
        ff_MPV_common_init_arm(s);
175
    if (ARCH_BFIN)
176
        ff_MPV_common_init_bfin(s);
177
    if (ARCH_PPC)
178
        ff_MPV_common_init_ppc(s);
179
    if (ARCH_X86)
180
        ff_MPV_common_init_x86(s);
181
 
182
    /* load & permutate scantables
183
     * note: only wmv uses different ones
184
     */
185
    if (s->alternate_scan) {
186
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
187
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
188
    } else {
189
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
190
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
191
    }
192
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
193
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
194
 
195
    return 0;
196
}
197
 
198
int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
199
{
200
    int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
201
 
202
    // edge emu needs blocksize + filter length - 1
203
    // (= 17x17 for  halfpel / 21x21 for  h264)
204
    // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
205
    // at uvlinesize. It supports only YUV420 so 24x24 is enough
206
    // linesize * interlaced * MBsize
207
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
208
                      fail);
209
 
210
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
211
                      fail)
212
    s->me.temp         = s->me.scratchpad;
213
    s->rd_scratchpad   = s->me.scratchpad;
214
    s->b_scratchpad    = s->me.scratchpad;
215
    s->obmc_scratchpad = s->me.scratchpad + 16;
216
 
217
    return 0;
218
fail:
219
    av_freep(&s->edge_emu_buffer);
220
    return AVERROR(ENOMEM);
221
}
222
 
223
/**
224
 * Allocate a frame buffer
225
 */
226
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
227
{
228
    int r, ret;
229
 
230
    pic->tf.f = &pic->f;
231
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
232
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
233
        s->codec_id != AV_CODEC_ID_MSS2)
234
        r = ff_thread_get_buffer(s->avctx, &pic->tf,
235
                                 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
236
    else {
237
        pic->f.width  = s->avctx->width;
238
        pic->f.height = s->avctx->height;
239
        pic->f.format = s->avctx->pix_fmt;
240
        r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
241
    }
242
 
243
    if (r < 0 || !pic->f.data[0]) {
244
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
245
               r, pic->f.data[0]);
246
        return -1;
247
    }
248
 
249
    if (s->avctx->hwaccel) {
250
        assert(!pic->hwaccel_picture_private);
251
        if (s->avctx->hwaccel->priv_data_size) {
252
            pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
253
            if (!pic->hwaccel_priv_buf) {
254
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
255
                return -1;
256
            }
257
            pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
258
        }
259
    }
260
 
261
    if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
262
                        s->uvlinesize != pic->f.linesize[1])) {
263
        av_log(s->avctx, AV_LOG_ERROR,
264
               "get_buffer() failed (stride changed)\n");
265
        ff_mpeg_unref_picture(s, pic);
266
        return -1;
267
    }
268
 
269
    if (pic->f.linesize[1] != pic->f.linesize[2]) {
270
        av_log(s->avctx, AV_LOG_ERROR,
271
               "get_buffer() failed (uv stride mismatch)\n");
272
        ff_mpeg_unref_picture(s, pic);
273
        return -1;
274
    }
275
 
276
    if (!s->edge_emu_buffer &&
277
        (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
278
        av_log(s->avctx, AV_LOG_ERROR,
279
               "get_buffer() failed to allocate context scratch buffers.\n");
280
        ff_mpeg_unref_picture(s, pic);
281
        return ret;
282
    }
283
 
284
    return 0;
285
}
286
 
287
static void free_picture_tables(Picture *pic)
288
{
289
    int i;
290
 
291
    pic->alloc_mb_width  =
292
    pic->alloc_mb_height = 0;
293
 
294
    av_buffer_unref(&pic->mb_var_buf);
295
    av_buffer_unref(&pic->mc_mb_var_buf);
296
    av_buffer_unref(&pic->mb_mean_buf);
297
    av_buffer_unref(&pic->mbskip_table_buf);
298
    av_buffer_unref(&pic->qscale_table_buf);
299
    av_buffer_unref(&pic->mb_type_buf);
300
 
301
    for (i = 0; i < 2; i++) {
302
        av_buffer_unref(&pic->motion_val_buf[i]);
303
        av_buffer_unref(&pic->ref_index_buf[i]);
304
    }
305
}
306
 
307
static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
308
{
309
    const int big_mb_num    = s->mb_stride * (s->mb_height + 1) + 1;
310
    const int mb_array_size = s->mb_stride * s->mb_height;
311
    const int b8_array_size = s->b8_stride * s->mb_height * 2;
312
    int i;
313
 
314
 
315
    pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
316
    pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
317
    pic->mb_type_buf      = av_buffer_allocz((big_mb_num + s->mb_stride) *
318
                                             sizeof(uint32_t));
319
    if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
320
        return AVERROR(ENOMEM);
321
 
322
    if (s->encoding) {
323
        pic->mb_var_buf    = av_buffer_allocz(mb_array_size * sizeof(int16_t));
324
        pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
325
        pic->mb_mean_buf   = av_buffer_allocz(mb_array_size);
326
        if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
327
            return AVERROR(ENOMEM);
328
    }
329
 
330
    if (s->out_format == FMT_H263 || s->encoding ||
331
               (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
332
        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
333
        int ref_index_size = 4 * mb_array_size;
334
 
335
        for (i = 0; mv_size && i < 2; i++) {
336
            pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
337
            pic->ref_index_buf[i]  = av_buffer_allocz(ref_index_size);
338
            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
339
                return AVERROR(ENOMEM);
340
        }
341
    }
342
 
343
    pic->alloc_mb_width  = s->mb_width;
344
    pic->alloc_mb_height = s->mb_height;
345
 
346
    return 0;
347
}
348
 
349
static int make_tables_writable(Picture *pic)
350
{
351
    int ret, i;
352
#define MAKE_WRITABLE(table) \
353
do {\
354
    if (pic->table &&\
355
       (ret = av_buffer_make_writable(&pic->table)) < 0)\
356
    return ret;\
357
} while (0)
358
 
359
    MAKE_WRITABLE(mb_var_buf);
360
    MAKE_WRITABLE(mc_mb_var_buf);
361
    MAKE_WRITABLE(mb_mean_buf);
362
    MAKE_WRITABLE(mbskip_table_buf);
363
    MAKE_WRITABLE(qscale_table_buf);
364
    MAKE_WRITABLE(mb_type_buf);
365
 
366
    for (i = 0; i < 2; i++) {
367
        MAKE_WRITABLE(motion_val_buf[i]);
368
        MAKE_WRITABLE(ref_index_buf[i]);
369
    }
370
 
371
    return 0;
372
}
373
 
374
/**
375
 * Allocate a Picture.
376
 * The pixels are allocated/set by calling get_buffer() if shared = 0
377
 */
378
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
379
{
380
    int i, ret;
381
 
382
    if (pic->qscale_table_buf)
383
        if (   pic->alloc_mb_width  != s->mb_width
384
            || pic->alloc_mb_height != s->mb_height)
385
            free_picture_tables(pic);
386
 
387
    if (shared) {
388
        av_assert0(pic->f.data[0]);
389
        pic->shared = 1;
390
    } else {
391
        av_assert0(!pic->f.data[0]);
392
 
393
        if (alloc_frame_buffer(s, pic) < 0)
394
            return -1;
395
 
396
        s->linesize   = pic->f.linesize[0];
397
        s->uvlinesize = pic->f.linesize[1];
398
    }
399
 
400
    if (!pic->qscale_table_buf)
401
        ret = alloc_picture_tables(s, pic);
402
    else
403
        ret = make_tables_writable(pic);
404
    if (ret < 0)
405
        goto fail;
406
 
407
    if (s->encoding) {
408
        pic->mb_var    = (uint16_t*)pic->mb_var_buf->data;
409
        pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
410
        pic->mb_mean   = pic->mb_mean_buf->data;
411
    }
412
 
413
    pic->mbskip_table = pic->mbskip_table_buf->data;
414
    pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
415
    pic->mb_type      = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
416
 
417
    if (pic->motion_val_buf[0]) {
418
        for (i = 0; i < 2; i++) {
419
            pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
420
            pic->ref_index[i]  = pic->ref_index_buf[i]->data;
421
        }
422
    }
423
 
424
    return 0;
425
fail:
426
    av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
427
    ff_mpeg_unref_picture(s, pic);
428
    free_picture_tables(pic);
429
    return AVERROR(ENOMEM);
430
}
431
 
432
/**
433
 * Deallocate a picture.
434
 */
435
void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
436
{
437
    int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
438
 
439
    pic->tf.f = &pic->f;
440
    /* WM Image / Screen codecs allocate internal buffers with different
441
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
442
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
443
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
444
        s->codec_id != AV_CODEC_ID_MSS2)
445
        ff_thread_release_buffer(s->avctx, &pic->tf);
446
    else
447
        av_frame_unref(&pic->f);
448
 
449
    av_buffer_unref(&pic->hwaccel_priv_buf);
450
 
451
    if (pic->needs_realloc)
452
        free_picture_tables(pic);
453
 
454
    memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
455
}
456
 
457
static int update_picture_tables(Picture *dst, Picture *src)
458
{
459
     int i;
460
 
461
#define UPDATE_TABLE(table)\
462
do {\
463
    if (src->table &&\
464
        (!dst->table || dst->table->buffer != src->table->buffer)) {\
465
        av_buffer_unref(&dst->table);\
466
        dst->table = av_buffer_ref(src->table);\
467
        if (!dst->table) {\
468
            free_picture_tables(dst);\
469
            return AVERROR(ENOMEM);\
470
        }\
471
    }\
472
} while (0)
473
 
474
    UPDATE_TABLE(mb_var_buf);
475
    UPDATE_TABLE(mc_mb_var_buf);
476
    UPDATE_TABLE(mb_mean_buf);
477
    UPDATE_TABLE(mbskip_table_buf);
478
    UPDATE_TABLE(qscale_table_buf);
479
    UPDATE_TABLE(mb_type_buf);
480
    for (i = 0; i < 2; i++) {
481
        UPDATE_TABLE(motion_val_buf[i]);
482
        UPDATE_TABLE(ref_index_buf[i]);
483
    }
484
 
485
    dst->mb_var        = src->mb_var;
486
    dst->mc_mb_var     = src->mc_mb_var;
487
    dst->mb_mean       = src->mb_mean;
488
    dst->mbskip_table  = src->mbskip_table;
489
    dst->qscale_table  = src->qscale_table;
490
    dst->mb_type       = src->mb_type;
491
    for (i = 0; i < 2; i++) {
492
        dst->motion_val[i] = src->motion_val[i];
493
        dst->ref_index[i]  = src->ref_index[i];
494
    }
495
 
496
    dst->alloc_mb_width  = src->alloc_mb_width;
497
    dst->alloc_mb_height = src->alloc_mb_height;
498
 
499
    return 0;
500
}
501
 
502
int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
503
{
504
    int ret;
505
 
506
    av_assert0(!dst->f.buf[0]);
507
    av_assert0(src->f.buf[0]);
508
 
509
    src->tf.f = &src->f;
510
    dst->tf.f = &dst->f;
511
    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
512
    if (ret < 0)
513
        goto fail;
514
 
515
    ret = update_picture_tables(dst, src);
516
    if (ret < 0)
517
        goto fail;
518
 
519
    if (src->hwaccel_picture_private) {
520
        dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
521
        if (!dst->hwaccel_priv_buf)
522
            goto fail;
523
        dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
524
    }
525
 
526
    dst->field_picture           = src->field_picture;
527
    dst->mb_var_sum              = src->mb_var_sum;
528
    dst->mc_mb_var_sum           = src->mc_mb_var_sum;
529
    dst->b_frame_score           = src->b_frame_score;
530
    dst->needs_realloc           = src->needs_realloc;
531
    dst->reference               = src->reference;
532
    dst->shared                  = src->shared;
533
 
534
    return 0;
535
fail:
536
    ff_mpeg_unref_picture(s, dst);
537
    return ret;
538
}
539
 
540
static void exchange_uv(MpegEncContext *s)
541
{
542
    int16_t (*tmp)[64];
543
 
544
    tmp           = s->pblocks[4];
545
    s->pblocks[4] = s->pblocks[5];
546
    s->pblocks[5] = tmp;
547
}
548
 
549
static int init_duplicate_context(MpegEncContext *s)
550
{
551
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
552
    int c_size = s->mb_stride * (s->mb_height + 1);
553
    int yc_size = y_size + 2 * c_size;
554
    int i;
555
 
556
    s->edge_emu_buffer =
557
    s->me.scratchpad   =
558
    s->me.temp         =
559
    s->rd_scratchpad   =
560
    s->b_scratchpad    =
561
    s->obmc_scratchpad = NULL;
562
 
563
    if (s->encoding) {
564
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
565
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
566
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
567
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
568
        if (s->avctx->noise_reduction) {
569
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
570
                              2 * 64 * sizeof(int), fail)
571
        }
572
    }
573
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
574
    s->block = s->blocks[0];
575
 
576
    for (i = 0; i < 12; i++) {
577
        s->pblocks[i] = &s->block[i];
578
    }
579
    if (s->avctx->codec_tag == AV_RL32("VCR2"))
580
        exchange_uv(s);
581
 
582
    if (s->out_format == FMT_H263) {
583
        /* ac values */
584
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
585
                          yc_size * sizeof(int16_t) * 16, fail);
586
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
587
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
588
        s->ac_val[2] = s->ac_val[1] + c_size;
589
    }
590
 
591
    return 0;
592
fail:
593
    return -1; // free() through ff_MPV_common_end()
594
}
595
 
596
static void free_duplicate_context(MpegEncContext *s)
597
{
598
    if (s == NULL)
599
        return;
600
 
601
    av_freep(&s->edge_emu_buffer);
602
    av_freep(&s->me.scratchpad);
603
    s->me.temp =
604
    s->rd_scratchpad =
605
    s->b_scratchpad =
606
    s->obmc_scratchpad = NULL;
607
 
608
    av_freep(&s->dct_error_sum);
609
    av_freep(&s->me.map);
610
    av_freep(&s->me.score_map);
611
    av_freep(&s->blocks);
612
    av_freep(&s->ac_val_base);
613
    s->block = NULL;
614
}
615
 
616
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
617
{
618
#define COPY(a) bak->a = src->a
619
    COPY(edge_emu_buffer);
620
    COPY(me.scratchpad);
621
    COPY(me.temp);
622
    COPY(rd_scratchpad);
623
    COPY(b_scratchpad);
624
    COPY(obmc_scratchpad);
625
    COPY(me.map);
626
    COPY(me.score_map);
627
    COPY(blocks);
628
    COPY(block);
629
    COPY(start_mb_y);
630
    COPY(end_mb_y);
631
    COPY(me.map_generation);
632
    COPY(pb);
633
    COPY(dct_error_sum);
634
    COPY(dct_count[0]);
635
    COPY(dct_count[1]);
636
    COPY(ac_val_base);
637
    COPY(ac_val[0]);
638
    COPY(ac_val[1]);
639
    COPY(ac_val[2]);
640
#undef COPY
641
}
642
 
643
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
644
{
645
    MpegEncContext bak;
646
    int i, ret;
647
    // FIXME copy only needed parts
648
    // START_TIMER
649
    backup_duplicate_context(&bak, dst);
650
    memcpy(dst, src, sizeof(MpegEncContext));
651
    backup_duplicate_context(dst, &bak);
652
    for (i = 0; i < 12; i++) {
653
        dst->pblocks[i] = &dst->block[i];
654
    }
655
    if (dst->avctx->codec_tag == AV_RL32("VCR2"))
656
        exchange_uv(dst);
657
    if (!dst->edge_emu_buffer &&
658
        (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
659
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
660
               "scratch buffers.\n");
661
        return ret;
662
    }
663
    // STOP_TIMER("update_duplicate_context")
664
    // about 10k cycles / 0.01 sec for  1000frames on 1ghz with 2 threads
665
    return 0;
666
}
667
 
668
int ff_mpeg_update_thread_context(AVCodecContext *dst,
669
                                  const AVCodecContext *src)
670
{
671
    int i, ret;
672
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
673
 
674
    if (dst == src)
675
        return 0;
676
 
677
    av_assert0(s != s1);
678
 
679
    // FIXME can parameters change on I-frames?
680
    // in that case dst may need a reinit
681
    if (!s->context_initialized) {
682
        memcpy(s, s1, sizeof(MpegEncContext));
683
 
684
        s->avctx                 = dst;
685
        s->bitstream_buffer      = NULL;
686
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
687
 
688
        if (s1->context_initialized){
689
//             s->picture_range_start  += MAX_PICTURE_COUNT;
690
//             s->picture_range_end    += MAX_PICTURE_COUNT;
691
            if((ret = ff_MPV_common_init(s)) < 0){
692
                memset(s, 0, sizeof(MpegEncContext));
693
                s->avctx = dst;
694
                return ret;
695
            }
696
        }
697
    }
698
 
699
    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
700
        s->context_reinit = 0;
701
        s->height = s1->height;
702
        s->width  = s1->width;
703
        if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
704
            return ret;
705
    }
706
 
707
    s->avctx->coded_height  = s1->avctx->coded_height;
708
    s->avctx->coded_width   = s1->avctx->coded_width;
709
    s->avctx->width         = s1->avctx->width;
710
    s->avctx->height        = s1->avctx->height;
711
 
712
    s->coded_picture_number = s1->coded_picture_number;
713
    s->picture_number       = s1->picture_number;
714
    s->input_picture_number = s1->input_picture_number;
715
 
716
    av_assert0(!s->picture || s->picture != s1->picture);
717
    if(s->picture)
718
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
719
        ff_mpeg_unref_picture(s, &s->picture[i]);
720
        if (s1->picture[i].f.data[0] &&
721
            (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
722
            return ret;
723
    }
724
 
725
#define UPDATE_PICTURE(pic)\
726
do {\
727
    ff_mpeg_unref_picture(s, &s->pic);\
728
    if (s1->pic.f.data[0])\
729
        ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
730
    else\
731
        ret = update_picture_tables(&s->pic, &s1->pic);\
732
    if (ret < 0)\
733
        return ret;\
734
} while (0)
735
 
736
    UPDATE_PICTURE(current_picture);
737
    UPDATE_PICTURE(last_picture);
738
    UPDATE_PICTURE(next_picture);
739
 
740
    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
741
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
742
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
743
 
744
    // Error/bug resilience
745
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
746
    s->workaround_bugs      = s1->workaround_bugs;
747
    s->padding_bug_score    = s1->padding_bug_score;
748
 
749
    // MPEG4 timing info
750
    memcpy(&s->time_increment_bits, &s1->time_increment_bits,
751
           (char *) &s1->shape - (char *) &s1->time_increment_bits);
752
 
753
    // B-frame info
754
    s->max_b_frames = s1->max_b_frames;
755
    s->low_delay    = s1->low_delay;
756
    s->droppable    = s1->droppable;
757
 
758
    // DivX handling (doesn't work)
759
    s->divx_packed  = s1->divx_packed;
760
 
761
    if (s1->bitstream_buffer) {
762
        if (s1->bitstream_buffer_size +
763
            FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
764
            av_fast_malloc(&s->bitstream_buffer,
765
                           &s->allocated_bitstream_buffer_size,
766
                           s1->allocated_bitstream_buffer_size);
767
            s->bitstream_buffer_size = s1->bitstream_buffer_size;
768
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
769
               s1->bitstream_buffer_size);
770
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
771
               FF_INPUT_BUFFER_PADDING_SIZE);
772
    }
773
 
774
    // linesize dependend scratch buffer allocation
775
    if (!s->edge_emu_buffer)
776
        if (s1->linesize) {
777
            if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
778
                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
779
                       "scratch buffers.\n");
780
                return AVERROR(ENOMEM);
781
            }
782
        } else {
783
            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
784
                   "be allocated due to unknown size.\n");
785
        }
786
 
787
    // MPEG2/interlacing info
788
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
789
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
790
 
791
    if (!s1->first_field) {
792
        s->last_pict_type = s1->pict_type;
793
        if (s1->current_picture_ptr)
794
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
795
 
796
        if (s1->pict_type != AV_PICTURE_TYPE_B) {
797
            s->last_non_b_pict_type = s1->pict_type;
798
        }
799
    }
800
 
801
    return 0;
802
}
803
 
804
/**
805
 * Set the given MpegEncContext to common defaults
806
 * (same for encoding and decoding).
807
 * The changed fields will not depend upon the
808
 * prior state of the MpegEncContext.
809
 */
810
void ff_MPV_common_defaults(MpegEncContext *s)
811
{
812
    s->y_dc_scale_table      =
813
    s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
814
    s->chroma_qscale_table   = ff_default_chroma_qscale_table;
815
    s->progressive_frame     = 1;
816
    s->progressive_sequence  = 1;
817
    s->picture_structure     = PICT_FRAME;
818
 
819
    s->coded_picture_number  = 0;
820
    s->picture_number        = 0;
821
    s->input_picture_number  = 0;
822
 
823
    s->picture_in_gop_number = 0;
824
 
825
    s->f_code                = 1;
826
    s->b_code                = 1;
827
 
828
    s->slice_context_count   = 1;
829
}
830
 
831
/**
832
 * Set the given MpegEncContext to defaults for decoding.
833
 * the changed fields will not depend upon
834
 * the prior state of the MpegEncContext.
835
 */
836
void ff_MPV_decode_defaults(MpegEncContext *s)
837
{
838
    ff_MPV_common_defaults(s);
839
}
840
 
841
static int init_er(MpegEncContext *s)
842
{
843
    ERContext *er = &s->er;
844
    int mb_array_size = s->mb_height * s->mb_stride;
845
    int i;
846
 
847
    er->avctx       = s->avctx;
848
    er->dsp         = &s->dsp;
849
 
850
    er->mb_index2xy = s->mb_index2xy;
851
    er->mb_num      = s->mb_num;
852
    er->mb_width    = s->mb_width;
853
    er->mb_height   = s->mb_height;
854
    er->mb_stride   = s->mb_stride;
855
    er->b8_stride   = s->b8_stride;
856
 
857
    er->er_temp_buffer     = av_malloc(s->mb_height * s->mb_stride);
858
    er->error_status_table = av_mallocz(mb_array_size);
859
    if (!er->er_temp_buffer || !er->error_status_table)
860
        goto fail;
861
 
862
    er->mbskip_table  = s->mbskip_table;
863
    er->mbintra_table = s->mbintra_table;
864
 
865
    for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
866
        er->dc_val[i] = s->dc_val[i];
867
 
868
    er->decode_mb = mpeg_er_decode_mb;
869
    er->opaque    = s;
870
 
871
    return 0;
872
fail:
873
    av_freep(&er->er_temp_buffer);
874
    av_freep(&er->error_status_table);
875
    return AVERROR(ENOMEM);
876
}
877
 
878
/**
879
 * Initialize and allocates MpegEncContext fields dependent on the resolution.
880
 */
881
static int init_context_frame(MpegEncContext *s)
882
{
883
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
884
 
885
    s->mb_width   = (s->width + 15) / 16;
886
    s->mb_stride  = s->mb_width + 1;
887
    s->b8_stride  = s->mb_width * 2 + 1;
888
    s->b4_stride  = s->mb_width * 4 + 1;
889
    mb_array_size = s->mb_height * s->mb_stride;
890
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
891
 
892
    /* set default edge pos, will be overriden
893
     * in decode_header if needed */
894
    s->h_edge_pos = s->mb_width * 16;
895
    s->v_edge_pos = s->mb_height * 16;
896
 
897
    s->mb_num     = s->mb_width * s->mb_height;
898
 
899
    s->block_wrap[0] =
900
    s->block_wrap[1] =
901
    s->block_wrap[2] =
902
    s->block_wrap[3] = s->b8_stride;
903
    s->block_wrap[4] =
904
    s->block_wrap[5] = s->mb_stride;
905
 
906
    y_size  = s->b8_stride * (2 * s->mb_height + 1);
907
    c_size  = s->mb_stride * (s->mb_height + 1);
908
    yc_size = y_size + 2   * c_size;
909
 
910
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
911
    for (y = 0; y < s->mb_height; y++)
912
        for (x = 0; x < s->mb_width; x++)
913
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
914
 
915
    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
916
 
917
    if (s->encoding) {
918
        /* Allocate MV tables */
919
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,                 mv_table_size * 2 * sizeof(int16_t), fail)
920
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,            mv_table_size * 2 * sizeof(int16_t), fail)
921
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,            mv_table_size * 2 * sizeof(int16_t), fail)
922
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,      mv_table_size * 2 * sizeof(int16_t), fail)
923
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,      mv_table_size * 2 * sizeof(int16_t), fail)
924
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,          mv_table_size * 2 * sizeof(int16_t), fail)
925
        s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
926
        s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
927
        s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
928
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
929
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
930
        s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;
931
 
932
        /* Allocate MB type table */
933
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
934
 
935
        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
936
 
937
        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
938
                         mb_array_size * sizeof(float), fail);
939
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
940
                         mb_array_size * sizeof(float), fail);
941
 
942
    }
943
 
944
    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
945
        (s->flags & CODEC_FLAG_INTERLACED_ME)) {
946
        /* interlaced direct mode decoding tables */
947
        for (i = 0; i < 2; i++) {
948
            int j, k;
949
            for (j = 0; j < 2; j++) {
950
                for (k = 0; k < 2; k++) {
951
                    FF_ALLOCZ_OR_GOTO(s->avctx,
952
                                      s->b_field_mv_table_base[i][j][k],
953
                                      mv_table_size * 2 * sizeof(int16_t),
954
                                      fail);
955
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
956
                                                   s->mb_stride + 1;
957
                }
958
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
959
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
960
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
961
            }
962
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
963
        }
964
    }
965
    if (s->out_format == FMT_H263) {
966
        /* cbp values */
967
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
968
        s->coded_block = s->coded_block_base + s->b8_stride + 1;
969
 
970
        /* cbp, ac_pred, pred_dir */
971
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail);
972
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
973
    }
974
 
975
    if (s->h263_pred || s->h263_plus || !s->encoding) {
976
        /* dc values */
977
        // MN: we need these for  error resilience of intra-frames
978
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
979
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
980
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
981
        s->dc_val[2] = s->dc_val[1] + c_size;
982
        for (i = 0; i < yc_size; i++)
983
            s->dc_val_base[i] = 1024;
984
    }
985
 
986
    /* which mb is a intra block */
987
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
988
    memset(s->mbintra_table, 1, mb_array_size);
989
 
990
    /* init macroblock skip table */
991
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
992
    // Note the + 1 is for  a quicker mpeg4 slice_end detection
993
 
994
    return init_er(s);
995
fail:
996
    return AVERROR(ENOMEM);
997
}
998
 
999
/**
1000
 * init common structure for both encoder and decoder.
1001
 * this assumes that some variables like width/height are already set
1002
 */
1003
av_cold int ff_MPV_common_init(MpegEncContext *s)
1004
{
1005
    int i;
1006
    int nb_slices = (HAVE_THREADS &&
1007
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1008
                    s->avctx->thread_count : 1;
1009
 
1010
    if (s->encoding && s->avctx->slices)
1011
        nb_slices = s->avctx->slices;
1012
 
1013
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1014
        s->mb_height = (s->height + 31) / 32 * 2;
1015
    else
1016
        s->mb_height = (s->height + 15) / 16;
1017
 
1018
    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1019
        av_log(s->avctx, AV_LOG_ERROR,
1020
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
1021
        return -1;
1022
    }
1023
 
1024
    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1025
        int max_slices;
1026
        if (s->mb_height)
1027
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
1028
        else
1029
            max_slices = MAX_THREADS;
1030
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1031
               " reducing to %d\n", nb_slices, max_slices);
1032
        nb_slices = max_slices;
1033
    }
1034
 
1035
    if ((s->width || s->height) &&
1036
        av_image_check_size(s->width, s->height, 0, s->avctx))
1037
        return -1;
1038
 
1039
    ff_dct_common_init(s);
1040
 
1041
    s->flags  = s->avctx->flags;
1042
    s->flags2 = s->avctx->flags2;
1043
 
1044
    /* set chroma shifts */
1045
    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1046
                                  &s->chroma_x_shift,
1047
                                  &s->chroma_y_shift);
1048
 
1049
    /* convert fourcc to upper case */
1050
    s->codec_tag        = avpriv_toupper4(s->avctx->codec_tag);
1051
    s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1052
 
1053
    s->avctx->coded_frame = &s->current_picture.f;
1054
 
1055
    if (s->encoding) {
1056
        if (s->msmpeg4_version) {
1057
            FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1058
                                2 * 2 * (MAX_LEVEL + 1) *
1059
                                (MAX_RUN + 1) * 2 * sizeof(int), fail);
1060
        }
1061
        FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1062
 
1063
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,          64 * 32   * sizeof(int), fail)
1064
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix,   64 * 32   * sizeof(int), fail)
1065
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,          64 * 32   * sizeof(int), fail)
1066
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,        64 * 32 * 2 * sizeof(uint16_t), fail)
1067
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1068
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,        64 * 32 * 2 * sizeof(uint16_t), fail)
1069
        FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,           MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1070
        FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1071
 
1072
        if (s->avctx->noise_reduction) {
1073
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
1074
        }
1075
    }
1076
 
1077
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1078
                      MAX_PICTURE_COUNT * sizeof(Picture), fail);
1079
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1080
        avcodec_get_frame_defaults(&s->picture[i].f);
1081
    }
1082
    memset(&s->next_picture, 0, sizeof(s->next_picture));
1083
    memset(&s->last_picture, 0, sizeof(s->last_picture));
1084
    memset(&s->current_picture, 0, sizeof(s->current_picture));
1085
    avcodec_get_frame_defaults(&s->next_picture.f);
1086
    avcodec_get_frame_defaults(&s->last_picture.f);
1087
    avcodec_get_frame_defaults(&s->current_picture.f);
1088
 
1089
        if (init_context_frame(s))
1090
            goto fail;
1091
 
1092
        s->parse_context.state = -1;
1093
 
1094
        s->context_initialized = 1;
1095
        s->thread_context[0]   = s;
1096
 
1097
//     if (s->width && s->height) {
1098
        if (nb_slices > 1) {
1099
            for (i = 1; i < nb_slices; i++) {
1100
                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1101
                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1102
            }
1103
 
1104
            for (i = 0; i < nb_slices; i++) {
1105
                if (init_duplicate_context(s->thread_context[i]) < 0)
1106
                    goto fail;
1107
                    s->thread_context[i]->start_mb_y =
1108
                        (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1109
                    s->thread_context[i]->end_mb_y   =
1110
                        (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1111
            }
1112
        } else {
1113
            if (init_duplicate_context(s) < 0)
1114
                goto fail;
1115
            s->start_mb_y = 0;
1116
            s->end_mb_y   = s->mb_height;
1117
        }
1118
        s->slice_context_count = nb_slices;
1119
//     }
1120
 
1121
    return 0;
1122
 fail:
1123
    ff_MPV_common_end(s);
1124
    return -1;
1125
}
1126
 
1127
/**
1128
 * Frees and resets MpegEncContext fields depending on the resolution.
1129
 * Is used during resolution changes to avoid a full reinitialization of the
1130
 * codec.
1131
 */
1132
static int free_context_frame(MpegEncContext *s)
1133
{
1134
    int i, j, k;
1135
 
1136
    av_freep(&s->mb_type);
1137
    av_freep(&s->p_mv_table_base);
1138
    av_freep(&s->b_forw_mv_table_base);
1139
    av_freep(&s->b_back_mv_table_base);
1140
    av_freep(&s->b_bidir_forw_mv_table_base);
1141
    av_freep(&s->b_bidir_back_mv_table_base);
1142
    av_freep(&s->b_direct_mv_table_base);
1143
    s->p_mv_table            = NULL;
1144
    s->b_forw_mv_table       = NULL;
1145
    s->b_back_mv_table       = NULL;
1146
    s->b_bidir_forw_mv_table = NULL;
1147
    s->b_bidir_back_mv_table = NULL;
1148
    s->b_direct_mv_table     = NULL;
1149
    for (i = 0; i < 2; i++) {
1150
        for (j = 0; j < 2; j++) {
1151
            for (k = 0; k < 2; k++) {
1152
                av_freep(&s->b_field_mv_table_base[i][j][k]);
1153
                s->b_field_mv_table[i][j][k] = NULL;
1154
            }
1155
            av_freep(&s->b_field_select_table[i][j]);
1156
            av_freep(&s->p_field_mv_table_base[i][j]);
1157
            s->p_field_mv_table[i][j] = NULL;
1158
        }
1159
        av_freep(&s->p_field_select_table[i]);
1160
    }
1161
 
1162
    av_freep(&s->dc_val_base);
1163
    av_freep(&s->coded_block_base);
1164
    av_freep(&s->mbintra_table);
1165
    av_freep(&s->cbp_table);
1166
    av_freep(&s->pred_dir_table);
1167
 
1168
    av_freep(&s->mbskip_table);
1169
 
1170
    av_freep(&s->er.error_status_table);
1171
    av_freep(&s->er.er_temp_buffer);
1172
    av_freep(&s->mb_index2xy);
1173
    av_freep(&s->lambda_table);
1174
 
1175
    av_freep(&s->cplx_tab);
1176
    av_freep(&s->bits_tab);
1177
 
1178
    s->linesize = s->uvlinesize = 0;
1179
 
1180
    return 0;
1181
}
1182
 
1183
int ff_MPV_common_frame_size_change(MpegEncContext *s)
1184
{
1185
    int i, err = 0;
1186
 
1187
    if (s->slice_context_count > 1) {
1188
        for (i = 0; i < s->slice_context_count; i++) {
1189
            free_duplicate_context(s->thread_context[i]);
1190
        }
1191
        for (i = 1; i < s->slice_context_count; i++) {
1192
            av_freep(&s->thread_context[i]);
1193
        }
1194
    } else
1195
        free_duplicate_context(s);
1196
 
1197
    if ((err = free_context_frame(s)) < 0)
1198
        return err;
1199
 
1200
    if (s->picture)
1201
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1202
                s->picture[i].needs_realloc = 1;
1203
        }
1204
 
1205
    s->last_picture_ptr         =
1206
    s->next_picture_ptr         =
1207
    s->current_picture_ptr      = NULL;
1208
 
1209
    // init
1210
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1211
        s->mb_height = (s->height + 31) / 32 * 2;
1212
    else
1213
        s->mb_height = (s->height + 15) / 16;
1214
 
1215
    if ((s->width || s->height) &&
1216
        av_image_check_size(s->width, s->height, 0, s->avctx))
1217
        return AVERROR_INVALIDDATA;
1218
 
1219
    if ((err = init_context_frame(s)))
1220
        goto fail;
1221
 
1222
    s->thread_context[0]   = s;
1223
 
1224
    if (s->width && s->height) {
1225
        int nb_slices = s->slice_context_count;
1226
        if (nb_slices > 1) {
1227
            for (i = 1; i < nb_slices; i++) {
1228
                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1229
                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1230
            }
1231
 
1232
            for (i = 0; i < nb_slices; i++) {
1233
                if (init_duplicate_context(s->thread_context[i]) < 0)
1234
                    goto fail;
1235
                    s->thread_context[i]->start_mb_y =
1236
                        (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1237
                    s->thread_context[i]->end_mb_y   =
1238
                        (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1239
            }
1240
        } else {
1241
            err = init_duplicate_context(s);
1242
            if (err < 0)
1243
                goto fail;
1244
            s->start_mb_y = 0;
1245
            s->end_mb_y   = s->mb_height;
1246
        }
1247
        s->slice_context_count = nb_slices;
1248
    }
1249
 
1250
    return 0;
1251
 fail:
1252
    ff_MPV_common_end(s);
1253
    return err;
1254
}
1255
 
1256
/* init common structure for both encoder and decoder */
1257
void ff_MPV_common_end(MpegEncContext *s)
1258
{
1259
    int i;
1260
 
1261
    if (s->slice_context_count > 1) {
1262
        for (i = 0; i < s->slice_context_count; i++) {
1263
            free_duplicate_context(s->thread_context[i]);
1264
        }
1265
        for (i = 1; i < s->slice_context_count; i++) {
1266
            av_freep(&s->thread_context[i]);
1267
        }
1268
        s->slice_context_count = 1;
1269
    } else free_duplicate_context(s);
1270
 
1271
    av_freep(&s->parse_context.buffer);
1272
    s->parse_context.buffer_size = 0;
1273
 
1274
    av_freep(&s->bitstream_buffer);
1275
    s->allocated_bitstream_buffer_size = 0;
1276
 
1277
    av_freep(&s->avctx->stats_out);
1278
    av_freep(&s->ac_stats);
1279
 
1280
    if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
1281
    if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1282
    s->q_chroma_intra_matrix=   NULL;
1283
    s->q_chroma_intra_matrix16= NULL;
1284
    av_freep(&s->q_intra_matrix);
1285
    av_freep(&s->q_inter_matrix);
1286
    av_freep(&s->q_intra_matrix16);
1287
    av_freep(&s->q_inter_matrix16);
1288
    av_freep(&s->input_picture);
1289
    av_freep(&s->reordered_input_picture);
1290
    av_freep(&s->dct_offset);
1291
 
1292
    if (s->picture) {
1293
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1294
            free_picture_tables(&s->picture[i]);
1295
            ff_mpeg_unref_picture(s, &s->picture[i]);
1296
        }
1297
    }
1298
    av_freep(&s->picture);
1299
    free_picture_tables(&s->last_picture);
1300
    ff_mpeg_unref_picture(s, &s->last_picture);
1301
    free_picture_tables(&s->current_picture);
1302
    ff_mpeg_unref_picture(s, &s->current_picture);
1303
    free_picture_tables(&s->next_picture);
1304
    ff_mpeg_unref_picture(s, &s->next_picture);
1305
    free_picture_tables(&s->new_picture);
1306
    ff_mpeg_unref_picture(s, &s->new_picture);
1307
 
1308
    free_context_frame(s);
1309
 
1310
    s->context_initialized      = 0;
1311
    s->last_picture_ptr         =
1312
    s->next_picture_ptr         =
1313
    s->current_picture_ptr      = NULL;
1314
    s->linesize = s->uvlinesize = 0;
1315
}
1316
 
1317
av_cold void ff_init_rl(RLTable *rl,
1318
                        uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1319
{
1320
    int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1321
    uint8_t index_run[MAX_RUN + 1];
1322
    int last, run, level, start, end, i;
1323
 
1324
    /* If table is static, we can quit if rl->max_level[0] is not NULL */
1325
    if (static_store && rl->max_level[0])
1326
        return;
1327
 
1328
    /* compute max_level[], max_run[] and index_run[] */
1329
    for (last = 0; last < 2; last++) {
1330
        if (last == 0) {
1331
            start = 0;
1332
            end = rl->last;
1333
        } else {
1334
            start = rl->last;
1335
            end = rl->n;
1336
        }
1337
 
1338
        memset(max_level, 0, MAX_RUN + 1);
1339
        memset(max_run, 0, MAX_LEVEL + 1);
1340
        memset(index_run, rl->n, MAX_RUN + 1);
1341
        for (i = start; i < end; i++) {
1342
            run   = rl->table_run[i];
1343
            level = rl->table_level[i];
1344
            if (index_run[run] == rl->n)
1345
                index_run[run] = i;
1346
            if (level > max_level[run])
1347
                max_level[run] = level;
1348
            if (run > max_run[level])
1349
                max_run[level] = run;
1350
        }
1351
        if (static_store)
1352
            rl->max_level[last] = static_store[last];
1353
        else
1354
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
1355
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1356
        if (static_store)
1357
            rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
1358
        else
1359
            rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
1360
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1361
        if (static_store)
1362
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1363
        else
1364
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
1365
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1366
    }
1367
}
1368
 
1369
av_cold void ff_init_vlc_rl(RLTable *rl)
1370
{
1371
    int i, q;
1372
 
1373
    for (q = 0; q < 32; q++) {
1374
        int qmul = q * 2;
1375
        int qadd = (q - 1) | 1;
1376
 
1377
        if (q == 0) {
1378
            qmul = 1;
1379
            qadd = 0;
1380
        }
1381
        for (i = 0; i < rl->vlc.table_size; i++) {
1382
            int code = rl->vlc.table[i][0];
1383
            int len  = rl->vlc.table[i][1];
1384
            int level, run;
1385
 
1386
            if (len == 0) { // illegal code
1387
                run   = 66;
1388
                level = MAX_LEVEL;
1389
            } else if (len < 0) { // more bits needed
1390
                run   = 0;
1391
                level = code;
1392
            } else {
1393
                if (code == rl->n) { // esc
1394
                    run   = 66;
1395
                    level =  0;
1396
                } else {
1397
                    run   = rl->table_run[code] + 1;
1398
                    level = rl->table_level[code] * qmul + qadd;
1399
                    if (code >= rl->last) run += 192;
1400
                }
1401
            }
1402
            rl->rl_vlc[q][i].len   = len;
1403
            rl->rl_vlc[q][i].level = level;
1404
            rl->rl_vlc[q][i].run   = run;
1405
        }
1406
    }
1407
}
1408
 
1409
void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1410
{
1411
    int i;
1412
 
1413
    /* release non reference frames */
1414
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1415
        if (!s->picture[i].reference &&
1416
            (remove_current || &s->picture[i] !=  s->current_picture_ptr)) {
1417
            ff_mpeg_unref_picture(s, &s->picture[i]);
1418
        }
1419
    }
1420
}
1421
 
1422
static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1423
{
1424
    if (pic == s->last_picture_ptr)
1425
        return 0;
1426
    if (pic->f.data[0] == NULL)
1427
        return 1;
1428
    if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1429
        return 1;
1430
    return 0;
1431
}
1432
 
1433
static int find_unused_picture(MpegEncContext *s, int shared)
1434
{
1435
    int i;
1436
 
1437
    if (shared) {
1438
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1439
            if (s->picture[i].f.data[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1440
                return i;
1441
        }
1442
    } else {
1443
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1444
            if (pic_is_unused(s, &s->picture[i]))
1445
                return i;
1446
        }
1447
    }
1448
 
1449
    av_log(s->avctx, AV_LOG_FATAL,
1450
           "Internal error, picture buffer overflow\n");
1451
    /* We could return -1, but the codec would crash trying to draw into a
1452
     * non-existing frame anyway. This is safer than waiting for a random crash.
1453
     * Also the return of this is never useful, an encoder must only allocate
1454
     * as much as allowed in the specification. This has no relationship to how
1455
     * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1456
     * enough for such valid streams).
1457
     * Plus, a decoder has to check stream validity and remove frames if too
1458
     * many reference frames are around. Waiting for "OOM" is not correct at
1459
     * all. Similarly, missing reference frames have to be replaced by
1460
     * interpolated/MC frames, anything else is a bug in the codec ...
1461
     */
1462
    abort();
1463
    return -1;
1464
}
1465
 
1466
int ff_find_unused_picture(MpegEncContext *s, int shared)
1467
{
1468
    int ret = find_unused_picture(s, shared);
1469
 
1470
    if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1471
        if (s->picture[ret].needs_realloc) {
1472
            s->picture[ret].needs_realloc = 0;
1473
            free_picture_tables(&s->picture[ret]);
1474
            ff_mpeg_unref_picture(s, &s->picture[ret]);
1475
            avcodec_get_frame_defaults(&s->picture[ret].f);
1476
        }
1477
    }
1478
    return ret;
1479
}
1480
 
1481
static void update_noise_reduction(MpegEncContext *s)
1482
{
1483
    int intra, i;
1484
 
1485
    for (intra = 0; intra < 2; intra++) {
1486
        if (s->dct_count[intra] > (1 << 16)) {
1487
            for (i = 0; i < 64; i++) {
1488
                s->dct_error_sum[intra][i] >>= 1;
1489
            }
1490
            s->dct_count[intra] >>= 1;
1491
        }
1492
 
1493
        for (i = 0; i < 64; i++) {
1494
            s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1495
                                       s->dct_count[intra] +
1496
                                       s->dct_error_sum[intra][i] / 2) /
1497
                                      (s->dct_error_sum[intra][i] + 1);
1498
        }
1499
    }
1500
}
1501
 
1502
/**
1503
 * generic function for encode/decode called after coding/decoding
1504
 * the header and before a frame is coded/decoded.
1505
 */
1506
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1507
{
1508
    int i, ret;
1509
    Picture *pic;
1510
    s->mb_skipped = 0;
1511
 
1512
    if (!ff_thread_can_start_frame(avctx)) {
1513
        av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1514
        return -1;
1515
    }
1516
 
1517
    /* mark & release old frames */
1518
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1519
        s->last_picture_ptr != s->next_picture_ptr &&
1520
        s->last_picture_ptr->f.data[0]) {
1521
        ff_mpeg_unref_picture(s, s->last_picture_ptr);
1522
    }
1523
 
1524
    /* release forgotten pictures */
1525
    /* if (mpeg124/h263) */
1526
    if (!s->encoding) {
1527
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1528
            if (&s->picture[i] != s->last_picture_ptr &&
1529
                &s->picture[i] != s->next_picture_ptr &&
1530
                s->picture[i].reference && !s->picture[i].needs_realloc) {
1531
                if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1532
                    av_log(avctx, AV_LOG_ERROR,
1533
                           "releasing zombie picture\n");
1534
                ff_mpeg_unref_picture(s, &s->picture[i]);
1535
            }
1536
        }
1537
    }
1538
 
1539
    ff_mpeg_unref_picture(s, &s->current_picture);
1540
 
1541
    if (!s->encoding) {
1542
        ff_release_unused_pictures(s, 1);
1543
 
1544
        if (s->current_picture_ptr &&
1545
            s->current_picture_ptr->f.data[0] == NULL) {
1546
            // we already have a unused image
1547
            // (maybe it was set before reading the header)
1548
            pic = s->current_picture_ptr;
1549
        } else {
1550
            i   = ff_find_unused_picture(s, 0);
1551
            if (i < 0) {
1552
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1553
                return i;
1554
            }
1555
            pic = &s->picture[i];
1556
        }
1557
 
1558
        pic->reference = 0;
1559
        if (!s->droppable) {
1560
            if (s->pict_type != AV_PICTURE_TYPE_B)
1561
                pic->reference = 3;
1562
        }
1563
 
1564
        pic->f.coded_picture_number = s->coded_picture_number++;
1565
 
1566
        if (ff_alloc_picture(s, pic, 0) < 0)
1567
            return -1;
1568
 
1569
        s->current_picture_ptr = pic;
1570
        // FIXME use only the vars from current_pic
1571
        s->current_picture_ptr->f.top_field_first = s->top_field_first;
1572
        if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1573
            s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1574
            if (s->picture_structure != PICT_FRAME)
1575
                s->current_picture_ptr->f.top_field_first =
1576
                    (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1577
        }
1578
        s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1579
                                                     !s->progressive_sequence;
1580
        s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
1581
    }
1582
 
1583
    s->current_picture_ptr->f.pict_type = s->pict_type;
1584
    // if (s->flags && CODEC_FLAG_QSCALE)
1585
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1586
    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1587
 
1588
    if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1589
                                   s->current_picture_ptr)) < 0)
1590
        return ret;
1591
 
1592
    if (s->pict_type != AV_PICTURE_TYPE_B) {
1593
        s->last_picture_ptr = s->next_picture_ptr;
1594
        if (!s->droppable)
1595
            s->next_picture_ptr = s->current_picture_ptr;
1596
    }
1597
    av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1598
            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1599
            s->last_picture_ptr    ? s->last_picture_ptr->f.data[0]    : NULL,
1600
            s->next_picture_ptr    ? s->next_picture_ptr->f.data[0]    : NULL,
1601
            s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1602
            s->pict_type, s->droppable);
1603
 
1604
    if ((s->last_picture_ptr == NULL ||
1605
         s->last_picture_ptr->f.data[0] == NULL) &&
1606
        (s->pict_type != AV_PICTURE_TYPE_I ||
1607
         s->picture_structure != PICT_FRAME)) {
1608
        int h_chroma_shift, v_chroma_shift;
1609
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1610
                                         &h_chroma_shift, &v_chroma_shift);
1611
        if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f.data[0])
1612
            av_log(avctx, AV_LOG_DEBUG,
1613
                   "allocating dummy last picture for B frame\n");
1614
        else if (s->pict_type != AV_PICTURE_TYPE_I)
1615
            av_log(avctx, AV_LOG_ERROR,
1616
                   "warning: first frame is no keyframe\n");
1617
        else if (s->picture_structure != PICT_FRAME)
1618
            av_log(avctx, AV_LOG_DEBUG,
1619
                   "allocate dummy last picture for field based first keyframe\n");
1620
 
1621
        /* Allocate a dummy frame */
1622
        i = ff_find_unused_picture(s, 0);
1623
        if (i < 0) {
1624
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1625
            return i;
1626
        }
1627
        s->last_picture_ptr = &s->picture[i];
1628
        s->last_picture_ptr->f.key_frame = 0;
1629
        if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1630
            s->last_picture_ptr = NULL;
1631
            return -1;
1632
        }
1633
 
1634
        memset(s->last_picture_ptr->f.data[0], 0x80,
1635
               avctx->height * s->last_picture_ptr->f.linesize[0]);
1636
        memset(s->last_picture_ptr->f.data[1], 0x80,
1637
               (avctx->height >> v_chroma_shift) *
1638
               s->last_picture_ptr->f.linesize[1]);
1639
        memset(s->last_picture_ptr->f.data[2], 0x80,
1640
               (avctx->height >> v_chroma_shift) *
1641
               s->last_picture_ptr->f.linesize[2]);
1642
 
1643
        if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1644
            for(i=0; iheight; i++)
1645
            memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
1646
        }
1647
 
1648
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1649
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1650
    }
1651
    if ((s->next_picture_ptr == NULL ||
1652
         s->next_picture_ptr->f.data[0] == NULL) &&
1653
        s->pict_type == AV_PICTURE_TYPE_B) {
1654
        /* Allocate a dummy frame */
1655
        i = ff_find_unused_picture(s, 0);
1656
        if (i < 0) {
1657
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1658
            return i;
1659
        }
1660
        s->next_picture_ptr = &s->picture[i];
1661
        s->next_picture_ptr->f.key_frame = 0;
1662
        if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1663
            s->next_picture_ptr = NULL;
1664
            return -1;
1665
        }
1666
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1667
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1668
    }
1669
 
1670
#if 0 // BUFREF-FIXME
1671
    memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1672
    memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
1673
#endif
1674
    if (s->last_picture_ptr) {
1675
        ff_mpeg_unref_picture(s, &s->last_picture);
1676
        if (s->last_picture_ptr->f.data[0] &&
1677
            (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1678
                                       s->last_picture_ptr)) < 0)
1679
            return ret;
1680
    }
1681
    if (s->next_picture_ptr) {
1682
        ff_mpeg_unref_picture(s, &s->next_picture);
1683
        if (s->next_picture_ptr->f.data[0] &&
1684
            (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1685
                                       s->next_picture_ptr)) < 0)
1686
            return ret;
1687
    }
1688
 
1689
    av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1690
                                                 s->last_picture_ptr->f.data[0]));
1691
 
1692
    if (s->picture_structure!= PICT_FRAME) {
1693
        int i;
1694
        for (i = 0; i < 4; i++) {
1695
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
1696
                s->current_picture.f.data[i] +=
1697
                    s->current_picture.f.linesize[i];
1698
            }
1699
            s->current_picture.f.linesize[i] *= 2;
1700
            s->last_picture.f.linesize[i]    *= 2;
1701
            s->next_picture.f.linesize[i]    *= 2;
1702
        }
1703
    }
1704
 
1705
    s->err_recognition = avctx->err_recognition;
1706
 
1707
    /* set dequantizer, we can't do it during init as
1708
     * it might change for mpeg4 and we can't do it in the header
1709
     * decode as init is not called for mpeg4 there yet */
1710
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1711
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1712
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1713
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1714
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1715
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1716
    } else {
1717
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1718
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1719
    }
1720
 
1721
    if (s->dct_error_sum) {
1722
        av_assert2(s->avctx->noise_reduction && s->encoding);
1723
        update_noise_reduction(s);
1724
    }
1725
 
1726
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1727
        return ff_xvmc_field_start(s, avctx);
1728
 
1729
    return 0;
1730
}
1731
 
1732
/* generic function for encode/decode called after a
1733
 * frame has been coded/decoded. */
1734
void ff_MPV_frame_end(MpegEncContext *s)
1735
{
1736
    /* redraw edges for the frame if decoding didn't complete */
1737
    // just to make sure that all data is rendered.
1738
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1739
        ff_xvmc_field_end(s);
1740
   } else if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1741
              !s->avctx->hwaccel &&
1742
              !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1743
              s->unrestricted_mv &&
1744
              s->current_picture.reference &&
1745
              !s->intra_only &&
1746
              !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1747
              !s->avctx->lowres
1748
            ) {
1749
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1750
        int hshift = desc->log2_chroma_w;
1751
        int vshift = desc->log2_chroma_h;
1752
        s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
1753
                          s->h_edge_pos, s->v_edge_pos,
1754
                          EDGE_WIDTH, EDGE_WIDTH,
1755
                          EDGE_TOP | EDGE_BOTTOM);
1756
        s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
1757
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1758
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1759
                          EDGE_TOP | EDGE_BOTTOM);
1760
        s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
1761
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1762
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1763
                          EDGE_TOP | EDGE_BOTTOM);
1764
    }
1765
 
1766
    emms_c();
1767
 
1768
    s->last_pict_type                 = s->pict_type;
1769
    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1770
    if (s->pict_type!= AV_PICTURE_TYPE_B) {
1771
        s->last_non_b_pict_type = s->pict_type;
1772
    }
1773
#if 0
1774
    /* copy back current_picture variables */
1775
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1776
        if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1777
            s->picture[i] = s->current_picture;
1778
            break;
1779
        }
1780
    }
1781
    av_assert0(i < MAX_PICTURE_COUNT);
1782
#endif
1783
 
1784
    // clear copies, to avoid confusion
1785
#if 0
1786
    memset(&s->last_picture,    0, sizeof(Picture));
1787
    memset(&s->next_picture,    0, sizeof(Picture));
1788
    memset(&s->current_picture, 0, sizeof(Picture));
1789
#endif
1790
    s->avctx->coded_frame = &s->current_picture_ptr->f;
1791
 
1792
    if (s->current_picture.reference)
1793
        ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1794
}
1795
 
1796
/**
1797
 * Draw a line from (ex, ey) -> (sx, sy).
1798
 * @param w width of the image
1799
 * @param h height of the image
1800
 * @param stride stride/linesize of the image
1801
 * @param color color of the arrow
1802
 */
1803
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1804
                      int w, int h, int stride, int color)
1805
{
1806
    int x, y, fr, f;
1807
 
1808
    sx = av_clip(sx, 0, w - 1);
1809
    sy = av_clip(sy, 0, h - 1);
1810
    ex = av_clip(ex, 0, w - 1);
1811
    ey = av_clip(ey, 0, h - 1);
1812
 
1813
    buf[sy * stride + sx] += color;
1814
 
1815
    if (FFABS(ex - sx) > FFABS(ey - sy)) {
1816
        if (sx > ex) {
1817
            FFSWAP(int, sx, ex);
1818
            FFSWAP(int, sy, ey);
1819
        }
1820
        buf += sx + sy * stride;
1821
        ex  -= sx;
1822
        f    = ((ey - sy) << 16) / ex;
1823
        for (x = 0; x <= ex; x++) {
1824
            y  = (x * f) >> 16;
1825
            fr = (x * f) & 0xFFFF;
1826
            buf[y * stride + x]       += (color * (0x10000 - fr)) >> 16;
1827
            if(fr) buf[(y + 1) * stride + x] += (color *            fr ) >> 16;
1828
        }
1829
    } else {
1830
        if (sy > ey) {
1831
            FFSWAP(int, sx, ex);
1832
            FFSWAP(int, sy, ey);
1833
        }
1834
        buf += sx + sy * stride;
1835
        ey  -= sy;
1836
        if (ey)
1837
            f = ((ex - sx) << 16) / ey;
1838
        else
1839
            f = 0;
1840
        for(y= 0; y <= ey; y++){
1841
            x  = (y*f) >> 16;
1842
            fr = (y*f) & 0xFFFF;
1843
            buf[y * stride + x]     += (color * (0x10000 - fr)) >> 16;
1844
            if(fr) buf[y * stride + x + 1] += (color *            fr ) >> 16;
1845
        }
1846
    }
1847
}
1848
 
1849
/**
1850
 * Draw an arrow from (ex, ey) -> (sx, sy).
1851
 * @param w width of the image
1852
 * @param h height of the image
1853
 * @param stride stride/linesize of the image
1854
 * @param color color of the arrow
1855
 */
1856
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1857
                       int ey, int w, int h, int stride, int color)
1858
{
1859
    int dx,dy;
1860
 
1861
    sx = av_clip(sx, -100, w + 100);
1862
    sy = av_clip(sy, -100, h + 100);
1863
    ex = av_clip(ex, -100, w + 100);
1864
    ey = av_clip(ey, -100, h + 100);
1865
 
1866
    dx = ex - sx;
1867
    dy = ey - sy;
1868
 
1869
    if (dx * dx + dy * dy > 3 * 3) {
1870
        int rx =  dx + dy;
1871
        int ry = -dx + dy;
1872
        int length = ff_sqrt((rx * rx + ry * ry) << 8);
1873
 
1874
        // FIXME subpixel accuracy
1875
        rx = ROUNDED_DIV(rx * 3 << 4, length);
1876
        ry = ROUNDED_DIV(ry * 3 << 4, length);
1877
 
1878
        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1879
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1880
    }
1881
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1882
}
1883
 
1884
/**
1885
 * Print debugging info for the given picture.
1886
 */
1887
void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
1888
                         int *low_delay,
1889
                         int mb_width, int mb_height, int mb_stride, int quarter_sample)
1890
{
1891
    if (avctx->hwaccel || !p || !p->mb_type
1892
        || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
1893
        return;
1894
 
1895
 
1896
    if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1897
        int x,y;
1898
 
1899
        av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1900
               av_get_picture_type_char(pict->pict_type));
1901
        for (y = 0; y < mb_height; y++) {
1902
            for (x = 0; x < mb_width; x++) {
1903
                if (avctx->debug & FF_DEBUG_SKIP) {
1904
                    int count = mbskip_table[x + y * mb_stride];
1905
                    if (count > 9)
1906
                        count = 9;
1907
                    av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1908
                }
1909
                if (avctx->debug & FF_DEBUG_QP) {
1910
                    av_log(avctx, AV_LOG_DEBUG, "%2d",
1911
                           p->qscale_table[x + y * mb_stride]);
1912
                }
1913
                if (avctx->debug & FF_DEBUG_MB_TYPE) {
1914
                    int mb_type = p->mb_type[x + y * mb_stride];
1915
                    // Type & MV direction
1916
                    if (IS_PCM(mb_type))
1917
                        av_log(avctx, AV_LOG_DEBUG, "P");
1918
                    else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1919
                        av_log(avctx, AV_LOG_DEBUG, "A");
1920
                    else if (IS_INTRA4x4(mb_type))
1921
                        av_log(avctx, AV_LOG_DEBUG, "i");
1922
                    else if (IS_INTRA16x16(mb_type))
1923
                        av_log(avctx, AV_LOG_DEBUG, "I");
1924
                    else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1925
                        av_log(avctx, AV_LOG_DEBUG, "d");
1926
                    else if (IS_DIRECT(mb_type))
1927
                        av_log(avctx, AV_LOG_DEBUG, "D");
1928
                    else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1929
                        av_log(avctx, AV_LOG_DEBUG, "g");
1930
                    else if (IS_GMC(mb_type))
1931
                        av_log(avctx, AV_LOG_DEBUG, "G");
1932
                    else if (IS_SKIP(mb_type))
1933
                        av_log(avctx, AV_LOG_DEBUG, "S");
1934
                    else if (!USES_LIST(mb_type, 1))
1935
                        av_log(avctx, AV_LOG_DEBUG, ">");
1936
                    else if (!USES_LIST(mb_type, 0))
1937
                        av_log(avctx, AV_LOG_DEBUG, "<");
1938
                    else {
1939
                        av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1940
                        av_log(avctx, AV_LOG_DEBUG, "X");
1941
                    }
1942
 
1943
                    // segmentation
1944
                    if (IS_8X8(mb_type))
1945
                        av_log(avctx, AV_LOG_DEBUG, "+");
1946
                    else if (IS_16X8(mb_type))
1947
                        av_log(avctx, AV_LOG_DEBUG, "-");
1948
                    else if (IS_8X16(mb_type))
1949
                        av_log(avctx, AV_LOG_DEBUG, "|");
1950
                    else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1951
                        av_log(avctx, AV_LOG_DEBUG, " ");
1952
                    else
1953
                        av_log(avctx, AV_LOG_DEBUG, "?");
1954
 
1955
 
1956
                    if (IS_INTERLACED(mb_type))
1957
                        av_log(avctx, AV_LOG_DEBUG, "=");
1958
                    else
1959
                        av_log(avctx, AV_LOG_DEBUG, " ");
1960
                }
1961
            }
1962
            av_log(avctx, AV_LOG_DEBUG, "\n");
1963
        }
1964
    }
1965
 
1966
    if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1967
        (avctx->debug_mv)) {
1968
        const int shift = 1 + quarter_sample;
1969
        int mb_y;
1970
        uint8_t *ptr;
1971
        int i;
1972
        int h_chroma_shift, v_chroma_shift, block_height;
1973
        const int width          = avctx->width;
1974
        const int height         = avctx->height;
1975
        const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
1976
        const int mv_stride      = (mb_width << mv_sample_log2) +
1977
                                   (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1978
 
1979
        *low_delay = 0; // needed to see the vectors without trashing the buffers
1980
 
1981
        avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1982
 
1983
        av_frame_make_writable(pict);
1984
 
1985
        pict->opaque = NULL;
1986
        ptr          = pict->data[0];
1987
        block_height = 16 >> v_chroma_shift;
1988
 
1989
        for (mb_y = 0; mb_y < mb_height; mb_y++) {
1990
            int mb_x;
1991
            for (mb_x = 0; mb_x < mb_width; mb_x++) {
1992
                const int mb_index = mb_x + mb_y * mb_stride;
1993
                if ((avctx->debug_mv) && p->motion_val[0]) {
1994
                    int type;
1995
                    for (type = 0; type < 3; type++) {
1996
                        int direction = 0;
1997
                        switch (type) {
1998
                        case 0:
1999
                            if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2000
                                (pict->pict_type!= AV_PICTURE_TYPE_P))
2001
                                continue;
2002
                            direction = 0;
2003
                            break;
2004
                        case 1:
2005
                            if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2006
                                (pict->pict_type!= AV_PICTURE_TYPE_B))
2007
                                continue;
2008
                            direction = 0;
2009
                            break;
2010
                        case 2:
2011
                            if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2012
                                (pict->pict_type!= AV_PICTURE_TYPE_B))
2013
                                continue;
2014
                            direction = 1;
2015
                            break;
2016
                        }
2017
                        if (!USES_LIST(p->mb_type[mb_index], direction))
2018
                            continue;
2019
 
2020
                        if (IS_8X8(p->mb_type[mb_index])) {
2021
                            int i;
2022
                            for (i = 0; i < 4; i++) {
2023
                                int sx = mb_x * 16 + 4 + 8 * (i & 1);
2024
                                int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2025
                                int xy = (mb_x * 2 + (i & 1) +
2026
                                          (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2027
                                int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
2028
                                int my = (p->motion_val[direction][xy][1] >> shift) + sy;
2029
                                draw_arrow(ptr, sx, sy, mx, my, width,
2030
                                           height, pict->linesize[0], 100);
2031
                            }
2032
                        } else if (IS_16X8(p->mb_type[mb_index])) {
2033
                            int i;
2034
                            for (i = 0; i < 2; i++) {
2035
                                int sx = mb_x * 16 + 8;
2036
                                int sy = mb_y * 16 + 4 + 8 * i;
2037
                                int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2038
                                int mx = (p->motion_val[direction][xy][0] >> shift);
2039
                                int my = (p->motion_val[direction][xy][1] >> shift);
2040
 
2041
                                if (IS_INTERLACED(p->mb_type[mb_index]))
2042
                                    my *= 2;
2043
 
2044
                            draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2045
                                       height, pict->linesize[0], 100);
2046
                            }
2047
                        } else if (IS_8X16(p->mb_type[mb_index])) {
2048
                            int i;
2049
                            for (i = 0; i < 2; i++) {
2050
                                int sx = mb_x * 16 + 4 + 8 * i;
2051
                                int sy = mb_y * 16 + 8;
2052
                                int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2053
                                int mx = p->motion_val[direction][xy][0] >> shift;
2054
                                int my = p->motion_val[direction][xy][1] >> shift;
2055
 
2056
                                if (IS_INTERLACED(p->mb_type[mb_index]))
2057
                                    my *= 2;
2058
 
2059
                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2060
                                           height, pict->linesize[0], 100);
2061
                            }
2062
                        } else {
2063
                              int sx= mb_x * 16 + 8;
2064
                              int sy= mb_y * 16 + 8;
2065
                              int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2066
                              int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
2067
                              int my= (p->motion_val[direction][xy][1]>>shift) + sy;
2068
                              draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
2069
                        }
2070
                    }
2071
                }
2072
                if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2073
                    uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
2074
                                 0x0101010101010101ULL;
2075
                    int y;
2076
                    for (y = 0; y < block_height; y++) {
2077
                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
2078
                                      (block_height * mb_y + y) *
2079
                                      pict->linesize[1]) = c;
2080
                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
2081
                                      (block_height * mb_y + y) *
2082
                                      pict->linesize[2]) = c;
2083
                    }
2084
                }
2085
                if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2086
                    p->motion_val[0]) {
2087
                    int mb_type = p->mb_type[mb_index];
2088
                    uint64_t u,v;
2089
                    int y;
2090
#define COLOR(theta, r) \
2091
    u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2092
    v = (int)(128 + r * sin(theta * 3.141592 / 180));
2093
 
2094
 
2095
                    u = v = 128;
2096
                    if (IS_PCM(mb_type)) {
2097
                        COLOR(120, 48)
2098
                    } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2099
                               IS_INTRA16x16(mb_type)) {
2100
                        COLOR(30, 48)
2101
                    } else if (IS_INTRA4x4(mb_type)) {
2102
                        COLOR(90, 48)
2103
                    } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2104
                        // COLOR(120, 48)
2105
                    } else if (IS_DIRECT(mb_type)) {
2106
                        COLOR(150, 48)
2107
                    } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2108
                        COLOR(170, 48)
2109
                    } else if (IS_GMC(mb_type)) {
2110
                        COLOR(190, 48)
2111
                    } else if (IS_SKIP(mb_type)) {
2112
                        // COLOR(180, 48)
2113
                    } else if (!USES_LIST(mb_type, 1)) {
2114
                        COLOR(240, 48)
2115
                    } else if (!USES_LIST(mb_type, 0)) {
2116
                        COLOR(0, 48)
2117
                    } else {
2118
                        av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2119
                        COLOR(300,48)
2120
                    }
2121
 
2122
                    u *= 0x0101010101010101ULL;
2123
                    v *= 0x0101010101010101ULL;
2124
                    for (y = 0; y < block_height; y++) {
2125
                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
2126
                                      (block_height * mb_y + y) * pict->linesize[1]) = u;
2127
                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
2128
                                      (block_height * mb_y + y) * pict->linesize[2]) = v;
2129
                    }
2130
 
2131
                    // segmentation
2132
                    if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2133
                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2134
                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2135
                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2136
                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2137
                    }
2138
                    if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2139
                        for (y = 0; y < 16; y++)
2140
                            pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2141
                                          pict->linesize[0]] ^= 0x80;
2142
                    }
2143
                    if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2144
                        int dm = 1 << (mv_sample_log2 - 2);
2145
                        for (i = 0; i < 4; i++) {
2146
                            int sx = mb_x * 16 + 8 * (i & 1);
2147
                            int sy = mb_y * 16 + 8 * (i >> 1);
2148
                            int xy = (mb_x * 2 + (i & 1) +
2149
                                     (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2150
                            // FIXME bidir
2151
                            int32_t *mv = (int32_t *) &p->motion_val[0][xy];
2152
                            if (mv[0] != mv[dm] ||
2153
                                mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2154
                                for (y = 0; y < 8; y++)
2155
                                    pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2156
                            if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2157
                                *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2158
                                              pict->linesize[0]) ^= 0x8080808080808080ULL;
2159
                        }
2160
                    }
2161
 
2162
                    if (IS_INTERLACED(mb_type) &&
2163
                        avctx->codec->id == AV_CODEC_ID_H264) {
2164
                        // hmm
2165
                    }
2166
                }
2167
                mbskip_table[mb_index] = 0;
2168
            }
2169
        }
2170
    }
2171
}
2172
 
2173
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2174
{
2175
    ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
2176
                         s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2177
}
2178
 
2179
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2180
{
2181
    AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2182
    int offset = 2*s->mb_stride + 1;
2183
    if(!ref)
2184
        return AVERROR(ENOMEM);
2185
    av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2186
    ref->size -= offset;
2187
    ref->data += offset;
2188
    return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
2189
}
2190
 
2191
static inline int hpel_motion_lowres(MpegEncContext *s,
2192
                                     uint8_t *dest, uint8_t *src,
2193
                                     int field_based, int field_select,
2194
                                     int src_x, int src_y,
2195
                                     int width, int height, ptrdiff_t stride,
2196
                                     int h_edge_pos, int v_edge_pos,
2197
                                     int w, int h, h264_chroma_mc_func *pix_op,
2198
                                     int motion_x, int motion_y)
2199
{
2200
    const int lowres   = s->avctx->lowres;
2201
    const int op_index = FFMIN(lowres, 3);
2202
    const int s_mask   = (2 << lowres) - 1;
2203
    int emu = 0;
2204
    int sx, sy;
2205
 
2206
    if (s->quarter_sample) {
2207
        motion_x /= 2;
2208
        motion_y /= 2;
2209
    }
2210
 
2211
    sx = motion_x & s_mask;
2212
    sy = motion_y & s_mask;
2213
    src_x += motion_x >> lowres + 1;
2214
    src_y += motion_y >> lowres + 1;
2215
 
2216
    src   += src_y * stride + src_x;
2217
 
2218
    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w,                 0) ||
2219
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2220
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, s->linesize,
2221
                                src, s->linesize, w + 1,
2222
                                (h + 1) << field_based, src_x,
2223
                                src_y   << field_based,
2224
                                h_edge_pos,
2225
                                v_edge_pos);
2226
        src = s->edge_emu_buffer;
2227
        emu = 1;
2228
    }
2229
 
2230
    sx = (sx << 2) >> lowres;
2231
    sy = (sy << 2) >> lowres;
2232
    if (field_select)
2233
        src += s->linesize;
2234
    pix_op[op_index](dest, src, stride, h, sx, sy);
2235
    return emu;
2236
}
2237
 
2238
/* apply one mpeg motion vector to the three components */
2239
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2240
                                                uint8_t *dest_y,
2241
                                                uint8_t *dest_cb,
2242
                                                uint8_t *dest_cr,
2243
                                                int field_based,
2244
                                                int bottom_field,
2245
                                                int field_select,
2246
                                                uint8_t **ref_picture,
2247
                                                h264_chroma_mc_func *pix_op,
2248
                                                int motion_x, int motion_y,
2249
                                                int h, int mb_y)
2250
{
2251
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2252
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2253
    ptrdiff_t uvlinesize, linesize;
2254
    const int lowres     = s->avctx->lowres;
2255
    const int op_index   = FFMIN(lowres-1+s->chroma_x_shift, 3);
2256
    const int block_s    = 8>>lowres;
2257
    const int s_mask     = (2 << lowres) - 1;
2258
    const int h_edge_pos = s->h_edge_pos >> lowres;
2259
    const int v_edge_pos = s->v_edge_pos >> lowres;
2260
    linesize   = s->current_picture.f.linesize[0] << field_based;
2261
    uvlinesize = s->current_picture.f.linesize[1] << field_based;
2262
 
2263
    // FIXME obviously not perfect but qpel will not work in lowres anyway
2264
    if (s->quarter_sample) {
2265
        motion_x /= 2;
2266
        motion_y /= 2;
2267
    }
2268
 
2269
    if(field_based){
2270
        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2271
    }
2272
 
2273
    sx = motion_x & s_mask;
2274
    sy = motion_y & s_mask;
2275
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2276
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
2277
 
2278
    if (s->out_format == FMT_H263) {
2279
        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
2280
        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
2281
        uvsrc_x = src_x >> 1;
2282
        uvsrc_y = src_y >> 1;
2283
    } else if (s->out_format == FMT_H261) {
2284
        // even chroma mv's are full pel in H261
2285
        mx      = motion_x / 4;
2286
        my      = motion_y / 4;
2287
        uvsx    = (2 * mx) & s_mask;
2288
        uvsy    = (2 * my) & s_mask;
2289
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2290
        uvsrc_y =    mb_y * block_s + (my >> lowres);
2291
    } else {
2292
        if(s->chroma_y_shift){
2293
            mx      = motion_x / 2;
2294
            my      = motion_y / 2;
2295
            uvsx    = mx & s_mask;
2296
            uvsy    = my & s_mask;
2297
            uvsrc_x = s->mb_x * block_s                 + (mx >> lowres + 1);
2298
            uvsrc_y =   (mb_y * block_s >> field_based) + (my >> lowres + 1);
2299
        } else {
2300
            if(s->chroma_x_shift){
2301
            //Chroma422
2302
                mx = motion_x / 2;
2303
                uvsx = mx & s_mask;
2304
                uvsy = motion_y & s_mask;
2305
                uvsrc_y = src_y;
2306
                uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
2307
            } else {
2308
            //Chroma444
2309
                uvsx = motion_x & s_mask;
2310
                uvsy = motion_y & s_mask;
2311
                uvsrc_x = src_x;
2312
                uvsrc_y = src_y;
2313
            }
2314
        }
2315
    }
2316
 
2317
    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
2318
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2319
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
2320
 
2321
    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s,       0) || uvsrc_y<0 ||
2322
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2323
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, linesize >> field_based, ptr_y,
2324
                                linesize >> field_based, 17, 17 + field_based,
2325
                                src_x, src_y << field_based, h_edge_pos,
2326
                                v_edge_pos);
2327
        ptr_y = s->edge_emu_buffer;
2328
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2329
            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2330
            s->vdsp.emulated_edge_mc(uvbuf, uvlinesize >> field_based,
2331
                                    ptr_cb, uvlinesize >> field_based, 9,
2332
                                    9 + field_based,
2333
                                    uvsrc_x, uvsrc_y << field_based,
2334
                                    h_edge_pos >> 1, v_edge_pos >> 1);
2335
            s->vdsp.emulated_edge_mc(uvbuf + 16, uvlinesize >> field_based,
2336
                                    ptr_cr, uvlinesize >> field_based, 9,
2337
                                    9 + field_based,
2338
                                    uvsrc_x, uvsrc_y << field_based,
2339
                                    h_edge_pos >> 1, v_edge_pos >> 1);
2340
            ptr_cb = uvbuf;
2341
            ptr_cr = uvbuf + 16;
2342
        }
2343
    }
2344
 
2345
    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
2346
    if (bottom_field) {
2347
        dest_y  += s->linesize;
2348
        dest_cb += s->uvlinesize;
2349
        dest_cr += s->uvlinesize;
2350
    }
2351
 
2352
    if (field_select) {
2353
        ptr_y   += s->linesize;
2354
        ptr_cb  += s->uvlinesize;
2355
        ptr_cr  += s->uvlinesize;
2356
    }
2357
 
2358
    sx = (sx << 2) >> lowres;
2359
    sy = (sy << 2) >> lowres;
2360
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2361
 
2362
    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2363
        int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2364
        uvsx = (uvsx << 2) >> lowres;
2365
        uvsy = (uvsy << 2) >> lowres;
2366
        if (hc) {
2367
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2368
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2369
        }
2370
    }
2371
    // FIXME h261 lowres loop filter
2372
}
2373
 
2374
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2375
                                            uint8_t *dest_cb, uint8_t *dest_cr,
2376
                                            uint8_t **ref_picture,
2377
                                            h264_chroma_mc_func * pix_op,
2378
                                            int mx, int my)
2379
{
2380
    const int lowres     = s->avctx->lowres;
2381
    const int op_index   = FFMIN(lowres, 3);
2382
    const int block_s    = 8 >> lowres;
2383
    const int s_mask     = (2 << lowres) - 1;
2384
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2385
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2386
    int emu = 0, src_x, src_y, sx, sy;
2387
    ptrdiff_t offset;
2388
    uint8_t *ptr;
2389
 
2390
    if (s->quarter_sample) {
2391
        mx /= 2;
2392
        my /= 2;
2393
    }
2394
 
2395
    /* In case of 8X8, we construct a single chroma motion vector
2396
       with a special rounding */
2397
    mx = ff_h263_round_chroma(mx);
2398
    my = ff_h263_round_chroma(my);
2399
 
2400
    sx = mx & s_mask;
2401
    sy = my & s_mask;
2402
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
2403
    src_y = s->mb_y * block_s + (my >> lowres + 1);
2404
 
2405
    offset = src_y * s->uvlinesize + src_x;
2406
    ptr = ref_picture[1] + offset;
2407
    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2408
        (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2409
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, s->uvlinesize, ptr, s->uvlinesize,
2410
                                9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2411
        ptr = s->edge_emu_buffer;
2412
        emu = 1;
2413
    }
2414
    sx = (sx << 2) >> lowres;
2415
    sy = (sy << 2) >> lowres;
2416
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
2417
 
2418
    ptr = ref_picture[2] + offset;
2419
    if (emu) {
2420
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, s->uvlinesize,
2421
                                ptr, s->uvlinesize, 9, 9,
2422
                                src_x, src_y, h_edge_pos, v_edge_pos);
2423
        ptr = s->edge_emu_buffer;
2424
    }
2425
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2426
}
2427
 
2428
/**
2429
 * motion compensation of a single macroblock
2430
 * @param s context
2431
 * @param dest_y luma destination pointer
2432
 * @param dest_cb chroma cb/u destination pointer
2433
 * @param dest_cr chroma cr/v destination pointer
2434
 * @param dir direction (0->forward, 1->backward)
2435
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2436
 * @param pix_op halfpel motion compensation function (average or put normally)
2437
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
2438
 */
2439
static inline void MPV_motion_lowres(MpegEncContext *s,
2440
                                     uint8_t *dest_y, uint8_t *dest_cb,
2441
                                     uint8_t *dest_cr,
2442
                                     int dir, uint8_t **ref_picture,
2443
                                     h264_chroma_mc_func *pix_op)
2444
{
2445
    int mx, my;
2446
    int mb_x, mb_y, i;
2447
    const int lowres  = s->avctx->lowres;
2448
    const int block_s = 8 >>lowres;
2449
 
2450
    mb_x = s->mb_x;
2451
    mb_y = s->mb_y;
2452
 
2453
    switch (s->mv_type) {
2454
    case MV_TYPE_16X16:
2455
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2456
                           0, 0, 0,
2457
                           ref_picture, pix_op,
2458
                           s->mv[dir][0][0], s->mv[dir][0][1],
2459
                           2 * block_s, mb_y);
2460
        break;
2461
    case MV_TYPE_8X8:
2462
        mx = 0;
2463
        my = 0;
2464
        for (i = 0; i < 4; i++) {
2465
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2466
                               s->linesize) * block_s,
2467
                               ref_picture[0], 0, 0,
2468
                               (2 * mb_x + (i & 1)) * block_s,
2469
                               (2 * mb_y + (i >> 1)) * block_s,
2470
                               s->width, s->height, s->linesize,
2471
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2472
                               block_s, block_s, pix_op,
2473
                               s->mv[dir][i][0], s->mv[dir][i][1]);
2474
 
2475
            mx += s->mv[dir][i][0];
2476
            my += s->mv[dir][i][1];
2477
        }
2478
 
2479
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2480
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
2481
                                     pix_op, mx, my);
2482
        break;
2483
    case MV_TYPE_FIELD:
2484
        if (s->picture_structure == PICT_FRAME) {
2485
            /* top field */
2486
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2487
                               1, 0, s->field_select[dir][0],
2488
                               ref_picture, pix_op,
2489
                               s->mv[dir][0][0], s->mv[dir][0][1],
2490
                               block_s, mb_y);
2491
            /* bottom field */
2492
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2493
                               1, 1, s->field_select[dir][1],
2494
                               ref_picture, pix_op,
2495
                               s->mv[dir][1][0], s->mv[dir][1][1],
2496
                               block_s, mb_y);
2497
        } else {
2498
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
2499
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2500
                ref_picture = s->current_picture_ptr->f.data;
2501
 
2502
            }
2503
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2504
                               0, 0, s->field_select[dir][0],
2505
                               ref_picture, pix_op,
2506
                               s->mv[dir][0][0],
2507
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
2508
            }
2509
        break;
2510
    case MV_TYPE_16X8:
2511
        for (i = 0; i < 2; i++) {
2512
            uint8_t **ref2picture;
2513
 
2514
            if (s->picture_structure == s->field_select[dir][i] + 1 ||
2515
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2516
                ref2picture = ref_picture;
2517
            } else {
2518
                ref2picture = s->current_picture_ptr->f.data;
2519
            }
2520
 
2521
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2522
                               0, 0, s->field_select[dir][i],
2523
                               ref2picture, pix_op,
2524
                               s->mv[dir][i][0], s->mv[dir][i][1] +
2525
                               2 * block_s * i, block_s, mb_y >> 1);
2526
 
2527
            dest_y  +=  2 * block_s *  s->linesize;
2528
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2529
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2530
        }
2531
        break;
2532
    case MV_TYPE_DMV:
2533
        if (s->picture_structure == PICT_FRAME) {
2534
            for (i = 0; i < 2; i++) {
2535
                int j;
2536
                for (j = 0; j < 2; j++) {
2537
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2538
                                       1, j, j ^ i,
2539
                                       ref_picture, pix_op,
2540
                                       s->mv[dir][2 * i + j][0],
2541
                                       s->mv[dir][2 * i + j][1],
2542
                                       block_s, mb_y);
2543
                }
2544
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2545
            }
2546
        } else {
2547
            for (i = 0; i < 2; i++) {
2548
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2549
                                   0, 0, s->picture_structure != i + 1,
2550
                                   ref_picture, pix_op,
2551
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2552
                                   2 * block_s, mb_y >> 1);
2553
 
2554
                // after put we make avg of the same block
2555
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2556
 
2557
                // opposite parity is always in the same
2558
                // frame if this is second field
2559
                if (!s->first_field) {
2560
                    ref_picture = s->current_picture_ptr->f.data;
2561
                }
2562
            }
2563
        }
2564
        break;
2565
    default:
2566
        av_assert2(0);
2567
    }
2568
}
2569
 
2570
/**
2571
 * find the lowest MB row referenced in the MVs
2572
 */
2573
int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2574
{
2575
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2576
    int my, off, i, mvs;
2577
 
2578
    if (s->picture_structure != PICT_FRAME || s->mcsel)
2579
        goto unhandled;
2580
 
2581
    switch (s->mv_type) {
2582
        case MV_TYPE_16X16:
2583
            mvs = 1;
2584
            break;
2585
        case MV_TYPE_16X8:
2586
            mvs = 2;
2587
            break;
2588
        case MV_TYPE_8X8:
2589
            mvs = 4;
2590
            break;
2591
        default:
2592
            goto unhandled;
2593
    }
2594
 
2595
    for (i = 0; i < mvs; i++) {
2596
        my = s->mv[dir][i][1]<
2597
        my_max = FFMAX(my_max, my);
2598
        my_min = FFMIN(my_min, my);
2599
    }
2600
 
2601
    off = (FFMAX(-my_min, my_max) + 63) >> 6;
2602
 
2603
    return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2604
unhandled:
2605
    return s->mb_height-1;
2606
}
2607
 
2608
/* put block[] to dest[] */
2609
static inline void put_dct(MpegEncContext *s,
2610
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2611
{
2612
    s->dct_unquantize_intra(s, block, i, qscale);
2613
    s->dsp.idct_put (dest, line_size, block);
2614
}
2615
 
2616
/* add block[] to dest[] */
2617
static inline void add_dct(MpegEncContext *s,
2618
                           int16_t *block, int i, uint8_t *dest, int line_size)
2619
{
2620
    if (s->block_last_index[i] >= 0) {
2621
        s->dsp.idct_add (dest, line_size, block);
2622
    }
2623
}
2624
 
2625
static inline void add_dequant_dct(MpegEncContext *s,
2626
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2627
{
2628
    if (s->block_last_index[i] >= 0) {
2629
        s->dct_unquantize_inter(s, block, i, qscale);
2630
 
2631
        s->dsp.idct_add (dest, line_size, block);
2632
    }
2633
}
2634
 
2635
/**
2636
 * Clean dc, ac, coded_block for the current non-intra MB.
2637
 */
2638
void ff_clean_intra_table_entries(MpegEncContext *s)
2639
{
2640
    int wrap = s->b8_stride;
2641
    int xy = s->block_index[0];
2642
 
2643
    s->dc_val[0][xy           ] =
2644
    s->dc_val[0][xy + 1       ] =
2645
    s->dc_val[0][xy     + wrap] =
2646
    s->dc_val[0][xy + 1 + wrap] = 1024;
2647
    /* ac pred */
2648
    memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
2649
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2650
    if (s->msmpeg4_version>=3) {
2651
        s->coded_block[xy           ] =
2652
        s->coded_block[xy + 1       ] =
2653
        s->coded_block[xy     + wrap] =
2654
        s->coded_block[xy + 1 + wrap] = 0;
2655
    }
2656
    /* chroma */
2657
    wrap = s->mb_stride;
2658
    xy = s->mb_x + s->mb_y * wrap;
2659
    s->dc_val[1][xy] =
2660
    s->dc_val[2][xy] = 1024;
2661
    /* ac pred */
2662
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2663
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2664
 
2665
    s->mbintra_table[xy]= 0;
2666
}
2667
 
2668
/* generic function called after a macroblock has been parsed by the
2669
   decoder or after it has been encoded by the encoder.
2670
 
2671
   Important variables used:
2672
   s->mb_intra : true if intra macroblock
2673
   s->mv_dir   : motion vector direction
2674
   s->mv_type  : motion vector type
2675
   s->mv       : motion vector
2676
   s->interlaced_dct : true if interlaced dct used (mpeg2)
2677
 */
2678
static av_always_inline
2679
void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2680
                            int lowres_flag, int is_mpeg12)
2681
{
2682
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2683
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2684
        ff_xvmc_decode_mb(s);//xvmc uses pblocks
2685
        return;
2686
    }
2687
 
2688
    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2689
       /* print DCT coefficients */
2690
       int i,j;
2691
       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2692
       for(i=0; i<6; i++){
2693
           for(j=0; j<64; j++){
2694
               av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2695
           }
2696
           av_log(s->avctx, AV_LOG_DEBUG, "\n");
2697
       }
2698
    }
2699
 
2700
    s->current_picture.qscale_table[mb_xy] = s->qscale;
2701
 
2702
    /* update DC predictors for P macroblocks */
2703
    if (!s->mb_intra) {
2704
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2705
            if(s->mbintra_table[mb_xy])
2706
                ff_clean_intra_table_entries(s);
2707
        } else {
2708
            s->last_dc[0] =
2709
            s->last_dc[1] =
2710
            s->last_dc[2] = 128 << s->intra_dc_precision;
2711
        }
2712
    }
2713
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2714
        s->mbintra_table[mb_xy]=1;
2715
 
2716
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2717
        uint8_t *dest_y, *dest_cb, *dest_cr;
2718
        int dct_linesize, dct_offset;
2719
        op_pixels_func (*op_pix)[4];
2720
        qpel_mc_func (*op_qpix)[16];
2721
        const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2722
        const int uvlinesize = s->current_picture.f.linesize[1];
2723
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2724
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2725
 
2726
        /* avoid copy if macroblock skipped in last frame too */
2727
        /* skip only during decoding as we might trash the buffers during encoding a bit */
2728
        if(!s->encoding){
2729
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2730
 
2731
            if (s->mb_skipped) {
2732
                s->mb_skipped= 0;
2733
                av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2734
                *mbskip_ptr = 1;
2735
            } else if(!s->current_picture.reference) {
2736
                *mbskip_ptr = 1;
2737
            } else{
2738
                *mbskip_ptr = 0; /* not skipped */
2739
            }
2740
        }
2741
 
2742
        dct_linesize = linesize << s->interlaced_dct;
2743
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;
2744
 
2745
        if(readable){
2746
            dest_y=  s->dest[0];
2747
            dest_cb= s->dest[1];
2748
            dest_cr= s->dest[2];
2749
        }else{
2750
            dest_y = s->b_scratchpad;
2751
            dest_cb= s->b_scratchpad+16*linesize;
2752
            dest_cr= s->b_scratchpad+32*linesize;
2753
        }
2754
 
2755
        if (!s->mb_intra) {
2756
            /* motion handling */
2757
            /* decoding or more than one mb_type (MC was already done otherwise) */
2758
            if(!s->encoding){
2759
 
2760
                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2761
                    if (s->mv_dir & MV_DIR_FORWARD) {
2762
                        ff_thread_await_progress(&s->last_picture_ptr->tf,
2763
                                                 ff_MPV_lowest_referenced_row(s, 0),
2764
                                                 0);
2765
                    }
2766
                    if (s->mv_dir & MV_DIR_BACKWARD) {
2767
                        ff_thread_await_progress(&s->next_picture_ptr->tf,
2768
                                                 ff_MPV_lowest_referenced_row(s, 1),
2769
                                                 0);
2770
                    }
2771
                }
2772
 
2773
                if(lowres_flag){
2774
                    h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2775
 
2776
                    if (s->mv_dir & MV_DIR_FORWARD) {
2777
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2778
                        op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2779
                    }
2780
                    if (s->mv_dir & MV_DIR_BACKWARD) {
2781
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
2782
                    }
2783
                }else{
2784
                    op_qpix= s->me.qpel_put;
2785
                    if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2786
                        op_pix = s->hdsp.put_pixels_tab;
2787
                    }else{
2788
                        op_pix = s->hdsp.put_no_rnd_pixels_tab;
2789
                    }
2790
                    if (s->mv_dir & MV_DIR_FORWARD) {
2791
                        ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2792
                        op_pix = s->hdsp.avg_pixels_tab;
2793
                        op_qpix= s->me.qpel_avg;
2794
                    }
2795
                    if (s->mv_dir & MV_DIR_BACKWARD) {
2796
                        ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2797
                    }
2798
                }
2799
            }
2800
 
2801
            /* skip dequant / idct if we are really late ;) */
2802
            if(s->avctx->skip_idct){
2803
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2804
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2805
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
2806
                    goto skip_idct;
2807
            }
2808
 
2809
            /* add dct residue */
2810
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2811
                                || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2812
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
2813
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
2814
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
2815
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2816
 
2817
                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2818
                    if (s->chroma_y_shift){
2819
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2820
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2821
                    }else{
2822
                        dct_linesize >>= 1;
2823
                        dct_offset >>=1;
2824
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
2825
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
2826
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2827
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2828
                    }
2829
                }
2830
            } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2831
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
2832
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
2833
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
2834
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2835
 
2836
                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2837
                    if(s->chroma_y_shift){//Chroma420
2838
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
2839
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
2840
                    }else{
2841
                        //chroma422
2842
                        dct_linesize = uvlinesize << s->interlaced_dct;
2843
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2844
 
2845
                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
2846
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
2847
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2848
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2849
                        if(!s->chroma_x_shift){//Chroma444
2850
                            add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2851
                            add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2852
                            add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2853
                            add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2854
                        }
2855
                    }
2856
                }//fi gray
2857
            }
2858
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2859
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2860
            }
2861
        } else {
2862
            /* dct only in intra block */
2863
            if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2864
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
2865
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
2866
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
2867
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2868
 
2869
                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2870
                    if(s->chroma_y_shift){
2871
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2872
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2873
                    }else{
2874
                        dct_offset >>=1;
2875
                        dct_linesize >>=1;
2876
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
2877
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
2878
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2879
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2880
                    }
2881
                }
2882
            }else{
2883
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
2884
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
2885
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
2886
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2887
 
2888
                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2889
                    if(s->chroma_y_shift){
2890
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2891
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2892
                    }else{
2893
 
2894
                        dct_linesize = uvlinesize << s->interlaced_dct;
2895
                        dct_offset   = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
2896
 
2897
                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
2898
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
2899
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2900
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2901
                        if(!s->chroma_x_shift){//Chroma444
2902
                            s->dsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
2903
                            s->dsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
2904
                            s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2905
                            s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
2906
                        }
2907
                    }
2908
                }//gray
2909
            }
2910
        }
2911
skip_idct:
2912
        if(!readable){
2913
            s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
2914
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2915
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2916
        }
2917
    }
2918
}
2919
 
2920
void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2921
#if !CONFIG_SMALL
2922
    if(s->out_format == FMT_MPEG1) {
2923
        if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2924
        else                 MPV_decode_mb_internal(s, block, 0, 1);
2925
    } else
2926
#endif
2927
    if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2928
    else                  MPV_decode_mb_internal(s, block, 0, 0);
2929
}
2930
 
2931
/**
2932
 * @param h is the normal height, this will be reduced automatically if needed for the last row
2933
 */
2934
void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2935
                        Picture *last, int y, int h, int picture_structure,
2936
                        int first_field, int draw_edges, int low_delay,
2937
                        int v_edge_pos, int h_edge_pos)
2938
{
2939
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2940
    int hshift = desc->log2_chroma_w;
2941
    int vshift = desc->log2_chroma_h;
2942
    const int field_pic = picture_structure != PICT_FRAME;
2943
    if(field_pic){
2944
        h <<= 1;
2945
        y <<= 1;
2946
    }
2947
 
2948
    if (!avctx->hwaccel &&
2949
        !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2950
        draw_edges &&
2951
        cur->reference &&
2952
        !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2953
        int *linesize = cur->f.linesize;
2954
        int sides = 0, edge_h;
2955
        if (y==0) sides |= EDGE_TOP;
2956
        if (y + h >= v_edge_pos)
2957
            sides |= EDGE_BOTTOM;
2958
 
2959
        edge_h= FFMIN(h, v_edge_pos - y);
2960
 
2961
        dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2962
                        linesize[0], h_edge_pos, edge_h,
2963
                        EDGE_WIDTH, EDGE_WIDTH, sides);
2964
        dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2965
                        linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2966
                        EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2967
        dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2968
                        linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2969
                        EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2970
    }
2971
 
2972
    h = FFMIN(h, avctx->height - y);
2973
 
2974
    if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2975
 
2976
    if (avctx->draw_horiz_band) {
2977
        AVFrame *src;
2978
        int offset[AV_NUM_DATA_POINTERS];
2979
        int i;
2980
 
2981
        if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2982
           (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2983
            src = &cur->f;
2984
        else if (last)
2985
            src = &last->f;
2986
        else
2987
            return;
2988
 
2989
        if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2990
            picture_structure == PICT_FRAME &&
2991
            avctx->codec_id != AV_CODEC_ID_SVQ3) {
2992
            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2993
                offset[i] = 0;
2994
        }else{
2995
            offset[0]= y * src->linesize[0];
2996
            offset[1]=
2997
            offset[2]= (y >> vshift) * src->linesize[1];
2998
            for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2999
                offset[i] = 0;
3000
        }
3001
 
3002
        emms_c();
3003
 
3004
        avctx->draw_horiz_band(avctx, src, offset,
3005
                               y, picture_structure, h);
3006
    }
3007
}
3008
 
3009
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3010
{
3011
    int draw_edges = s->unrestricted_mv && !s->intra_only;
3012
    ff_draw_horiz_band(s->avctx, &s->dsp, s->current_picture_ptr,
3013
                       s->last_picture_ptr, y, h, s->picture_structure,
3014
                       s->first_field, draw_edges, s->low_delay,
3015
                       s->v_edge_pos, s->h_edge_pos);
3016
}
3017
 
3018
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3019
    const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
3020
    const int uvlinesize = s->current_picture.f.linesize[1];
3021
    const int mb_size= 4 - s->avctx->lowres;
3022
 
3023
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
3024
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
3025
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3026
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3027
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3028
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3029
    //block_index is not used by mpeg2, so it is not affected by chroma_format
3030
 
3031
    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
3032
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3033
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3034
 
3035
    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3036
    {
3037
        if(s->picture_structure==PICT_FRAME){
3038
        s->dest[0] += s->mb_y *   linesize << mb_size;
3039
        s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3040
        s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3041
        }else{
3042
            s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
3043
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3044
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3045
            av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3046
        }
3047
    }
3048
}
3049
 
3050
/**
3051
 * Permute an 8x8 block.
3052
 * @param block the block which will be permuted according to the given permutation vector
3053
 * @param permutation the permutation vector
3054
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3055
 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3056
 *                  (inverse) permutated to scantable order!
3057
 */
3058
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3059
{
3060
    int i;
3061
    int16_t temp[64];
3062
 
3063
    if(last<=0) return;
3064
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
3065
 
3066
    for(i=0; i<=last; i++){
3067
        const int j= scantable[i];
3068
        temp[j]= block[j];
3069
        block[j]=0;
3070
    }
3071
 
3072
    for(i=0; i<=last; i++){
3073
        const int j= scantable[i];
3074
        const int perm_j= permutation[j];
3075
        block[perm_j]= temp[j];
3076
    }
3077
}
3078
 
3079
void ff_mpeg_flush(AVCodecContext *avctx){
3080
    int i;
3081
    MpegEncContext *s = avctx->priv_data;
3082
 
3083
    if(s==NULL || s->picture==NULL)
3084
        return;
3085
 
3086
    for (i = 0; i < MAX_PICTURE_COUNT; i++)
3087
        ff_mpeg_unref_picture(s, &s->picture[i]);
3088
    s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3089
 
3090
    ff_mpeg_unref_picture(s, &s->current_picture);
3091
    ff_mpeg_unref_picture(s, &s->last_picture);
3092
    ff_mpeg_unref_picture(s, &s->next_picture);
3093
 
3094
    s->mb_x= s->mb_y= 0;
3095
    s->closed_gop= 0;
3096
 
3097
    s->parse_context.state= -1;
3098
    s->parse_context.frame_start_found= 0;
3099
    s->parse_context.overread= 0;
3100
    s->parse_context.overread_index= 0;
3101
    s->parse_context.index= 0;
3102
    s->parse_context.last_index= 0;
3103
    s->bitstream_buffer_size=0;
3104
    s->pp_time=0;
3105
}
3106
 
3107
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
3108
                                   int16_t *block, int n, int qscale)
3109
{
3110
    int i, level, nCoeffs;
3111
    const uint16_t *quant_matrix;
3112
 
3113
    nCoeffs= s->block_last_index[n];
3114
 
3115
    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3116
    /* XXX: only mpeg1 */
3117
    quant_matrix = s->intra_matrix;
3118
    for(i=1;i<=nCoeffs;i++) {
3119
        int j= s->intra_scantable.permutated[i];
3120
        level = block[j];
3121
        if (level) {
3122
            if (level < 0) {
3123
                level = -level;
3124
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
3125
                level = (level - 1) | 1;
3126
                level = -level;
3127
            } else {
3128
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
3129
                level = (level - 1) | 1;
3130
            }
3131
            block[j] = level;
3132
        }
3133
    }
3134
}
3135
 
3136
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
3137
                                   int16_t *block, int n, int qscale)
3138
{
3139
    int i, level, nCoeffs;
3140
    const uint16_t *quant_matrix;
3141
 
3142
    nCoeffs= s->block_last_index[n];
3143
 
3144
    quant_matrix = s->inter_matrix;
3145
    for(i=0; i<=nCoeffs; i++) {
3146
        int j= s->intra_scantable.permutated[i];
3147
        level = block[j];
3148
        if (level) {
3149
            if (level < 0) {
3150
                level = -level;
3151
                level = (((level << 1) + 1) * qscale *
3152
                         ((int) (quant_matrix[j]))) >> 4;
3153
                level = (level - 1) | 1;
3154
                level = -level;
3155
            } else {
3156
                level = (((level << 1) + 1) * qscale *
3157
                         ((int) (quant_matrix[j]))) >> 4;
3158
                level = (level - 1) | 1;
3159
            }
3160
            block[j] = level;
3161
        }
3162
    }
3163
}
3164
 
3165
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
3166
                                   int16_t *block, int n, int qscale)
3167
{
3168
    int i, level, nCoeffs;
3169
    const uint16_t *quant_matrix;
3170
 
3171
    if(s->alternate_scan) nCoeffs= 63;
3172
    else nCoeffs= s->block_last_index[n];
3173
 
3174
    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3175
    quant_matrix = s->intra_matrix;
3176
    for(i=1;i<=nCoeffs;i++) {
3177
        int j= s->intra_scantable.permutated[i];
3178
        level = block[j];
3179
        if (level) {
3180
            if (level < 0) {
3181
                level = -level;
3182
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
3183
                level = -level;
3184
            } else {
3185
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
3186
            }
3187
            block[j] = level;
3188
        }
3189
    }
3190
}
3191
 
3192
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
3193
                                   int16_t *block, int n, int qscale)
3194
{
3195
    int i, level, nCoeffs;
3196
    const uint16_t *quant_matrix;
3197
    int sum=-1;
3198
 
3199
    if(s->alternate_scan) nCoeffs= 63;
3200
    else nCoeffs= s->block_last_index[n];
3201
 
3202
    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3203
    sum += block[0];
3204
    quant_matrix = s->intra_matrix;
3205
    for(i=1;i<=nCoeffs;i++) {
3206
        int j= s->intra_scantable.permutated[i];
3207
        level = block[j];
3208
        if (level) {
3209
            if (level < 0) {
3210
                level = -level;
3211
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
3212
                level = -level;
3213
            } else {
3214
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
3215
            }
3216
            block[j] = level;
3217
            sum+=level;
3218
        }
3219
    }
3220
    block[63]^=sum&1;
3221
}
3222
 
3223
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
3224
                                   int16_t *block, int n, int qscale)
3225
{
3226
    int i, level, nCoeffs;
3227
    const uint16_t *quant_matrix;
3228
    int sum=-1;
3229
 
3230
    if(s->alternate_scan) nCoeffs= 63;
3231
    else nCoeffs= s->block_last_index[n];
3232
 
3233
    quant_matrix = s->inter_matrix;
3234
    for(i=0; i<=nCoeffs; i++) {
3235
        int j= s->intra_scantable.permutated[i];
3236
        level = block[j];
3237
        if (level) {
3238
            if (level < 0) {
3239
                level = -level;
3240
                level = (((level << 1) + 1) * qscale *
3241
                         ((int) (quant_matrix[j]))) >> 4;
3242
                level = -level;
3243
            } else {
3244
                level = (((level << 1) + 1) * qscale *
3245
                         ((int) (quant_matrix[j]))) >> 4;
3246
            }
3247
            block[j] = level;
3248
            sum+=level;
3249
        }
3250
    }
3251
    block[63]^=sum&1;
3252
}
3253
 
3254
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
3255
                                  int16_t *block, int n, int qscale)
3256
{
3257
    int i, level, qmul, qadd;
3258
    int nCoeffs;
3259
 
3260
    av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
3261
 
3262
    qmul = qscale << 1;
3263
 
3264
    if (!s->h263_aic) {
3265
        block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3266
        qadd = (qscale - 1) | 1;
3267
    }else{
3268
        qadd = 0;
3269
    }
3270
    if(s->ac_pred)
3271
        nCoeffs=63;
3272
    else
3273
        nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3274
 
3275
    for(i=1; i<=nCoeffs; i++) {
3276
        level = block[i];
3277
        if (level) {
3278
            if (level < 0) {
3279
                level = level * qmul - qadd;
3280
            } else {
3281
                level = level * qmul + qadd;
3282
            }
3283
            block[i] = level;
3284
        }
3285
    }
3286
}
3287
 
3288
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
3289
                                  int16_t *block, int n, int qscale)
3290
{
3291
    int i, level, qmul, qadd;
3292
    int nCoeffs;
3293
 
3294
    av_assert2(s->block_last_index[n]>=0);
3295
 
3296
    qadd = (qscale - 1) | 1;
3297
    qmul = qscale << 1;
3298
 
3299
    nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3300
 
3301
    for(i=0; i<=nCoeffs; i++) {
3302
        level = block[i];
3303
        if (level) {
3304
            if (level < 0) {
3305
                level = level * qmul - qadd;
3306
            } else {
3307
                level = level * qmul + qadd;
3308
            }
3309
            block[i] = level;
3310
        }
3311
    }
3312
}
3313
 
3314
/**
3315
 * set qscale and update qscale dependent variables.
3316
 */
3317
void ff_set_qscale(MpegEncContext * s, int qscale)
3318
{
3319
    if (qscale < 1)
3320
        qscale = 1;
3321
    else if (qscale > 31)
3322
        qscale = 31;
3323
 
3324
    s->qscale = qscale;
3325
    s->chroma_qscale= s->chroma_qscale_table[qscale];
3326
 
3327
    s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3328
    s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
3329
}
3330
 
3331
void ff_MPV_report_decode_progress(MpegEncContext *s)
3332
{
3333
    if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3334
        ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3335
}
3336
 
3337
#if CONFIG_ERROR_RESILIENCE
3338
void ff_mpeg_er_frame_start(MpegEncContext *s)
3339
{
3340
    ERContext *er = &s->er;
3341
 
3342
    er->cur_pic  = s->current_picture_ptr;
3343
    er->last_pic = s->last_picture_ptr;
3344
    er->next_pic = s->next_picture_ptr;
3345
 
3346
    er->pp_time           = s->pp_time;
3347
    er->pb_time           = s->pb_time;
3348
    er->quarter_sample    = s->quarter_sample;
3349
    er->partitioned_frame = s->partitioned_frame;
3350
 
3351
    ff_er_frame_start(er);
3352
}
3353
#endif /* CONFIG_ERROR_RESILIENCE */