[ffmpeg.git] / libavcodec / mpegvideo.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
32 #include "avcodec.h"
33 #include "dsputil.h"
34 #include "internal.h"
35 #include "mpegvideo.h"
36 #include "mjpegenc.h"
37 #include "msmpeg4.h"
38 #include "xvmc_internal.h"
39 #include "thread.h"
40 #include <limits.h>
41
42 //#undef NDEBUG
43 //#include <assert.h>
44
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46                                    DCTELEM *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48                                    DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50                                    DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52                                    DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54                                    DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56                                   DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58                                   DCTELEM *block, int n, int qscale);
59
60
61 /* enable all paranoid tests for rounding, overflows, etc... */
62 //#define PARANOID
63
64 //#define DEBUG
65
66
67 static const uint8_t ff_default_chroma_qscale_table[32] = {
68 //   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
69      0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
70     16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
71 };
72
73 const uint8_t ff_mpeg1_dc_scale_table[128] = {
74 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
75     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
83 };
84
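/* The three tables below, together with ff_mpeg1_dc_scale_table above, are
 * selected via ff_mpeg2_dc_scale_table[] by the MPEG-2 intra_dc_precision
 * field (0..3): the DC quantizer divisor is 8, 4, 2 or 1 respectively,
 * i.e. precision 3 keeps the full 11-bit DC value. */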
85 static const uint8_t mpeg2_dc_scale_table1[128] = {
86 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
87     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
95 };
96
97 static const uint8_t mpeg2_dc_scale_table2[128] = {
98 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
99     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
107 };
108
109 static const uint8_t mpeg2_dc_scale_table3[128] = {
110 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
111     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 };
120
121 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
122     ff_mpeg1_dc_scale_table,
123     mpeg2_dc_scale_table1,
124     mpeg2_dc_scale_table2,
125     mpeg2_dc_scale_table3,
126 };
127
128 const enum PixelFormat ff_pixfmt_list_420[] = {
129     PIX_FMT_YUV420P,
130     PIX_FMT_NONE
131 };
132
133 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
134     PIX_FMT_DXVA2_VLD,
135     PIX_FMT_VAAPI_VLD,
136     PIX_FMT_VDA_VLD,
137     PIX_FMT_YUV420P,
138     PIX_FMT_NONE
139 };
140
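/* Scan [p, end) for an MPEG-style 0x000001 start code prefix. *state carries
 * the last bytes seen, so prefixes that straddle two consecutive buffers are
 * still found; on return it holds the last four bytes consumed (the start
 * code, if one was found) and the returned pointer points just past them.
 * The main loop advances up to three bytes per iteration by checking whether
 * the bytes just read could possibly complete a 00 00 01 prefix. */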
141 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
142                                           const uint8_t *end,
143                                           uint32_t * restrict state)
144 {
145     int i;
146
147     assert(p <= end);
148     if (p >= end)
149         return end;
150
151     for (i = 0; i < 3; i++) {
152         uint32_t tmp = *state << 8;
153         *state = tmp + *(p++);
154         if (tmp == 0x100 || p == end)
155             return p;
156     }
157
158     while (p < end) {
159         if      (p[-1] > 1      ) p += 3;
160         else if (p[-2]          ) p += 2;
161         else if (p[-3]|(p[-1]-1)) p++;
162         else {
163             p++;
164             break;
165         }
166     }
167
168     p = FFMIN(p, end) - 4;
169     *state = AV_RB32(p);
170
171     return p + 4;
172 }
173
174 /* init common dct for both encoder and decoder */
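/* This selects the C dequantize functions (the arch-specific init calls below
 * may override them) and builds the IDCT-permuted scan tables. */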
175 av_cold int ff_dct_common_init(MpegEncContext *s)
176 {
177     ff_dsputil_init(&s->dsp, s->avctx);
178
179     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
180     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
181     s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
182     s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
183     s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
184     if (s->flags & CODEC_FLAG_BITEXACT)
185         s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
186     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
187
188 #if ARCH_X86
189     ff_MPV_common_init_x86(s);
190 #elif ARCH_ALPHA
191     ff_MPV_common_init_axp(s);
192 #elif HAVE_MMI
193     ff_MPV_common_init_mmi(s);
194 #elif ARCH_ARM
195     ff_MPV_common_init_arm(s);
196 #elif HAVE_ALTIVEC
197     ff_MPV_common_init_altivec(s);
198 #elif ARCH_BFIN
199     ff_MPV_common_init_bfin(s);
200 #endif
201
202     /* load & permute the scan tables
203      * note: only WMV uses different ones
204      */
205     if (s->alternate_scan) {
206         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
207         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
208     } else {
209         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
210         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
211     }
212     ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
213     ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
214
215     return 0;
216 }
217
218 void ff_copy_picture(Picture *dst, Picture *src)
219 {
220     *dst = *src;
221     dst->f.type = FF_BUFFER_TYPE_COPY;
222 }
223
224 /**
225  * Release a frame buffer
226  */
227 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
228 {
229     /* WM Image / Screen codecs allocate internal buffers with different
230      * dimensions / colorspaces; ignore user-defined callbacks for these. */
231     if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
232         s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
233         s->codec_id != AV_CODEC_ID_MSS2)
234         ff_thread_release_buffer(s->avctx, &pic->f);
235     else
236         avcodec_default_release_buffer(s->avctx, &pic->f);
237     av_freep(&pic->f.hwaccel_picture_private);
238 }
239
240 /**
241  * Allocate a frame buffer
242  */
243 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
244 {
245     int r;
246
247     if (s->avctx->hwaccel) {
248         assert(!pic->f.hwaccel_picture_private);
249         if (s->avctx->hwaccel->priv_data_size) {
250             pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
251             if (!pic->f.hwaccel_picture_private) {
252                 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
253                 return -1;
254             }
255         }
256     }
257
258     if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
259         s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
260         s->codec_id != AV_CODEC_ID_MSS2)
261         r = ff_thread_get_buffer(s->avctx, &pic->f);
262     else
263         r = avcodec_default_get_buffer(s->avctx, &pic->f);
264
265     if (r < 0 || !pic->f.type || !pic->f.data[0]) {
266         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
267                r, pic->f.type, pic->f.data[0]);
268         av_freep(&pic->f.hwaccel_picture_private);
269         return -1;
270     }
271
272     if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
273                         s->uvlinesize != pic->f.linesize[1])) {
274         av_log(s->avctx, AV_LOG_ERROR,
275                "get_buffer() failed (stride changed)\n");
276         free_frame_buffer(s, pic);
277         return -1;
278     }
279
280     if (pic->f.linesize[1] != pic->f.linesize[2]) {
281         av_log(s->avctx, AV_LOG_ERROR,
282                "get_buffer() failed (uv stride mismatch)\n");
283         free_frame_buffer(s, pic);
284         return -1;
285     }
286
287     return 0;
288 }
289
290 /**
291  * Allocate a Picture.
292  * The pixels are allocated/set by calling get_buffer() if shared = 0
293  */
294 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
295 {
296     const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
297
298     // the + 1 is needed so memset(,,stride*height) does not crash with SIGSEGV
299
300     const int mb_array_size = s->mb_stride * s->mb_height;
301     const int b8_array_size = s->b8_stride * s->mb_height * 2;
302     const int b4_array_size = s->b4_stride * s->mb_height * 4;
303     int i;
304     int r = -1;
305
306     if (shared) {
307         assert(pic->f.data[0]);
308         assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
309         pic->f.type = FF_BUFFER_TYPE_SHARED;
310     } else {
311         assert(!pic->f.data[0]);
312
313         if (alloc_frame_buffer(s, pic) < 0)
314             return -1;
315
316         s->linesize   = pic->f.linesize[0];
317         s->uvlinesize = pic->f.linesize[1];
318     }
319
320     if (pic->f.qscale_table == NULL) {
321         if (s->encoding) {
322             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
323                               mb_array_size * sizeof(int16_t), fail)
324             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
325                               mb_array_size * sizeof(int16_t), fail)
326             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
327                               mb_array_size * sizeof(int8_t ), fail)
328         }
329
330         FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
331                           mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
332         FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
333                           (big_mb_num + s->mb_stride) * sizeof(uint8_t),
334                           fail)
335         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
336                           (big_mb_num + s->mb_stride) * sizeof(uint32_t),
337                           fail)
338         pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
339         pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
340         if (s->out_format == FMT_H264) {
341             for (i = 0; i < 2; i++) {
342                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
343                                   2 * (b4_array_size + 4) * sizeof(int16_t),
344                                   fail)
345                 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
346                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
347                                   4 * mb_array_size * sizeof(uint8_t), fail)
348             }
349             pic->f.motion_subsample_log2 = 2;
350         } else if (s->out_format == FMT_H263 || s->encoding ||
351                    (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
352             for (i = 0; i < 2; i++) {
353                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
354                                   2 * (b8_array_size + 4) * sizeof(int16_t),
355                                   fail)
356                 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
357                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
358                                   4 * mb_array_size * sizeof(uint8_t), fail)
359             }
360             pic->f.motion_subsample_log2 = 3;
361         }
362         if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
363             FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
364                               64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
365         }
366         pic->f.qstride = s->mb_stride;
367         FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
368                           1 * sizeof(AVPanScan), fail)
369     }
370
371     pic->owner2 = s;
372
373     return 0;
374 fail: // for  the FF_ALLOCZ_OR_GOTO macro
375     if (r >= 0)
376         free_frame_buffer(s, pic);
377     return -1;
378 }
379
380 /**
381  * Deallocate a picture.
382  */
383 static void free_picture(MpegEncContext *s, Picture *pic)
384 {
385     int i;
386
387     if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
388         free_frame_buffer(s, pic);
389     }
390
391     av_freep(&pic->mb_var);
392     av_freep(&pic->mc_mb_var);
393     av_freep(&pic->mb_mean);
394     av_freep(&pic->f.mbskip_table);
395     av_freep(&pic->qscale_table_base);
396     av_freep(&pic->mb_type_base);
397     av_freep(&pic->f.dct_coeff);
398     av_freep(&pic->f.pan_scan);
399     pic->f.mb_type = NULL;
400     for (i = 0; i < 2; i++) {
401         av_freep(&pic->motion_val_base[i]);
402         av_freep(&pic->f.ref_index[i]);
403     }
404
405     if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
406         for (i = 0; i < 4; i++) {
407             pic->f.base[i] =
408             pic->f.data[i] = NULL;
409         }
410         pic->f.type = 0;
411     }
412 }
413
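/* Allocate the per-context scratch buffers: the edge emulation buffer, the
 * motion-estimation scratchpads and maps, the DCT block array and, for
 * FMT_H263, the AC prediction values.  Every slice context gets its own set,
 * so these buffers never need to be shared between threads. */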
414 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
415 {
416     int y_size = s->b8_stride * (2 * s->mb_height + 1);
417     int c_size = s->mb_stride * (s->mb_height + 1);
418     int yc_size = y_size + 2 * c_size;
419     int i;
420
421     // edge emu needs blocksize + filter length - 1
422     // (= 17x17 for  halfpel / 21x21 for  h264)
423     FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
424                       (s->width + 64) * 2 * 21 * 2, fail);    // (width + edge + align)*interlaced*MBsize*tolerance
425
426     // FIXME should be linesize instead of s->width * 2
427     // but that is not known before get_buffer()
428     FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
429                       (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
430     s->me.temp         = s->me.scratchpad;
431     s->rd_scratchpad   = s->me.scratchpad;
432     s->b_scratchpad    = s->me.scratchpad;
433     s->obmc_scratchpad = s->me.scratchpad + 16;
434     if (s->encoding) {
435         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
436                           ME_MAP_SIZE * sizeof(uint32_t), fail)
437         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
438                           ME_MAP_SIZE * sizeof(uint32_t), fail)
439         if (s->avctx->noise_reduction) {
440             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
441                               2 * 64 * sizeof(int), fail)
442         }
443     }
444     FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
445     s->block = s->blocks[0];
446
447     for (i = 0; i < 12; i++) {
448         s->pblocks[i] = &s->block[i];
449     }
450
451     if (s->out_format == FMT_H263) {
452         /* ac values */
453         FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
454                           yc_size * sizeof(int16_t) * 16, fail);
455         s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
456         s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
457         s->ac_val[2] = s->ac_val[1] + c_size;
458     }
459
460     return 0;
461 fail:
462     return -1; // free() through ff_MPV_common_end()
463 }
464
465 static void free_duplicate_context(MpegEncContext *s)
466 {
467     if (s == NULL)
468         return;
469
470     av_freep(&s->edge_emu_buffer);
471     av_freep(&s->me.scratchpad);
472     s->me.temp =
473     s->rd_scratchpad =
474     s->b_scratchpad =
475     s->obmc_scratchpad = NULL;
476
477     av_freep(&s->dct_error_sum);
478     av_freep(&s->me.map);
479     av_freep(&s->me.score_map);
480     av_freep(&s->blocks);
481     av_freep(&s->ac_val_base);
482     s->block = NULL;
483 }
484
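/* backup_duplicate_context() saves only the per-context pointers and per-slice
 * state listed below; ff_update_duplicate_context() uses it to copy the shared
 * state from src with a plain memcpy() and then restore the destination's own
 * scratch buffers and block pointers. */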
485 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
486 {
487 #define COPY(a) bak->a = src->a
488     COPY(edge_emu_buffer);
489     COPY(me.scratchpad);
490     COPY(me.temp);
491     COPY(rd_scratchpad);
492     COPY(b_scratchpad);
493     COPY(obmc_scratchpad);
494     COPY(me.map);
495     COPY(me.score_map);
496     COPY(blocks);
497     COPY(block);
498     COPY(start_mb_y);
499     COPY(end_mb_y);
500     COPY(me.map_generation);
501     COPY(pb);
502     COPY(dct_error_sum);
503     COPY(dct_count[0]);
504     COPY(dct_count[1]);
505     COPY(ac_val_base);
506     COPY(ac_val[0]);
507     COPY(ac_val[1]);
508     COPY(ac_val[2]);
509 #undef COPY
510 }
511
512 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
513 {
514     MpegEncContext bak;
515     int i;
516     // FIXME copy only needed parts
517     // START_TIMER
518     backup_duplicate_context(&bak, dst);
519     memcpy(dst, src, sizeof(MpegEncContext));
520     backup_duplicate_context(dst, &bak);
521     for (i = 0; i < 12; i++) {
522         dst->pblocks[i] = &dst->block[i];
523     }
524     // STOP_TIMER("update_duplicate_context")
525     // about 10k cycles / 0.01 sec for 1000 frames at 1 GHz with 2 threads
526 }
527
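/* Used by frame multithreading to carry the decoder state over from the
 * context that handled the previous frame.  REBASE_PICTURE() translates
 * Picture pointers from s1's picture array into the corresponding slots of
 * this context's array. */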
528 int ff_mpeg_update_thread_context(AVCodecContext *dst,
529                                   const AVCodecContext *src)
530 {
531     MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
532
533     if (dst == src || !s1->context_initialized)
534         return 0;
535
536     // FIXME can parameters change on I-frames?
537     // in that case dst may need a reinit
538     if (!s->context_initialized) {
539         memcpy(s, s1, sizeof(MpegEncContext));
540
541         s->avctx                 = dst;
542         s->picture_range_start  += MAX_PICTURE_COUNT;
543         s->picture_range_end    += MAX_PICTURE_COUNT;
544         s->bitstream_buffer      = NULL;
545         s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
546
547         ff_MPV_common_init(s);
548     }
549
550     s->avctx->coded_height  = s1->avctx->coded_height;
551     s->avctx->coded_width   = s1->avctx->coded_width;
552     s->avctx->width         = s1->avctx->width;
553     s->avctx->height        = s1->avctx->height;
554
555     s->coded_picture_number = s1->coded_picture_number;
556     s->picture_number       = s1->picture_number;
557     s->input_picture_number = s1->input_picture_number;
558
559     memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
560     memcpy(&s->last_picture, &s1->last_picture,
561            (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
562
563     s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
564     s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
565     s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
566
567     // Error/bug resilience
568     s->next_p_frame_damaged = s1->next_p_frame_damaged;
569     s->workaround_bugs      = s1->workaround_bugs;
570
571     // MPEG4 timing info
572     memcpy(&s->time_increment_bits, &s1->time_increment_bits,
573            (char *) &s1->shape - (char *) &s1->time_increment_bits);
574
575     // B-frame info
576     s->max_b_frames = s1->max_b_frames;
577     s->low_delay    = s1->low_delay;
578     s->dropable     = s1->dropable;
579
580     // DivX handling (doesn't work)
581     s->divx_packed  = s1->divx_packed;
582
583     if (s1->bitstream_buffer) {
584         if (s1->bitstream_buffer_size +
585             FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
586             av_fast_malloc(&s->bitstream_buffer,
587                            &s->allocated_bitstream_buffer_size,
588                            s1->allocated_bitstream_buffer_size);
589         s->bitstream_buffer_size = s1->bitstream_buffer_size;
590         memcpy(s->bitstream_buffer, s1->bitstream_buffer,
591                s1->bitstream_buffer_size);
592         memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
593                FF_INPUT_BUFFER_PADDING_SIZE);
594     }
595
596     // MPEG2/interlacing info
597     memcpy(&s->progressive_sequence, &s1->progressive_sequence,
598            (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
599
600     if (!s1->first_field) {
601         s->last_pict_type = s1->pict_type;
602         if (s1->current_picture_ptr)
603             s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
604
605         if (s1->pict_type != AV_PICTURE_TYPE_B) {
606             s->last_non_b_pict_type = s1->pict_type;
607         }
608     }
609
610     return 0;
611 }
612
613 /**
614  * Set the given MpegEncContext to common defaults
615  * (same for encoding and decoding).
616  * The changed fields will not depend upon the
617  * prior state of the MpegEncContext.
618  */
619 void ff_MPV_common_defaults(MpegEncContext *s)
620 {
621     s->y_dc_scale_table      =
622     s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
623     s->chroma_qscale_table   = ff_default_chroma_qscale_table;
624     s->progressive_frame     = 1;
625     s->progressive_sequence  = 1;
626     s->picture_structure     = PICT_FRAME;
627
628     s->coded_picture_number  = 0;
629     s->picture_number        = 0;
630     s->input_picture_number  = 0;
631
632     s->picture_in_gop_number = 0;
633
634     s->f_code                = 1;
635     s->b_code                = 1;
636
637     s->picture_range_start   = 0;
638     s->picture_range_end     = MAX_PICTURE_COUNT;
639
640     s->slice_context_count   = 1;
641 }
642
643 /**
644  * Set the given MpegEncContext to defaults for decoding.
645  * the changed fields will not depend upon
646  * the prior state of the MpegEncContext.
647  */
648 void ff_MPV_decode_defaults(MpegEncContext *s)
649 {
650     ff_MPV_common_defaults(s);
651 }
652
653 /**
654  * init common structure for both encoder and decoder.
655  * this assumes that some variables like width/height are already set
656  */
657 av_cold int ff_MPV_common_init(MpegEncContext *s)
658 {
659     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
660     int nb_slices = (HAVE_THREADS &&
661                      s->avctx->active_thread_type & FF_THREAD_SLICE) ?
662                     s->avctx->thread_count : 1;
663
664     if (s->encoding && s->avctx->slices)
665         nb_slices = s->avctx->slices;
666
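    /* For interlaced MPEG-2 sequences the height is rounded up to a multiple
     * of 32 lines, so that field pictures, whose macroblocks span 32 frame
     * lines, always cover a whole number of macroblock rows. */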
667     if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
668         s->mb_height = (s->height + 31) / 32 * 2;
669     else if (s->codec_id != AV_CODEC_ID_H264)
670         s->mb_height = (s->height + 15) / 16;
671
672     if (s->avctx->pix_fmt == PIX_FMT_NONE) {
673         av_log(s->avctx, AV_LOG_ERROR,
674                "decoding to PIX_FMT_NONE is not supported.\n");
675         return -1;
676     }
677
678     if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
679         int max_slices;
680         if (s->mb_height)
681             max_slices = FFMIN(MAX_THREADS, s->mb_height);
682         else
683             max_slices = MAX_THREADS;
684         av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
685                " reducing to %d\n", nb_slices, max_slices);
686         nb_slices = max_slices;
687     }
688
689     if ((s->width || s->height) &&
690         av_image_check_size(s->width, s->height, 0, s->avctx))
691         return -1;
692
693     ff_dct_common_init(s);
694
695     s->flags  = s->avctx->flags;
696     s->flags2 = s->avctx->flags2;
697
698     if (s->width && s->height) {
699         s->mb_width   = (s->width + 15) / 16;
700         s->mb_stride  = s->mb_width + 1;
701         s->b8_stride  = s->mb_width * 2 + 1;
702         s->b4_stride  = s->mb_width * 4 + 1;
703         mb_array_size = s->mb_height * s->mb_stride;
704         mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
705
706         /* set chroma shifts */
707         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
708                                       &s->chroma_y_shift);
709
710         /* set default edge pos, will be overridden
711          * in decode_header if needed */
712         s->h_edge_pos = s->mb_width * 16;
713         s->v_edge_pos = s->mb_height * 16;
714
715         s->mb_num     = s->mb_width * s->mb_height;
716
717         s->block_wrap[0] =
718         s->block_wrap[1] =
719         s->block_wrap[2] =
720         s->block_wrap[3] = s->b8_stride;
721         s->block_wrap[4] =
722         s->block_wrap[5] = s->mb_stride;
723
724         y_size  = s->b8_stride * (2 * s->mb_height + 1);
725         c_size  = s->mb_stride * (s->mb_height + 1);
726         yc_size = y_size + 2   * c_size;
727
728         /* convert fourcc to upper case */
729         s->codec_tag          = avpriv_toupper4(s->avctx->codec_tag);
730
731         s->stream_codec_tag   = avpriv_toupper4(s->avctx->stream_codec_tag);
732
733         s->avctx->coded_frame = &s->current_picture.f;
734
735         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
736                           fail); // error resilience code looks cleaner with this
737         for (y = 0; y < s->mb_height; y++)
738             for (x = 0; x < s->mb_width; x++)
739                 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
740
741         s->mb_index2xy[s->mb_height * s->mb_width] =
742                        (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
743
744         if (s->encoding) {
745             /* Allocate MV tables */
746             FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
747                               mv_table_size * 2 * sizeof(int16_t), fail);
748             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
749                               mv_table_size * 2 * sizeof(int16_t), fail);
750             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
751                               mv_table_size * 2 * sizeof(int16_t), fail);
752             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
753                               mv_table_size * 2 * sizeof(int16_t), fail);
754             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
755                               mv_table_size * 2 * sizeof(int16_t), fail);
756             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
757                               mv_table_size * 2 * sizeof(int16_t), fail);
758             s->p_mv_table            = s->p_mv_table_base +
759                                        s->mb_stride + 1;
760             s->b_forw_mv_table       = s->b_forw_mv_table_base +
761                                        s->mb_stride + 1;
762             s->b_back_mv_table       = s->b_back_mv_table_base +
763                                        s->mb_stride + 1;
764             s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
765                                        s->mb_stride + 1;
766             s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
767                                        s->mb_stride + 1;
768             s->b_direct_mv_table     = s->b_direct_mv_table_base +
769                                        s->mb_stride + 1;
770
771             if (s->msmpeg4_version) {
772                 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
773                                   2 * 2 * (MAX_LEVEL + 1) *
774                                   (MAX_RUN + 1) * 2 * sizeof(int), fail);
775             }
776             FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
777
778             /* Allocate MB type table */
779             FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
780                               sizeof(uint16_t), fail); // needed for encoding
781
782             FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
783                               sizeof(int), fail);
784
785             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
786                               64 * 32   * sizeof(int), fail);
787             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
788                               64 * 32   * sizeof(int), fail);
789             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
790                               64 * 32 * 2 * sizeof(uint16_t), fail);
791             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
792                               64 * 32 * 2 * sizeof(uint16_t), fail);
793             FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
794                               MAX_PICTURE_COUNT * sizeof(Picture *), fail);
795             FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
796                               MAX_PICTURE_COUNT * sizeof(Picture *), fail);
797
798             if (s->avctx->noise_reduction) {
799                 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
800                                   2 * 64 * sizeof(uint16_t), fail);
801             }
802
803             FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
804                              mb_array_size * sizeof(float), fail);
805             FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
806                              mb_array_size * sizeof(float), fail);
807         }
808     }
809
810     s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
811     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
812                       s->picture_count * sizeof(Picture), fail);
813     for (i = 0; i < s->picture_count; i++) {
814         avcodec_get_frame_defaults(&s->picture[i].f);
815     }
816
817     if (s->width && s->height) {
818         FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
819                          mb_array_size * sizeof(uint8_t), fail);
820         FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
821                           mb_array_size * sizeof(uint8_t), fail);
822
823         if (s->codec_id == AV_CODEC_ID_MPEG4 ||
824             (s->flags & CODEC_FLAG_INTERLACED_ME)) {
825             /* interlaced direct mode decoding tables */
826             for (i = 0; i < 2; i++) {
827                 int j, k;
828                 for (j = 0; j < 2; j++) {
829                     for (k = 0; k < 2; k++) {
830                         FF_ALLOCZ_OR_GOTO(s->avctx,
831                                           s->b_field_mv_table_base[i][j][k],
832                                           mv_table_size * 2 * sizeof(int16_t),
833                                           fail);
834                         s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
835                                                        s->mb_stride + 1;
836                     }
837                     FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
838                                       mb_array_size * 2 * sizeof(uint8_t),
839                                       fail);
840                     FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
841                                       mv_table_size * 2 * sizeof(int16_t),
842                                       fail);
843                     s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
844                                                 + s->mb_stride + 1;
845                 }
846                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
847                                   mb_array_size * 2 * sizeof(uint8_t),
848                                   fail);
849             }
850         }
851         if (s->out_format == FMT_H263) {
852             /* cbp values */
853             FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
854             s->coded_block = s->coded_block_base + s->b8_stride + 1;
855
856             /* cbp, ac_pred, pred_dir */
857             FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
858                               mb_array_size * sizeof(uint8_t), fail);
859             FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
860                               mb_array_size * sizeof(uint8_t), fail);
861         }
862
863         if (s->h263_pred || s->h263_plus || !s->encoding) {
864             /* dc values */
865             // MN: we need these for  error resilience of intra-frames
866             FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
867                               yc_size * sizeof(int16_t), fail);
868             s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
869             s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
870             s->dc_val[2] = s->dc_val[1] + c_size;
871             for (i = 0; i < yc_size; i++)
872                 s->dc_val_base[i] = 1024;
873         }
874
875         /* which mb is an intra block */
876         FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
877         memset(s->mbintra_table, 1, mb_array_size);
878
879         /* init macroblock skip table */
880         FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
881         // Note: the + 2 is for a quicker mpeg4 slice_end detection
882
883         s->parse_context.state = -1;
884         if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
885             s->avctx->debug_mv) {
886             s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
887                         2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
888             s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
889                         2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
890             s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
891                         2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
892         }
893     }
894
895     s->context_initialized = 1;
896     s->thread_context[0]   = s;
897
898     if (s->width && s->height) {
899         if (nb_slices > 1) {
900             for (i = 1; i < nb_slices; i++) {
901                 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
902                 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
903             }
904
905             for (i = 0; i < nb_slices; i++) {
906                 if (init_duplicate_context(s->thread_context[i], s) < 0)
907                     goto fail;
908                 s->thread_context[i]->start_mb_y =
909                     (s->mb_height * (i) + nb_slices / 2) / nb_slices;
910                 s->thread_context[i]->end_mb_y   =
911                     (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
912             }
913         } else {
914             if (init_duplicate_context(s, s) < 0)
915                 goto fail;
916             s->start_mb_y = 0;
917             s->end_mb_y   = s->mb_height;
918         }
919         s->slice_context_count = nb_slices;
920     }
921
922     return 0;
923  fail:
924     ff_MPV_common_end(s);
925     return -1;
926 }
927
928 /* init common structure for both encoder and decoder */
929 void ff_MPV_common_end(MpegEncContext *s)
930 {
931     int i, j, k;
932
933     if (s->slice_context_count > 1) {
934         for (i = 0; i < s->slice_context_count; i++) {
935             free_duplicate_context(s->thread_context[i]);
936         }
937         for (i = 1; i < s->slice_context_count; i++) {
938             av_freep(&s->thread_context[i]);
939         }
940         s->slice_context_count = 1;
941     } else free_duplicate_context(s);
942
943     av_freep(&s->parse_context.buffer);
944     s->parse_context.buffer_size = 0;
945
946     av_freep(&s->mb_type);
947     av_freep(&s->p_mv_table_base);
948     av_freep(&s->b_forw_mv_table_base);
949     av_freep(&s->b_back_mv_table_base);
950     av_freep(&s->b_bidir_forw_mv_table_base);
951     av_freep(&s->b_bidir_back_mv_table_base);
952     av_freep(&s->b_direct_mv_table_base);
953     s->p_mv_table            = NULL;
954     s->b_forw_mv_table       = NULL;
955     s->b_back_mv_table       = NULL;
956     s->b_bidir_forw_mv_table = NULL;
957     s->b_bidir_back_mv_table = NULL;
958     s->b_direct_mv_table     = NULL;
959     for (i = 0; i < 2; i++) {
960         for (j = 0; j < 2; j++) {
961             for (k = 0; k < 2; k++) {
962                 av_freep(&s->b_field_mv_table_base[i][j][k]);
963                 s->b_field_mv_table[i][j][k] = NULL;
964             }
965             av_freep(&s->b_field_select_table[i][j]);
966             av_freep(&s->p_field_mv_table_base[i][j]);
967             s->p_field_mv_table[i][j] = NULL;
968         }
969         av_freep(&s->p_field_select_table[i]);
970     }
971
972     av_freep(&s->dc_val_base);
973     av_freep(&s->coded_block_base);
974     av_freep(&s->mbintra_table);
975     av_freep(&s->cbp_table);
976     av_freep(&s->pred_dir_table);
977
978     av_freep(&s->mbskip_table);
979     av_freep(&s->bitstream_buffer);
980     s->allocated_bitstream_buffer_size = 0;
981
982     av_freep(&s->avctx->stats_out);
983     av_freep(&s->ac_stats);
984     av_freep(&s->error_status_table);
985     av_freep(&s->er_temp_buffer);
986     av_freep(&s->mb_index2xy);
987     av_freep(&s->lambda_table);
988     av_freep(&s->q_intra_matrix);
989     av_freep(&s->q_inter_matrix);
990     av_freep(&s->q_intra_matrix16);
991     av_freep(&s->q_inter_matrix16);
992     av_freep(&s->input_picture);
993     av_freep(&s->reordered_input_picture);
994     av_freep(&s->dct_offset);
995     av_freep(&s->cplx_tab);
996     av_freep(&s->bits_tab);
997
998     if (s->picture && !s->avctx->internal->is_copy) {
999         for (i = 0; i < s->picture_count; i++) {
1000             free_picture(s, &s->picture[i]);
1001         }
1002     }
1003     av_freep(&s->picture);
1004     s->context_initialized      = 0;
1005     s->last_picture_ptr         =
1006     s->next_picture_ptr         =
1007     s->current_picture_ptr      = NULL;
1008     s->linesize = s->uvlinesize = 0;
1009
1010     for (i = 0; i < 3; i++)
1011         av_freep(&s->visualization_buffer[i]);
1012
1013     if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
1014         avcodec_default_free_buffers(s->avctx);
1015 }
1016
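/* For each half of the run/level table (codes with last = 0 and last = 1)
 * derive three lookup arrays: max_level[run] is the largest level that still
 * has its own VLC code for that run, max_run[level] the largest run for that
 * level, and index_run[run] the index of the first code with that run.  They
 * are stored in the caller-provided static buffer if one is given, otherwise
 * in freshly allocated memory. */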
1017 void ff_init_rl(RLTable *rl,
1018                 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1019 {
1020     int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1021     uint8_t index_run[MAX_RUN + 1];
1022     int last, run, level, start, end, i;
1023
1024     /* If table is static, we can quit if rl->max_level[0] is not NULL */
1025     if (static_store && rl->max_level[0])
1026         return;
1027
1028     /* compute max_level[], max_run[] and index_run[] */
1029     for (last = 0; last < 2; last++) {
1030         if (last == 0) {
1031             start = 0;
1032             end = rl->last;
1033         } else {
1034             start = rl->last;
1035             end = rl->n;
1036         }
1037
1038         memset(max_level, 0, MAX_RUN + 1);
1039         memset(max_run, 0, MAX_LEVEL + 1);
1040         memset(index_run, rl->n, MAX_RUN + 1);
1041         for (i = start; i < end; i++) {
1042             run   = rl->table_run[i];
1043             level = rl->table_level[i];
1044             if (index_run[run] == rl->n)
1045                 index_run[run] = i;
1046             if (level > max_level[run])
1047                 max_level[run] = level;
1048             if (run > max_run[level])
1049                 max_run[level] = run;
1050         }
1051         if (static_store)
1052             rl->max_level[last] = static_store[last];
1053         else
1054             rl->max_level[last] = av_malloc(MAX_RUN + 1);
1055         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1056         if (static_store)
1057             rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
1058         else
1059             rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
1060         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1061         if (static_store)
1062             rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1063         else
1064             rl->index_run[last] = av_malloc(MAX_RUN + 1);
1065         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1066     }
1067 }
1068
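/* Expand the plain VLC into one RL_VLC table per quantizer (0..31) with the
 * H.263-style dequantization (level * 2 * qscale + ((qscale - 1) | 1), or the
 * raw level for qscale 0) folded into the stored level.  A run of 66 marks
 * escape/invalid codes, and 192 is added to the run of "last" coefficients. */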
1069 void ff_init_vlc_rl(RLTable *rl)
1070 {
1071     int i, q;
1072
1073     for (q = 0; q < 32; q++) {
1074         int qmul = q * 2;
1075         int qadd = (q - 1) | 1;
1076
1077         if (q == 0) {
1078             qmul = 1;
1079             qadd = 0;
1080         }
1081         for (i = 0; i < rl->vlc.table_size; i++) {
1082             int code = rl->vlc.table[i][0];
1083             int len  = rl->vlc.table[i][1];
1084             int level, run;
1085
1086             if (len == 0) { // illegal code
1087                 run   = 66;
1088                 level = MAX_LEVEL;
1089             } else if (len < 0) { // more bits needed
1090                 run   = 0;
1091                 level = code;
1092             } else {
1093                 if (code == rl->n) { // esc
1094                     run   = 66;
1095                     level =  0;
1096                 } else {
1097                     run   = rl->table_run[code] + 1;
1098                     level = rl->table_level[code] * qmul + qadd;
1099                     if (code >= rl->last) run += 192;
1100                 }
1101             }
1102             rl->rl_vlc[q][i].len   = len;
1103             rl->rl_vlc[q][i].level = level;
1104             rl->rl_vlc[q][i].run   = run;
1105         }
1106     }
1107 }
1108
1109 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1110 {
1111     int i;
1112
1113     /* release non reference frames */
1114     for (i = 0; i < s->picture_count; i++) {
1115         if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1116             (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1117             (remove_current || &s->picture[i] !=  s->current_picture_ptr)
1118             /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1119             free_frame_buffer(s, &s->picture[i]);
1120         }
1121     }
1122 }
1123
1124 int ff_find_unused_picture(MpegEncContext *s, int shared)
1125 {
1126     int i;
1127
1128     if (shared) {
1129         for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1130             if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1131                 return i;
1132         }
1133     } else {
1134         for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1135             if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
1136                 return i; // FIXME
1137         }
1138         for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1139             if (s->picture[i].f.data[0] == NULL)
1140                 return i;
1141         }
1142     }
1143
1144     return AVERROR_INVALIDDATA;
1145 }
1146
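/* Recompute the encoder's noise-reduction offsets:
 * dct_offset[intra][i] = (noise_reduction * dct_count + dct_error_sum[i] / 2)
 *                        / (dct_error_sum[i] + 1),
 * and halve both counters once dct_count exceeds 2^16 so the statistics keep
 * adapting to the recent input. */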
1147 static void update_noise_reduction(MpegEncContext *s)
1148 {
1149     int intra, i;
1150
1151     for (intra = 0; intra < 2; intra++) {
1152         if (s->dct_count[intra] > (1 << 16)) {
1153             for (i = 0; i < 64; i++) {
1154                 s->dct_error_sum[intra][i] >>= 1;
1155             }
1156             s->dct_count[intra] >>= 1;
1157         }
1158
1159         for (i = 0; i < 64; i++) {
1160             s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1161                                        s->dct_count[intra] +
1162                                        s->dct_error_sum[intra][i] / 2) /
1163                                       (s->dct_error_sum[intra][i] + 1);
1164         }
1165     }
1166 }
1167
1168 /**
1169  * generic function for encode/decode called after coding/decoding
1170  * the header and before a frame is coded/decoded.
1171  */
1172 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1173 {
1174     int i;
1175     Picture *pic;
1176     s->mb_skipped = 0;
1177
1178     assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 ||
1179            s->codec_id == AV_CODEC_ID_SVQ3);
1180
1181     /* mark & release old frames */
1182     if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
1183         if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1184             s->last_picture_ptr != s->next_picture_ptr &&
1185             s->last_picture_ptr->f.data[0]) {
1186             if (s->last_picture_ptr->owner2 == s)
1187                 free_frame_buffer(s, s->last_picture_ptr);
1188         }
1189
1190         /* release forgotten pictures */
1191         /* if (mpeg124/h263) */
1192         if (!s->encoding) {
1193             for (i = 0; i < s->picture_count; i++) {
1194                 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1195                     &s->picture[i] != s->last_picture_ptr &&
1196                     &s->picture[i] != s->next_picture_ptr &&
1197                     s->picture[i].f.reference) {
1198                     if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1199                         av_log(avctx, AV_LOG_ERROR,
1200                                "releasing zombie picture\n");
1201                     free_frame_buffer(s, &s->picture[i]);
1202                 }
1203             }
1204         }
1205     }
1206
1207     if (!s->encoding) {
1208         ff_release_unused_pictures(s, 1);
1209
1210         if (s->current_picture_ptr &&
1211             s->current_picture_ptr->f.data[0] == NULL) {
1212             // we already have an unused image
1213             // (maybe it was set before reading the header)
1214             pic = s->current_picture_ptr;
1215         } else {
1216             i   = ff_find_unused_picture(s, 0);
1217             pic = &s->picture[i];
1218         }
1219
1220         pic->f.reference = 0;
1221         if (!s->dropable) {
1222             if (s->codec_id == AV_CODEC_ID_H264)
1223                 pic->f.reference = s->picture_structure;
1224             else if (s->pict_type != AV_PICTURE_TYPE_B)
1225                 pic->f.reference = 3;
1226         }
1227
1228         pic->f.coded_picture_number = s->coded_picture_number++;
1229
1230         if (ff_alloc_picture(s, pic, 0) < 0)
1231             return -1;
1232
1233         s->current_picture_ptr = pic;
1234         // FIXME use only the vars from current_pic
1235         s->current_picture_ptr->f.top_field_first = s->top_field_first;
1236         if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1237             s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1238             if (s->picture_structure != PICT_FRAME)
1239                 s->current_picture_ptr->f.top_field_first =
1240                     (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1241         }
1242         s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1243                                                      !s->progressive_sequence;
1244         s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
1245     }
1246
1247     s->current_picture_ptr->f.pict_type = s->pict_type;
1248     // if (s->flags && CODEC_FLAG_QSCALE)
1249     //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1250     s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1251
1252     ff_copy_picture(&s->current_picture, s->current_picture_ptr);
1253
1254     if (s->pict_type != AV_PICTURE_TYPE_B) {
1255         s->last_picture_ptr = s->next_picture_ptr;
1256         if (!s->dropable)
1257             s->next_picture_ptr = s->current_picture_ptr;
1258     }
1259     /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1260            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1261            s->last_picture_ptr    ? s->last_picture_ptr->f.data[0]    : NULL,
1262            s->next_picture_ptr    ? s->next_picture_ptr->f.data[0]    : NULL,
1263            s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1264            s->pict_type, s->dropable); */
1265
1266     if (s->codec_id != AV_CODEC_ID_H264) {
1267         if ((s->last_picture_ptr == NULL ||
1268              s->last_picture_ptr->f.data[0] == NULL) &&
1269             (s->pict_type != AV_PICTURE_TYPE_I ||
1270              s->picture_structure != PICT_FRAME)) {
1271             if (s->pict_type != AV_PICTURE_TYPE_I)
1272                 av_log(avctx, AV_LOG_ERROR,
1273                        "warning: first frame is not a keyframe\n");
1274             else if (s->picture_structure != PICT_FRAME)
1275                 av_log(avctx, AV_LOG_INFO,
1276                        "allocate dummy last picture for field based first keyframe\n");
1277
1278             /* Allocate a dummy frame */
1279             i = ff_find_unused_picture(s, 0);
1280             s->last_picture_ptr = &s->picture[i];
1281             if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1282                 s->last_picture_ptr = NULL;
1283                 return -1;
1284             }
1285             ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
1286             ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
1287             s->last_picture_ptr->f.reference = 3;
1288         }
1289         if ((s->next_picture_ptr == NULL ||
1290              s->next_picture_ptr->f.data[0] == NULL) &&
1291             s->pict_type == AV_PICTURE_TYPE_B) {
1292             /* Allocate a dummy frame */
1293             i = ff_find_unused_picture(s, 0);
1294             s->next_picture_ptr = &s->picture[i];
1295             if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1296                 s->next_picture_ptr = NULL;
1297                 return -1;
1298             }
1299             ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
1300             ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
1301             s->next_picture_ptr->f.reference = 3;
1302         }
1303     }
1304
1305     if (s->last_picture_ptr)
1306         ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1307     if (s->next_picture_ptr)
1308         ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1309
1310     if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
1311         (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3)) {
1312         if (s->next_picture_ptr)
1313             s->next_picture_ptr->owner2 = s;
1314         if (s->last_picture_ptr)
1315             s->last_picture_ptr->owner2 = s;
1316     }
1317
1318     assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1319                                                  s->last_picture_ptr->f.data[0]));
1320
1321     if (s->picture_structure != PICT_FRAME && s->out_format != FMT_H264) {
1322         int i;
1323         for (i = 0; i < 4; i++) {
1324             if (s->picture_structure == PICT_BOTTOM_FIELD) {
1325                 s->current_picture.f.data[i] +=
1326                     s->current_picture.f.linesize[i];
1327             }
1328             s->current_picture.f.linesize[i] *= 2;
1329             s->last_picture.f.linesize[i]    *= 2;
1330             s->next_picture.f.linesize[i]    *= 2;
1331         }
1332     }
1333
1334     s->err_recognition = avctx->err_recognition;
1335
1336     /* set dequantizer, we can't do it during init as
1337      * it might change for mpeg4 and we can't do it in the header
1338      * decode as init is not called for mpeg4 there yet */
1339     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1340         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1341         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1342     } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1343         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1344         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1345     } else {
1346         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1347         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1348     }
1349
1350     if (s->dct_error_sum) {
1351         assert(s->avctx->noise_reduction && s->encoding);
1352         update_noise_reduction(s);
1353     }
1354
1355     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1356         return ff_xvmc_field_start(s, avctx);
1357
1358     return 0;
1359 }
1360
1361 /* generic function for encode/decode called after a
1362  * frame has been coded/decoded. */
1363 void ff_MPV_frame_end(MpegEncContext *s)
1364 {
1365     int i;
1366     /* redraw edges for the frame if decoding didn't complete */
1367     // just to make sure that all data is rendered.
1368     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1369         ff_xvmc_field_end(s);
1370     } else if ((s->error_count || s->encoding) &&
1371               !s->avctx->hwaccel &&
1372               !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1373               s->unrestricted_mv &&
1374               s->current_picture.f.reference &&
1375               !s->intra_only &&
1376               !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1377         int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1378         int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
1379         s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1380                           s->h_edge_pos, s->v_edge_pos,
1381                           EDGE_WIDTH, EDGE_WIDTH,
1382                           EDGE_TOP | EDGE_BOTTOM);
1383         s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1384                           s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1385                           EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1386                           EDGE_TOP | EDGE_BOTTOM);
1387         s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1388                           s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1389                           EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1390                           EDGE_TOP | EDGE_BOTTOM);
1391     }
1392
1393     emms_c();
1394
1395     s->last_pict_type                 = s->pict_type;
1396     s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1397     if (s->pict_type != AV_PICTURE_TYPE_B) {
1398         s->last_non_b_pict_type = s->pict_type;
1399     }
1400 #if 0
1401     /* copy back current_picture variables */
1402     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1403         if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1404             s->picture[i] = s->current_picture;
1405             break;
1406         }
1407     }
1408     assert(i < MAX_PICTURE_COUNT);
1409 #endif
1410
1411     if (s->encoding) {
1412         /* release non-reference frames */
1413         for (i = 0; i < s->picture_count; i++) {
1414             if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1415                 /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1416                 free_frame_buffer(s, &s->picture[i]);
1417             }
1418         }
1419     }
1420     // clear copies, to avoid confusion
1421 #if 0
1422     memset(&s->last_picture,    0, sizeof(Picture));
1423     memset(&s->next_picture,    0, sizeof(Picture));
1424     memset(&s->current_picture, 0, sizeof(Picture));
1425 #endif
1426     s->avctx->coded_frame = &s->current_picture_ptr->f;
1427
1428     if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
1429         ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
1430     }
1431 }
1432
1433 /**
1434  * Draw a line from (ex, ey) -> (sx, sy).
1435  * @param w width of the image
1436  * @param h height of the image
1437  * @param stride stride/linesize of the image
1438  * @param color color of the line
1439  */
1440 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1441                       int w, int h, int stride, int color)
1442 {
1443     int x, y, fr, f;
1444
1445     sx = av_clip(sx, 0, w - 1);
1446     sy = av_clip(sy, 0, h - 1);
1447     ex = av_clip(ex, 0, w - 1);
1448     ey = av_clip(ey, 0, h - 1);
1449
1450     buf[sy * stride + sx] += color;
1451
1452     if (FFABS(ex - sx) > FFABS(ey - sy)) {
1453         if (sx > ex) {
1454             FFSWAP(int, sx, ex);
1455             FFSWAP(int, sy, ey);
1456         }
1457         buf += sx + sy * stride;
1458         ex  -= sx;
1459         f    = ((ey - sy) << 16) / ex;
1460         for (x = 0; x <= ex; x++) {
1461             y  = (x * f) >> 16;
1462             fr = (x * f) & 0xFFFF;
1463             buf[y * stride + x]       += (color * (0x10000 - fr)) >> 16;
1464             buf[(y + 1) * stride + x] += (color *            fr ) >> 16;
1465         }
1466     } else {
1467         if (sy > ey) {
1468             FFSWAP(int, sx, ex);
1469             FFSWAP(int, sy, ey);
1470         }
1471         buf += sx + sy * stride;
1472         ey  -= sy;
1473         if (ey)
1474             f  = ((ex - sx) << 16) / ey;
1475         else
1476             f = 0;
1477         for (y = 0; y <= ey; y++) {
1478             x  = (y * f) >> 16;
1479             fr = (y * f) & 0xFFFF;
1480             buf[y * stride + x]     += (color * (0x10000 - fr)) >> 16;
1481             buf[y * stride + x + 1] += (color *            fr ) >> 16;
1482         }
1483     }
1484 }
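/* Worked example of the 16.16 fixed-point stepping above (hypothetical call,
 * assuming w > 8 and h > 4): draw_line(buf, 0, 0, 8, 4, w, h, stride, 100)
 * takes the x-major branch with f = (4 << 16) / 8 = 0x8000, so each x step
 * advances y by half a pixel; the fractional part fr splits the color between
 * the two rows the ideal line passes between (a simple DDA with linear
 * intensity interpolation). */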
1485
1486 /**
1487  * Draw an arrow from (ex, ey) -> (sx, sy).
1488  * @param w width of the image
1489  * @param h height of the image
1490  * @param stride stride/linesize of the image
1491  * @param color color of the arrow
1492  */
1493 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1494                        int ey, int w, int h, int stride, int color)
1495 {
1496     int dx,dy;
1497
1498     sx = av_clip(sx, -100, w + 100);
1499     sy = av_clip(sy, -100, h + 100);
1500     ex = av_clip(ex, -100, w + 100);
1501     ey = av_clip(ey, -100, h + 100);
1502
1503     dx = ex - sx;
1504     dy = ey - sy;
1505
1506     if (dx * dx + dy * dy > 3 * 3) {
1507         int rx =  dx + dy;
1508         int ry = -dx + dy;
1509         int length = ff_sqrt((rx * rx + ry * ry) << 8);
1510
1511         // FIXME subpixel accuracy
1512         rx = ROUNDED_DIV(rx * 3 << 4, length);
1513         ry = ROUNDED_DIV(ry * 3 << 4, length);
1514
1515         draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1516         draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1517     }
1518     draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1519 }
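/* Geometry of the arrow head above: (rx, ry) = (dx + dy, -dx + dy) is the
 * shaft direction rotated by 45 degrees (scaled by sqrt(2)) and (-ry, rx) is
 * the opposite 45-degree rotation; length is roughly 16 * sqrt(rx*rx + ry*ry),
 * so ROUNDED_DIV(rx * 3 << 4, length) scales the two head strokes to about
 * 3 pixels each, both anchored at (sx, sy). */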
1520
1521 /**
1522  * Print debugging info for the given picture.
1523  */
1524 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
1525 {
1526     if (s->avctx->hwaccel || !pict || !pict->mb_type)
1527         return;
1528
1529     if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1530         int x,y;
1531
1532         av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1533         switch (pict->pict_type) {
1534         case AV_PICTURE_TYPE_I:
1535             av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1536             break;
1537         case AV_PICTURE_TYPE_P:
1538             av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1539             break;
1540         case AV_PICTURE_TYPE_B:
1541             av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1542             break;
1543         case AV_PICTURE_TYPE_S:
1544             av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1545             break;
1546         case AV_PICTURE_TYPE_SI:
1547             av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1548             break;
1549         case AV_PICTURE_TYPE_SP:
1550             av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1551             break;
1552         }
1553         for (y = 0; y < s->mb_height; y++) {
1554             for (x = 0; x < s->mb_width; x++) {
1555                 if (s->avctx->debug & FF_DEBUG_SKIP) {
1556                     int count = s->mbskip_table[x + y * s->mb_stride];
1557                     if (count > 9)
1558                         count = 9;
1559                     av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1560                 }
1561                 if (s->avctx->debug & FF_DEBUG_QP) {
1562                     av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1563                            pict->qscale_table[x + y * s->mb_stride]);
1564                 }
1565                 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1566                     int mb_type = pict->mb_type[x + y * s->mb_stride];
1567                     // Type & MV direction
1568                     if (IS_PCM(mb_type))
1569                         av_log(s->avctx, AV_LOG_DEBUG, "P");
1570                     else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1571                         av_log(s->avctx, AV_LOG_DEBUG, "A");
1572                     else if (IS_INTRA4x4(mb_type))
1573                         av_log(s->avctx, AV_LOG_DEBUG, "i");
1574                     else if (IS_INTRA16x16(mb_type))
1575                         av_log(s->avctx, AV_LOG_DEBUG, "I");
1576                     else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1577                         av_log(s->avctx, AV_LOG_DEBUG, "d");
1578                     else if (IS_DIRECT(mb_type))
1579                         av_log(s->avctx, AV_LOG_DEBUG, "D");
1580                     else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1581                         av_log(s->avctx, AV_LOG_DEBUG, "g");
1582                     else if (IS_GMC(mb_type))
1583                         av_log(s->avctx, AV_LOG_DEBUG, "G");
1584                     else if (IS_SKIP(mb_type))
1585                         av_log(s->avctx, AV_LOG_DEBUG, "S");
1586                     else if (!USES_LIST(mb_type, 1))
1587                         av_log(s->avctx, AV_LOG_DEBUG, ">");
1588                     else if (!USES_LIST(mb_type, 0))
1589                         av_log(s->avctx, AV_LOG_DEBUG, "<");
1590                     else {
1591                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1592                         av_log(s->avctx, AV_LOG_DEBUG, "X");
1593                     }
1594
1595                     // segmentation
1596                     if (IS_8X8(mb_type))
1597                         av_log(s->avctx, AV_LOG_DEBUG, "+");
1598                     else if (IS_16X8(mb_type))
1599                         av_log(s->avctx, AV_LOG_DEBUG, "-");
1600                     else if (IS_8X16(mb_type))
1601                         av_log(s->avctx, AV_LOG_DEBUG, "|");
1602                     else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1603                         av_log(s->avctx, AV_LOG_DEBUG, " ");
1604                     else
1605                         av_log(s->avctx, AV_LOG_DEBUG, "?");
1606
1607
1608                     if (IS_INTERLACED(mb_type))
1609                         av_log(s->avctx, AV_LOG_DEBUG, "=");
1610                     else
1611                         av_log(s->avctx, AV_LOG_DEBUG, " ");
1612                 }
1613                 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1614             }
1615             av_log(s->avctx, AV_LOG_DEBUG, "\n");
1616         }
1617     }
1618
1619     if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1620         (s->avctx->debug_mv)) {
1621         const int shift = 1 + s->quarter_sample;
1622         int mb_y;
1623         uint8_t *ptr;
1624         int i;
1625         int h_chroma_shift, v_chroma_shift, block_height;
1626         const int width          = s->avctx->width;
1627         const int height         = s->avctx->height;
1628         const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1629         const int mv_stride      = (s->mb_width << mv_sample_log2) +
1630                                    (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
1631         s->low_delay = 0; // needed to see the vectors without trashing the buffers
1632
1633         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1634                                       &h_chroma_shift, &v_chroma_shift);
1635         for (i = 0; i < 3; i++) {
1636             memcpy(s->visualization_buffer[i], pict->data[i],
1637                    (i == 0) ? pict->linesize[i] * height:
1638                               pict->linesize[i] * height >> v_chroma_shift);
1639             pict->data[i] = s->visualization_buffer[i];
1640         }
1641         pict->type   = FF_BUFFER_TYPE_COPY;
1642         ptr          = pict->data[0];
1643         block_height = 16 >> v_chroma_shift;
1644
1645         for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1646             int mb_x;
1647             for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1648                 const int mb_index = mb_x + mb_y * s->mb_stride;
1649                 if ((s->avctx->debug_mv) && pict->motion_val) {
1650                     int type;
1651                     for (type = 0; type < 3; type++) {
1652                         int direction = 0;
1653                         switch (type) {
1654                         case 0:
1655                             if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1656                                 (pict->pict_type != AV_PICTURE_TYPE_P))
1657                                 continue;
1658                             direction = 0;
1659                             break;
1660                         case 1:
1661                             if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1662                                 (pict->pict_type != AV_PICTURE_TYPE_B))
1663                                 continue;
1664                             direction = 0;
1665                             break;
1666                         case 2:
1667                             if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1668                                 (pict->pict_type != AV_PICTURE_TYPE_B))
1669                                 continue;
1670                             direction = 1;
1671                             break;
1672                         }
1673                         if (!USES_LIST(pict->mb_type[mb_index], direction))
1674                             continue;
1675
1676                         if (IS_8X8(pict->mb_type[mb_index])) {
1677                             int i;
1678                             for (i = 0; i < 4; i++) {
1679                                 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1680                                 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1681                                 int xy = (mb_x * 2 + (i & 1) +
1682                                           (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1683                                 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1684                                 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1685                                 draw_arrow(ptr, sx, sy, mx, my, width,
1686                                            height, s->linesize, 100);
1687                             }
1688                         } else if (IS_16X8(pict->mb_type[mb_index])) {
1689                             int i;
1690                             for (i = 0; i < 2; i++) {
1691                                 int sx = mb_x * 16 + 8;
1692                                 int sy = mb_y * 16 + 4 + 8 * i;
1693                                 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1694                                 int mx = (pict->motion_val[direction][xy][0] >> shift);
1695                                 int my = (pict->motion_val[direction][xy][1] >> shift);
1696
1697                                 if (IS_INTERLACED(pict->mb_type[mb_index]))
1698                                     my *= 2;
1699
1700                                 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1701                                            height, s->linesize, 100);
1702                             }
1703                         } else if (IS_8X16(pict->mb_type[mb_index])) {
1704                             int i;
1705                             for (i = 0; i < 2; i++) {
1706                                 int sx = mb_x * 16 + 4 + 8 * i;
1707                                 int sy = mb_y * 16 + 8;
1708                                 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1709                                 int mx = pict->motion_val[direction][xy][0] >> shift;
1710                                 int my = pict->motion_val[direction][xy][1] >> shift;
1711
1712                                 if (IS_INTERLACED(pict->mb_type[mb_index]))
1713                                     my *= 2;
1714
1715                                 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1716                                            height, s->linesize, 100);
1717                             }
1718                         } else {
1719                               int sx = mb_x * 16 + 8;
1720                               int sy = mb_y * 16 + 8;
1721                               int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
1722                               int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1723                               int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1724                               draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1725                         }
1726                     }
1727                 }
1728                 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1729                     uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1730                                  0x0101010101010101ULL;
1731                     int y;
1732                     for (y = 0; y < block_height; y++) {
1733                         *(uint64_t *)(pict->data[1] + 8 * mb_x +
1734                                       (block_height * mb_y + y) *
1735                                       pict->linesize[1]) = c;
1736                         *(uint64_t *)(pict->data[2] + 8 * mb_x +
1737                                       (block_height * mb_y + y) *
1738                                       pict->linesize[2]) = c;
1739                     }
1740                 }
1741                 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1742                     pict->motion_val) {
1743                     int mb_type = pict->mb_type[mb_index];
1744                     uint64_t u,v;
1745                     int y;
1746 #define COLOR(theta, r) \
1747     u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1748     v = (int)(128 + r * sin(theta * 3.141592 / 180));
1749
1750
1751                     u = v = 128;
1752                     if (IS_PCM(mb_type)) {
1753                         COLOR(120, 48)
1754                     } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1755                                IS_INTRA16x16(mb_type)) {
1756                         COLOR(30, 48)
1757                     } else if (IS_INTRA4x4(mb_type)) {
1758                         COLOR(90, 48)
1759                     } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1760                         // COLOR(120, 48)
1761                     } else if (IS_DIRECT(mb_type)) {
1762                         COLOR(150, 48)
1763                     } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1764                         COLOR(170, 48)
1765                     } else if (IS_GMC(mb_type)) {
1766                         COLOR(190, 48)
1767                     } else if (IS_SKIP(mb_type)) {
1768                         // COLOR(180, 48)
1769                     } else if (!USES_LIST(mb_type, 1)) {
1770                         COLOR(240, 48)
1771                     } else if (!USES_LIST(mb_type, 0)) {
1772                         COLOR(0, 48)
1773                     } else {
1774                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1775                         COLOR(300,48)
1776                     }
1777
1778                     u *= 0x0101010101010101ULL;
1779                     v *= 0x0101010101010101ULL;
1780                     for (y = 0; y < block_height; y++) {
1781                         *(uint64_t *)(pict->data[1] + 8 * mb_x +
1782                                       (block_height * mb_y + y) * pict->linesize[1]) = u;
1783                         *(uint64_t *)(pict->data[2] + 8 * mb_x +
1784                                       (block_height * mb_y + y) * pict->linesize[2]) = v;
1785                     }
1786
1787                     // segmentation
1788                     if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
1789                         *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
1790                                       (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1791                         *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
1792                                       (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1793                     }
1794                     if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
1795                         for (y = 0; y < 16; y++)
1796                             pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
1797                                           pict->linesize[0]] ^= 0x80;
1798                     }
1799                     if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
1800                         int dm = 1 << (mv_sample_log2 - 2);
1801                         for (i = 0; i < 4; i++) {
1802                             int sx = mb_x * 16 + 8 * (i & 1);
1803                             int sy = mb_y * 16 + 8 * (i >> 1);
1804                             int xy = (mb_x * 2 + (i & 1) +
1805                                      (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1806                             // FIXME bidir
1807                             int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
1808                             if (mv[0] != mv[dm] ||
1809                                 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
1810                                 for (y = 0; y < 8; y++)
1811                                     pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
1812                             if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
1813                                 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
1814                                               pict->linesize[0]) ^= 0x8080808080808080ULL;
1815                         }
1816                     }
1817
1818                     if (IS_INTERLACED(mb_type) &&
1819                         s->codec_id == AV_CODEC_ID_H264) {
1820                         // hmm
1821                     }
1822                 }
1823                 s->mbskip_table[mb_index] = 0;
1824             }
1825         }
1826     }
1827 }
1828
1829 /**
1830  * Find the lowest MB row referenced in the MVs.
1831  */
1832 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1833 {
1834     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1835     int my, off, i, mvs;
1836
1837     if (s->picture_structure != PICT_FRAME) goto unhandled;
1838
1839     switch (s->mv_type) {
1840         case MV_TYPE_16X16:
1841             mvs = 1;
1842             break;
1843         case MV_TYPE_16X8:
1844             mvs = 2;
1845             break;
1846         case MV_TYPE_8X8:
1847             mvs = 4;
1848             break;
1849         default:
1850             goto unhandled;
1851     }
1852
1853     for (i = 0; i < mvs; i++) {
1854         my = s->mv[dir][i][1]<<qpel_shift;
1855         my_max = FFMAX(my_max, my);
1856         my_min = FFMIN(my_min, my);
1857     }
1858
1859     off = (FFMAX(-my_min, my_max) + 63) >> 6;
1860
1861     return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1862 unhandled:
1863     return s->mb_height-1;
1864 }
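/* Unit check for the arithmetic above: the vectors are brought to quarter-pel
 * units (half-pel codecs are shifted left by one), and 64 quarter-pel units
 * span one 16-pixel macroblock row, so (max + 63) >> 6 is the number of MB
 * rows the motion can reach and mb_y + off, clamped to the picture, is the
 * lowest row the MC can reference. */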
1865
1866 /* put block[] to dest[] */
1867 static inline void put_dct(MpegEncContext *s,
1868                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1869 {
1870     s->dct_unquantize_intra(s, block, i, qscale);
1871     s->dsp.idct_put (dest, line_size, block);
1872 }
1873
1874 /* add block[] to dest[] */
1875 static inline void add_dct(MpegEncContext *s,
1876                            DCTELEM *block, int i, uint8_t *dest, int line_size)
1877 {
1878     if (s->block_last_index[i] >= 0) {
1879         s->dsp.idct_add (dest, line_size, block);
1880     }
1881 }
1882
1883 static inline void add_dequant_dct(MpegEncContext *s,
1884                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1885 {
1886     if (s->block_last_index[i] >= 0) {
1887         s->dct_unquantize_inter(s, block, i, qscale);
1888
1889         s->dsp.idct_add (dest, line_size, block);
1890     }
1891 }
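/* The three helpers above differ only in how the residual reaches dest[]:
 * put_dct() dequantizes and overwrites (intra blocks), add_dct() adds an
 * already dequantized block, and add_dequant_dct() dequantizes with the inter
 * quantizer before adding (non-intra blocks). */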
1892
1893 /**
1894  * Clean dc, ac, coded_block for the current non-intra MB.
1895  */
1896 void ff_clean_intra_table_entries(MpegEncContext *s)
1897 {
1898     int wrap = s->b8_stride;
1899     int xy = s->block_index[0];
1900
1901     s->dc_val[0][xy           ] =
1902     s->dc_val[0][xy + 1       ] =
1903     s->dc_val[0][xy     + wrap] =
1904     s->dc_val[0][xy + 1 + wrap] = 1024;
1905     /* ac pred */
1906     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
1907     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1908     if (s->msmpeg4_version>=3) {
1909         s->coded_block[xy           ] =
1910         s->coded_block[xy + 1       ] =
1911         s->coded_block[xy     + wrap] =
1912         s->coded_block[xy + 1 + wrap] = 0;
1913     }
1914     /* chroma */
1915     wrap = s->mb_stride;
1916     xy = s->mb_x + s->mb_y * wrap;
1917     s->dc_val[1][xy] =
1918     s->dc_val[2][xy] = 1024;
1919     /* ac pred */
1920     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1921     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1922
1923     s->mbintra_table[xy]= 0;
1924 }
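/* The reset value 1024 (128 << 3) is the neutral DC predictor used by the
 * H.263/MPEG-4 prediction code, so a following intra block predicts from a
 * default value instead of stale neighbour data. */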
1925
1926 /* generic function called after a macroblock has been parsed by the
1927    decoder or after it has been encoded by the encoder.
1928
1929    Important variables used:
1930    s->mb_intra : true if intra macroblock
1931    s->mv_dir   : motion vector direction
1932    s->mv_type  : motion vector type
1933    s->mv       : motion vector
1934    s->interlaced_dct : true if interlaced dct used (mpeg2)
1935  */
1936 static av_always_inline
1937 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
1938                             int is_mpeg12)
1939 {
1940     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1941     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1942         ff_xvmc_decode_mb(s);//xvmc uses pblocks
1943         return;
1944     }
1945
1946     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1947        /* save DCT coefficients */
1948        int i,j;
1949        DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
1950        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1951        for(i=0; i<6; i++){
1952            for(j=0; j<64; j++){
1953                *dct++ = block[i][s->dsp.idct_permutation[j]];
1954                av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
1955            }
1956            av_log(s->avctx, AV_LOG_DEBUG, "\n");
1957        }
1958     }
1959
1960     s->current_picture.f.qscale_table[mb_xy] = s->qscale;
1961
1962     /* update DC predictors for P macroblocks */
1963     if (!s->mb_intra) {
1964         if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1965             if(s->mbintra_table[mb_xy])
1966                 ff_clean_intra_table_entries(s);
1967         } else {
1968             s->last_dc[0] =
1969             s->last_dc[1] =
1970             s->last_dc[2] = 128 << s->intra_dc_precision;
1971         }
1972     }
1973     else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1974         s->mbintra_table[mb_xy]=1;
1975
1976     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
1977         uint8_t *dest_y, *dest_cb, *dest_cr;
1978         int dct_linesize, dct_offset;
1979         op_pixels_func (*op_pix)[4];
1980         qpel_mc_func (*op_qpix)[16];
1981         const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
1982         const int uvlinesize = s->current_picture.f.linesize[1];
1983         const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
1984         const int block_size = 8;
1985
1986         /* avoid copy if macroblock skipped in last frame too */
1987         /* skip only during decoding as we might trash the buffers during encoding a bit */
1988         if(!s->encoding){
1989             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1990
1991             if (s->mb_skipped) {
1992                 s->mb_skipped= 0;
1993                 assert(s->pict_type!=AV_PICTURE_TYPE_I);
1994                 *mbskip_ptr = 1;
1995             } else if(!s->current_picture.f.reference) {
1996                 *mbskip_ptr = 1;
1997             } else{
1998                 *mbskip_ptr = 0; /* not skipped */
1999             }
2000         }
2001
2002         dct_linesize = linesize << s->interlaced_dct;
2003         dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;
2004
2005         if(readable){
2006             dest_y=  s->dest[0];
2007             dest_cb= s->dest[1];
2008             dest_cr= s->dest[2];
2009         }else{
2010             dest_y = s->b_scratchpad;
2011             dest_cb= s->b_scratchpad+16*linesize;
2012             dest_cr= s->b_scratchpad+32*linesize;
2013         }
2014
2015         if (!s->mb_intra) {
2016             /* motion handling */
2017             /* decoding or more than one mb_type (MC was already done otherwise) */
2018             if(!s->encoding){
2019
2020                 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2021                     if (s->mv_dir & MV_DIR_FORWARD) {
2022                         ff_thread_await_progress(&s->last_picture_ptr->f,
2023                                                  ff_MPV_lowest_referenced_row(s, 0),
2024                                                  0);
2025                     }
2026                     if (s->mv_dir & MV_DIR_BACKWARD) {
2027                         ff_thread_await_progress(&s->next_picture_ptr->f,
2028                                                  ff_MPV_lowest_referenced_row(s, 1),
2029                                                  0);
2030                     }
2031                 }
2032
2033                 op_qpix= s->me.qpel_put;
2034                 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2035                     op_pix = s->dsp.put_pixels_tab;
2036                 }else{
2037                     op_pix = s->dsp.put_no_rnd_pixels_tab;
2038                 }
2039                 if (s->mv_dir & MV_DIR_FORWARD) {
2040                     ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2041                     op_pix = s->dsp.avg_pixels_tab;
2042                     op_qpix= s->me.qpel_avg;
2043                 }
2044                 if (s->mv_dir & MV_DIR_BACKWARD) {
2045                     ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2046                 }
2047             }
2048
2049             /* skip dequant / idct if we are really late ;) */
2050             if(s->avctx->skip_idct){
2051                 if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2052                    ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2053                    || s->avctx->skip_idct >= AVDISCARD_ALL)
2054                     goto skip_idct;
2055             }
2056
2057             /* add dct residue */
2058             if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2059                                 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2060                 add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
2061                 add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
2062                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
2063                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2064
2065                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2066                     if (s->chroma_y_shift){
2067                         add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2068                         add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2069                     }else{
2070                         dct_linesize >>= 1;
2071                         dct_offset >>=1;
2072                         add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
2073                         add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
2074                         add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2075                         add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2076                     }
2077                 }
2078             } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2079                 add_dct(s, block[0], 0, dest_y                          , dct_linesize);
2080                 add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
2081                 add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
2082                 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2083
2084                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2085                     if(s->chroma_y_shift){//Chroma420
2086                         add_dct(s, block[4], 4, dest_cb, uvlinesize);
2087                         add_dct(s, block[5], 5, dest_cr, uvlinesize);
2088                     }else{
2089                         //chroma422
2090                         dct_linesize = uvlinesize << s->interlaced_dct;
2091                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2092
2093                         add_dct(s, block[4], 4, dest_cb, dct_linesize);
2094                         add_dct(s, block[5], 5, dest_cr, dct_linesize);
2095                         add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2096                         add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2097                         if(!s->chroma_x_shift){//Chroma444
2098                             add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2099                             add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2100                             add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2101                             add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2102                         }
2103                     }
2104                 }//fi gray
2105             }
2106             else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2107                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2108             }
2109         } else {
2110             /* dct only in intra block */
2111             if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2112                 put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
2113                 put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
2114                 put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
2115                 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2116
2117                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2118                     if(s->chroma_y_shift){
2119                         put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2120                         put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2121                     }else{
2122                         dct_offset >>=1;
2123                         dct_linesize >>=1;
2124                         put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
2125                         put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
2126                         put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2127                         put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2128                     }
2129                 }
2130             }else{
2131                 s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
2132                 s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
2133                 s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
2134                 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2135
2136                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2137                     if(s->chroma_y_shift){
2138                         s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2139                         s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2140                     }else{
2141
2142                         dct_linesize = uvlinesize << s->interlaced_dct;
2143                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2144
2145                         s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
2146                         s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
2147                         s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2148                         s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2149                         if(!s->chroma_x_shift){//Chroma444
2150                             s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
2151                             s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
2152                             s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2153                             s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2154                         }
2155                     }
2156                 }//gray
2157             }
2158         }
2159 skip_idct:
2160         if(!readable){
2161             s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
2162             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2163             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2164         }
2165     }
2166 }
2167
2168 void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2169 #if !CONFIG_SMALL
2170     if(s->out_format == FMT_MPEG1) {
2171         MPV_decode_mb_internal(s, block, 1);
2172     } else
2173 #endif
2174         MPV_decode_mb_internal(s, block, 0);
2175 }
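/* The is_mpeg12 argument acts as a compile-time template switch: unless
 * CONFIG_SMALL is set, two specializations of MPV_decode_mb_internal() are
 * generated, so the is_mpeg12 branches inside it fold away as constants. */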
2176
2177 /**
2178  * @param h the normal height; it will be reduced automatically if needed for the last row
2179  */
2180 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2181     const int field_pic= s->picture_structure != PICT_FRAME;
2182     if(field_pic){
2183         h <<= 1;
2184         y <<= 1;
2185     }
2186
2187     if (!s->avctx->hwaccel
2188        && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2189        && s->unrestricted_mv
2190        && s->current_picture.f.reference
2191        && !s->intra_only
2192        && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2193         int sides = 0, edge_h;
2194         int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2195         int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
2196         if (y==0) sides |= EDGE_TOP;
2197         if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2198
2199         edge_h= FFMIN(h, s->v_edge_pos - y);
2200
2201         s->dsp.draw_edges(s->current_picture_ptr->f.data[0] +  y         *s->linesize,
2202                           s->linesize,           s->h_edge_pos,         edge_h,
2203                           EDGE_WIDTH,            EDGE_WIDTH,            sides);
2204         s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2205                           s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
2206                           EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
2207         s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2208                           s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
2209                           EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
2210     }
2211
2212     h= FFMIN(h, s->avctx->height - y);
2213
2214     if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2215
2216     if (s->avctx->draw_horiz_band) {
2217         AVFrame *src;
2218         int offset[AV_NUM_DATA_POINTERS];
2219         int i;
2220
2221         if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2222             src = &s->current_picture_ptr->f;
2223         else if(s->last_picture_ptr)
2224             src = &s->last_picture_ptr->f;
2225         else
2226             return;
2227
2228         if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2229             for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2230                 offset[i] = 0;
2231         }else{
2232             offset[0]= y * s->linesize;
2233             offset[1]=
2234             offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2235             for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2236                 offset[i] = 0;
2237         }
2238
2239         emms_c();
2240
2241         s->avctx->draw_horiz_band(s->avctx, src, offset,
2242                                   y, s->picture_structure, h);
2243     }
2244 }
2245
2246 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2247     const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2248     const int uvlinesize = s->current_picture.f.linesize[1];
2249     const int mb_size= 4;
2250
2251     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
2252     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
2253     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2254     s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2255     s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2256     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2257     //block_index is not used by mpeg2, so it is not affected by chroma_format
2258
2259     s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
2260     s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2261     s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2262
2263     if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2264     {
2265         if(s->picture_structure==PICT_FRAME){
2266             s->dest[0] += s->mb_y *   linesize << mb_size;
2267             s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2268             s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2269         }else{
2270             s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
2271             s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2272             s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2273             assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2274         }
2275     }
2276 }
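/* Layout reminder for block_index[]: entries 0-3 address the four luma 8x8
 * blocks of the current macroblock in the b8 grid, while entries 4 and 5 are
 * offset past the whole luma area (b8_stride * mb_height * 2 entries) so the
 * chroma prediction data can share the same dc_val/ac_val allocations. */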
2277
2278 void ff_mpeg_flush(AVCodecContext *avctx){
2279     int i;
2280     MpegEncContext *s = avctx->priv_data;
2281
2282     if(s==NULL || s->picture==NULL)
2283         return;
2284
2285     for(i=0; i<s->picture_count; i++){
2286        if (s->picture[i].f.data[0] &&
2287            (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2288             s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2289         free_frame_buffer(s, &s->picture[i]);
2290     }
2291     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2292
2293     s->mb_x= s->mb_y= 0;
2294
2295     s->parse_context.state= -1;
2296     s->parse_context.frame_start_found= 0;
2297     s->parse_context.overread= 0;
2298     s->parse_context.overread_index= 0;
2299     s->parse_context.index= 0;
2300     s->parse_context.last_index= 0;
2301     s->bitstream_buffer_size=0;
2302     s->pp_time=0;
2303 }
2304
2305 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2306                                    DCTELEM *block, int n, int qscale)
2307 {
2308     int i, level, nCoeffs;
2309     const uint16_t *quant_matrix;
2310
2311     nCoeffs= s->block_last_index[n];
2312
2313     if (n < 4)
2314         block[0] = block[0] * s->y_dc_scale;
2315     else
2316         block[0] = block[0] * s->c_dc_scale;
2317     /* XXX: only mpeg1 */
2318     quant_matrix = s->intra_matrix;
2319     for(i=1;i<=nCoeffs;i++) {
2320         int j= s->intra_scantable.permutated[i];
2321         level = block[j];
2322         if (level) {
2323             if (level < 0) {
2324                 level = -level;
2325                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2326                 level = (level - 1) | 1;
2327                 level = -level;
2328             } else {
2329                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2330                 level = (level - 1) | 1;
2331             }
2332             block[j] = level;
2333         }
2334     }
2335 }
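/* Numeric example for the MPEG-1 intra reconstruction above: with level = 2,
 * qscale = 4 and quant_matrix[j] = 16, (2 * 4 * 16) >> 3 = 16 and the
 * (level - 1) | 1 step forces it odd, giving 15; this oddification is the
 * MPEG-1 mismatch control that keeps encoder and decoder IDCTs from drifting
 * apart. */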
2336
2337 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2338                                    DCTELEM *block, int n, int qscale)
2339 {
2340     int i, level, nCoeffs;
2341     const uint16_t *quant_matrix;
2342
2343     nCoeffs= s->block_last_index[n];
2344
2345     quant_matrix = s->inter_matrix;
2346     for(i=0; i<=nCoeffs; i++) {
2347         int j= s->intra_scantable.permutated[i];
2348         level = block[j];
2349         if (level) {
2350             if (level < 0) {
2351                 level = -level;
2352                 level = (((level << 1) + 1) * qscale *
2353                          ((int) (quant_matrix[j]))) >> 4;
2354                 level = (level - 1) | 1;
2355                 level = -level;
2356             } else {
2357                 level = (((level << 1) + 1) * qscale *
2358                          ((int) (quant_matrix[j]))) >> 4;
2359                 level = (level - 1) | 1;
2360             }
2361             block[j] = level;
2362         }
2363     }
2364 }
2365
2366 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2367                                    DCTELEM *block, int n, int qscale)
2368 {
2369     int i, level, nCoeffs;
2370     const uint16_t *quant_matrix;
2371
2372     if(s->alternate_scan) nCoeffs= 63;
2373     else nCoeffs= s->block_last_index[n];
2374
2375     if (n < 4)
2376         block[0] = block[0] * s->y_dc_scale;
2377     else
2378         block[0] = block[0] * s->c_dc_scale;
2379     quant_matrix = s->intra_matrix;
2380     for(i=1;i<=nCoeffs;i++) {
2381         int j= s->intra_scantable.permutated[i];
2382         level = block[j];
2383         if (level) {
2384             if (level < 0) {
2385                 level = -level;
2386                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2387                 level = -level;
2388             } else {
2389                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2390             }
2391             block[j] = level;
2392         }
2393     }
2394 }
2395
2396 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2397                                    DCTELEM *block, int n, int qscale)
2398 {
2399     int i, level, nCoeffs;
2400     const uint16_t *quant_matrix;
2401     int sum=-1;
2402
2403     if(s->alternate_scan) nCoeffs= 63;
2404     else nCoeffs= s->block_last_index[n];
2405
2406     if (n < 4)
2407         block[0] = block[0] * s->y_dc_scale;
2408     else
2409         block[0] = block[0] * s->c_dc_scale;
2410     quant_matrix = s->intra_matrix;
2411     for(i=1;i<=nCoeffs;i++) {
2412         int j= s->intra_scantable.permutated[i];
2413         level = block[j];
2414         if (level) {
2415             if (level < 0) {
2416                 level = -level;
2417                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2418                 level = -level;
2419             } else {
2420                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2421             }
2422             block[j] = level;
2423             sum+=level;
2424         }
2425     }
2426     block[63]^=sum&1;
2427 }
2428
2429 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2430                                    DCTELEM *block, int n, int qscale)
2431 {
2432     int i, level, nCoeffs;
2433     const uint16_t *quant_matrix;
2434     int sum=-1;
2435
2436     if(s->alternate_scan) nCoeffs= 63;
2437     else nCoeffs= s->block_last_index[n];
2438
2439     quant_matrix = s->inter_matrix;
2440     for(i=0; i<=nCoeffs; i++) {
2441         int j= s->intra_scantable.permutated[i];
2442         level = block[j];
2443         if (level) {
2444             if (level < 0) {
2445                 level = -level;
2446                 level = (((level << 1) + 1) * qscale *
2447                          ((int) (quant_matrix[j]))) >> 4;
2448                 level = -level;
2449             } else {
2450                 level = (((level << 1) + 1) * qscale *
2451                          ((int) (quant_matrix[j]))) >> 4;
2452             }
2453             block[j] = level;
2454             sum+=level;
2455         }
2456     }
2457     block[63]^=sum&1;
2458 }
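/* The final block[63] ^= sum & 1 above (and in the intra bitexact variant) is
 * MPEG-2 mismatch control: sum starts at -1, so when the accumulated
 * coefficient sum is even the LSB of the last coefficient gets toggled,
 * keeping the overall sum odd. */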
2459
2460 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2461                                   DCTELEM *block, int n, int qscale)
2462 {
2463     int i, level, qmul, qadd;
2464     int nCoeffs;
2465
2466     assert(s->block_last_index[n]>=0);
2467
2468     qmul = qscale << 1;
2469
2470     if (!s->h263_aic) {
2471         if (n < 4)
2472             block[0] = block[0] * s->y_dc_scale;
2473         else
2474             block[0] = block[0] * s->c_dc_scale;
2475         qadd = (qscale - 1) | 1;
2476     }else{
2477         qadd = 0;
2478     }
2479     if(s->ac_pred)
2480         nCoeffs=63;
2481     else
2482         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2483
2484     for(i=1; i<=nCoeffs; i++) {
2485         level = block[i];
2486         if (level) {
2487             if (level < 0) {
2488                 level = level * qmul - qadd;
2489             } else {
2490                 level = level * qmul + qadd;
2491             }
2492             block[i] = level;
2493         }
2494     }
2495 }
2496
2497 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2498                                   DCTELEM *block, int n, int qscale)
2499 {
2500     int i, level, qmul, qadd;
2501     int nCoeffs;
2502
2503     assert(s->block_last_index[n]>=0);
2504
2505     qadd = (qscale - 1) | 1;
2506     qmul = qscale << 1;
2507
2508     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2509
2510     for(i=0; i<=nCoeffs; i++) {
2511         level = block[i];
2512         if (level) {
2513             if (level < 0) {
2514                 level = level * qmul - qadd;
2515             } else {
2516                 level = level * qmul + qadd;
2517             }
2518             block[i] = level;
2519         }
2520     }
2521 }
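/* Quick example for the H.263-style reconstruction above: with qscale = 5,
 * qmul = 10 and qadd = (5 - 1) | 1 = 5, so level = 3 becomes 35 and level = -3
 * becomes -35, i.e. |level'| = 2 * qscale * |level| + qadd. */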
2522
2523 /**
2524  * set qscale and update qscale dependent variables.
2525  */
2526 void ff_set_qscale(MpegEncContext * s, int qscale)
2527 {
2528     if (qscale < 1)
2529         qscale = 1;
2530     else if (qscale > 31)
2531         qscale = 31;
2532
2533     s->qscale = qscale;
2534     s->chroma_qscale= s->chroma_qscale_table[qscale];
2535
2536     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2537     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2538 }
2539
2540 void ff_MPV_report_decode_progress(MpegEncContext *s)
2541 {
2542     if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2543         ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);
2544 }