[ffmpeg.git] / libavcodec / mpegvideo.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
32 #include "avcodec.h"
33 #include "dsputil.h"
34 #include "internal.h"
35 #include "mpegvideo.h"
36 #include "mjpegenc.h"
37 #include "msmpeg4.h"
38 #include "xvmc_internal.h"
39 #include "thread.h"
40 #include <limits.h>
41
42 //#undef NDEBUG
43 //#include <assert.h>
44
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46                                    DCTELEM *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48                                    DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50                                    DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52                                    DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54                                    DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56                                   DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58                                   DCTELEM *block, int n, int qscale);
59
60
61 /* enable all paranoid tests for rounding, overflows, etc... */
62 //#define PARANOID
63
64 //#define DEBUG
65
66
67 static const uint8_t ff_default_chroma_qscale_table[32] = {
68 //   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
69      0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
70     16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
71 };
72
73 const uint8_t ff_mpeg1_dc_scale_table[128] = {
74 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
75     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
83 };
84
85 static const uint8_t mpeg2_dc_scale_table1[128] = {
86 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
87     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
95 };
96
97 static const uint8_t mpeg2_dc_scale_table2[128] = {
98 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
99     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
107 };
108
109 static const uint8_t mpeg2_dc_scale_table3[128] = {
110 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
111     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 };
120
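    /* MPEG-2 DC scale tables, selected by intra_dc_precision (0-3): intra DC
     * coefficients use a quantization step of 8, 4, 2 or 1 respectively.
     * The value does not depend on qscale, hence the constant tables. */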
121 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
122     ff_mpeg1_dc_scale_table,
123     mpeg2_dc_scale_table1,
124     mpeg2_dc_scale_table2,
125     mpeg2_dc_scale_table3,
126 };
127
128 const enum PixelFormat ff_pixfmt_list_420[] = {
129     PIX_FMT_YUV420P,
130     PIX_FMT_NONE
131 };
132
133 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
134     PIX_FMT_DXVA2_VLD,
135     PIX_FMT_VAAPI_VLD,
136     PIX_FMT_VDA_VLD,
137     PIX_FMT_YUV420P,
138     PIX_FMT_NONE
139 };
140
141 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
142                                           const uint8_t *end,
143                                           uint32_t * restrict state)
144 {
145     int i;
146
147     assert(p <= end);
148     if (p >= end)
149         return end;
150
151     for (i = 0; i < 3; i++) {
152         uint32_t tmp = *state << 8;
153         *state = tmp + *(p++);
154         if (tmp == 0x100 || p == end)
155             return p;
156     }
157
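        /* Scan for the next 00 00 01 start-code prefix, looking at the three
         * bytes ending at p[-1]: if p[-1] > 1 no prefix can end before p + 2,
         * so skip 3 bytes; if p[-2] is non-zero the earliest match ends at
         * p + 1, so skip 2; otherwise advance one byte at a time until
         * p[-3] == 0, p[-2] == 0 and p[-1] == 1. */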
158     while (p < end) {
159         if      (p[-1] > 1      ) p += 3;
160         else if (p[-2]          ) p += 2;
161         else if (p[-3]|(p[-1]-1)) p++;
162         else {
163             p++;
164             break;
165         }
166     }
167
168     p = FFMIN(p, end) - 4;
169     *state = AV_RB32(p);
170
171     return p + 4;
172 }
173
174 /* init common dct for both encoder and decoder */
175 av_cold int ff_dct_common_init(MpegEncContext *s)
176 {
177     ff_dsputil_init(&s->dsp, s->avctx);
178
179     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
180     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
181     s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
182     s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
183     s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
184     if (s->flags & CODEC_FLAG_BITEXACT)
185         s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
186     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
187
188 #if ARCH_X86
189     ff_MPV_common_init_x86(s);
190 #elif ARCH_ALPHA
191     ff_MPV_common_init_axp(s);
192 #elif HAVE_MMI
193     ff_MPV_common_init_mmi(s);
194 #elif ARCH_ARM
195     ff_MPV_common_init_arm(s);
196 #elif HAVE_ALTIVEC
197     ff_MPV_common_init_altivec(s);
198 #elif ARCH_BFIN
199     ff_MPV_common_init_bfin(s);
200 #endif
201
202     /* load & permute scantables
203      * note: only wmv uses different ones
204      */
205     if (s->alternate_scan) {
206         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
207         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
208     } else {
209         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
210         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
211     }
212     ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
213     ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
214
215     return 0;
216 }
217
218 void ff_copy_picture(Picture *dst, Picture *src)
219 {
220     *dst = *src;
221     dst->f.type = FF_BUFFER_TYPE_COPY;
222 }
223
224 /**
225  * Release a frame buffer
226  */
227 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
228 {
229     /* WM Image / Screen codecs allocate internal buffers with different
230      * dimensions / colorspaces; ignore user-defined callbacks for these. */
231     if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
232         s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
233         s->codec_id != AV_CODEC_ID_MSS2)
234         ff_thread_release_buffer(s->avctx, &pic->f);
235     else
236         avcodec_default_release_buffer(s->avctx, &pic->f);
237     av_freep(&pic->f.hwaccel_picture_private);
238 }
239
240 /**
241  * Allocate a frame buffer
242  */
243 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
244 {
245     int r;
246
247     if (s->avctx->hwaccel) {
248         assert(!pic->f.hwaccel_picture_private);
249         if (s->avctx->hwaccel->priv_data_size) {
250             pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
251             if (!pic->f.hwaccel_picture_private) {
252                 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
253                 return -1;
254             }
255         }
256     }
257
258     if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
259         s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
260         s->codec_id != AV_CODEC_ID_MSS2)
261         r = ff_thread_get_buffer(s->avctx, &pic->f);
262     else
263         r = avcodec_default_get_buffer(s->avctx, &pic->f);
264
265     if (r < 0 || !pic->f.type || !pic->f.data[0]) {
266         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
267                r, pic->f.type, pic->f.data[0]);
268         av_freep(&pic->f.hwaccel_picture_private);
269         return -1;
270     }
271
272     if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
273                         s->uvlinesize != pic->f.linesize[1])) {
274         av_log(s->avctx, AV_LOG_ERROR,
275                "get_buffer() failed (stride changed)\n");
276         free_frame_buffer(s, pic);
277         return -1;
278     }
279
280     if (pic->f.linesize[1] != pic->f.linesize[2]) {
281         av_log(s->avctx, AV_LOG_ERROR,
282                "get_buffer() failed (uv stride mismatch)\n");
283         free_frame_buffer(s, pic);
284         return -1;
285     }
286
287     return 0;
288 }
289
290 /**
291  * Allocate a Picture.
292  * The pixels are allocated/set by calling get_buffer() if shared = 0
293  */
294 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
295 {
296     const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
297
298     // the + 1 is needed so memset(,,stride*height) does not sig11
299
300     const int mb_array_size = s->mb_stride * s->mb_height;
301     const int b8_array_size = s->b8_stride * s->mb_height * 2;
302     const int b4_array_size = s->b4_stride * s->mb_height * 4;
303     int i;
304     int r = -1;
305
306     if (shared) {
307         assert(pic->f.data[0]);
308         assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
309         pic->f.type = FF_BUFFER_TYPE_SHARED;
310     } else {
311         assert(!pic->f.data[0]);
312
313         if (alloc_frame_buffer(s, pic) < 0)
314             return -1;
315
316         s->linesize   = pic->f.linesize[0];
317         s->uvlinesize = pic->f.linesize[1];
318     }
319
320     if (pic->f.qscale_table == NULL) {
321         if (s->encoding) {
322             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
323                               mb_array_size * sizeof(int16_t), fail)
324             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
325                               mb_array_size * sizeof(int16_t), fail)
326             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
327                               mb_array_size * sizeof(int8_t ), fail)
328         }
329
330         FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
331                           mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
332         FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
333                           (big_mb_num + s->mb_stride) * sizeof(uint8_t),
334                           fail)
335         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
336                           (big_mb_num + s->mb_stride) * sizeof(uint32_t),
337                           fail)
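            /* The base arrays are over-allocated and the pointers offset by
             * 2 * mb_stride + 1, so that reads of neighbouring macroblocks
             * above and to the left of the picture edge stay inside the
             * allocation. */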
338         pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
339         pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
340         if (s->out_format == FMT_H264) {
341             for (i = 0; i < 2; i++) {
342                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
343                                   2 * (b4_array_size + 4) * sizeof(int16_t),
344                                   fail)
345                 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
346                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
347                                   4 * mb_array_size * sizeof(uint8_t), fail)
348             }
349             pic->f.motion_subsample_log2 = 2;
350         } else if (s->out_format == FMT_H263 || s->encoding ||
351                    (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
352             for (i = 0; i < 2; i++) {
353                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
354                                   2 * (b8_array_size + 4) * sizeof(int16_t),
355                                   fail)
356                 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
357                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
358                                   4 * mb_array_size * sizeof(uint8_t), fail)
359             }
360             pic->f.motion_subsample_log2 = 3;
361         }
362         if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
363             FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
364                               64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
365         }
366         pic->f.qstride = s->mb_stride;
367         FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
368                           1 * sizeof(AVPanScan), fail)
369     }
370
371     pic->owner2 = s;
372
373     return 0;
374 fail: // for the FF_ALLOCZ_OR_GOTO macro
375     if (r >= 0)
376         free_frame_buffer(s, pic);
377     return -1;
378 }
379
380 /**
381  * Deallocate a picture.
382  */
383 static void free_picture(MpegEncContext *s, Picture *pic)
384 {
385     int i;
386
387     if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
388         free_frame_buffer(s, pic);
389     }
390
391     av_freep(&pic->mb_var);
392     av_freep(&pic->mc_mb_var);
393     av_freep(&pic->mb_mean);
394     av_freep(&pic->f.mbskip_table);
395     av_freep(&pic->qscale_table_base);
396     av_freep(&pic->mb_type_base);
397     av_freep(&pic->f.dct_coeff);
398     av_freep(&pic->f.pan_scan);
399     pic->f.mb_type = NULL;
400     for (i = 0; i < 2; i++) {
401         av_freep(&pic->motion_val_base[i]);
402         av_freep(&pic->f.ref_index[i]);
403     }
404
405     if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
406         for (i = 0; i < 4; i++) {
407             pic->f.base[i] =
408             pic->f.data[i] = NULL;
409         }
410         pic->f.type = 0;
411     }
412 }
413
414 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
415 {
416     int y_size = s->b8_stride * (2 * s->mb_height + 1);
417     int c_size = s->mb_stride * (s->mb_height + 1);
418     int yc_size = y_size + 2 * c_size;
419     int i;
420
421     // edge emu needs blocksize + filter length - 1
422     // (= 17x17 for halfpel / 21x21 for h264)
423     FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
424                       (s->width + 64) * 2 * 21 * 2, fail);    // (width + edge + align)*interlaced*MBsize*tolerance
425
426     // FIXME should be linesize instead of s->width * 2
427     // but that is not known before get_buffer()
428     FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
429                       (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
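        /* The scratch pointers below all alias this single scratchpad
         * allocation; the buffers are reused for different tasks rather than
         * allocated separately. */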
430     s->me.temp         = s->me.scratchpad;
431     s->rd_scratchpad   = s->me.scratchpad;
432     s->b_scratchpad    = s->me.scratchpad;
433     s->obmc_scratchpad = s->me.scratchpad + 16;
434     if (s->encoding) {
435         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
436                           ME_MAP_SIZE * sizeof(uint32_t), fail)
437         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
438                           ME_MAP_SIZE * sizeof(uint32_t), fail)
439         if (s->avctx->noise_reduction) {
440             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
441                               2 * 64 * sizeof(int), fail)
442         }
443     }
444     FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
445     s->block = s->blocks[0];
446
447     for (i = 0; i < 12; i++) {
448         s->pblocks[i] = &s->block[i];
449     }
450
451     if (s->out_format == FMT_H263) {
452         /* ac values */
453         FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
454                           yc_size * sizeof(int16_t) * 16, fail);
455         s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
456         s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
457         s->ac_val[2] = s->ac_val[1] + c_size;
458     }
459
460     return 0;
461 fail:
462     return -1; // free() through ff_MPV_common_end()
463 }
464
465 static void free_duplicate_context(MpegEncContext *s)
466 {
467     if (s == NULL)
468         return;
469
470     av_freep(&s->edge_emu_buffer);
471     av_freep(&s->me.scratchpad);
472     s->me.temp =
473     s->rd_scratchpad =
474     s->b_scratchpad =
475     s->obmc_scratchpad = NULL;
476
477     av_freep(&s->dct_error_sum);
478     av_freep(&s->me.map);
479     av_freep(&s->me.score_map);
480     av_freep(&s->blocks);
481     av_freep(&s->ac_val_base);
482     s->block = NULL;
483 }
484
485 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
486 {
487 #define COPY(a) bak->a = src->a
488     COPY(edge_emu_buffer);
489     COPY(me.scratchpad);
490     COPY(me.temp);
491     COPY(rd_scratchpad);
492     COPY(b_scratchpad);
493     COPY(obmc_scratchpad);
494     COPY(me.map);
495     COPY(me.score_map);
496     COPY(blocks);
497     COPY(block);
498     COPY(start_mb_y);
499     COPY(end_mb_y);
500     COPY(me.map_generation);
501     COPY(pb);
502     COPY(dct_error_sum);
503     COPY(dct_count[0]);
504     COPY(dct_count[1]);
505     COPY(ac_val_base);
506     COPY(ac_val[0]);
507     COPY(ac_val[1]);
508     COPY(ac_val[2]);
509 #undef COPY
510 }
511
512 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
513 {
514     MpegEncContext bak;
515     int i;
516     // FIXME copy only needed parts
517     // START_TIMER
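        /* Save dst's per-thread buffers and state, copy the shared context
         * from src wholesale, then restore the saved pointers so each slice
         * context keeps its own scratch memory. */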
518     backup_duplicate_context(&bak, dst);
519     memcpy(dst, src, sizeof(MpegEncContext));
520     backup_duplicate_context(dst, &bak);
521     for (i = 0; i < 12; i++) {
522         dst->pblocks[i] = &dst->block[i];
523     }
524     // STOP_TIMER("update_duplicate_context")
525     // about 10k cycles / 0.01 sec for 1000 frames on a 1 GHz CPU with 2 threads
526 }
527
528 int ff_mpeg_update_thread_context(AVCodecContext *dst,
529                                   const AVCodecContext *src)
530 {
531     MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
532
533     if (dst == src || !s1->context_initialized)
534         return 0;
535
536     // FIXME can parameters change on I-frames?
537     // in that case dst may need a reinit
538     if (!s->context_initialized) {
539         memcpy(s, s1, sizeof(MpegEncContext));
540
541         s->avctx                 = dst;
542         s->picture_range_start  += MAX_PICTURE_COUNT;
543         s->picture_range_end    += MAX_PICTURE_COUNT;
544         s->bitstream_buffer      = NULL;
545         s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
546
547         ff_MPV_common_init(s);
548     }
549
550     s->avctx->coded_height  = s1->avctx->coded_height;
551     s->avctx->coded_width   = s1->avctx->coded_width;
552     s->avctx->width         = s1->avctx->width;
553     s->avctx->height        = s1->avctx->height;
554
555     s->coded_picture_number = s1->coded_picture_number;
556     s->picture_number       = s1->picture_number;
557     s->input_picture_number = s1->input_picture_number;
558
559     memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
560     memcpy(&s->last_picture, &s1->last_picture,
561            (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
562
563     s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
564     s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
565     s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
566
567     // Error/bug resilience
568     s->next_p_frame_damaged = s1->next_p_frame_damaged;
569     s->workaround_bugs      = s1->workaround_bugs;
570
571     // MPEG4 timing info
572     memcpy(&s->time_increment_bits, &s1->time_increment_bits,
573            (char *) &s1->shape - (char *) &s1->time_increment_bits);
574
575     // B-frame info
576     s->max_b_frames = s1->max_b_frames;
577     s->low_delay    = s1->low_delay;
578     s->dropable     = s1->dropable;
579
580     // DivX handling (doesn't work)
581     s->divx_packed  = s1->divx_packed;
582
583     if (s1->bitstream_buffer) {
584         if (s1->bitstream_buffer_size +
585             FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
586             av_fast_malloc(&s->bitstream_buffer,
587                            &s->allocated_bitstream_buffer_size,
588                            s1->allocated_bitstream_buffer_size);
589         s->bitstream_buffer_size = s1->bitstream_buffer_size;
590         memcpy(s->bitstream_buffer, s1->bitstream_buffer,
591                s1->bitstream_buffer_size);
592         memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
593                FF_INPUT_BUFFER_PADDING_SIZE);
594     }
595
596     // MPEG2/interlacing info
597     memcpy(&s->progressive_sequence, &s1->progressive_sequence,
598            (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
599
600     if (!s1->first_field) {
601         s->last_pict_type = s1->pict_type;
602         if (s1->current_picture_ptr)
603             s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
604
605         if (s1->pict_type != AV_PICTURE_TYPE_B) {
606             s->last_non_b_pict_type = s1->pict_type;
607         }
608     }
609
610     return 0;
611 }
612
613 /**
614  * Set the given MpegEncContext to common defaults
615  * (same for encoding and decoding).
616  * The changed fields will not depend upon the
617  * prior state of the MpegEncContext.
618  */
619 void ff_MPV_common_defaults(MpegEncContext *s)
620 {
621     s->y_dc_scale_table      =
622     s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
623     s->chroma_qscale_table   = ff_default_chroma_qscale_table;
624     s->progressive_frame     = 1;
625     s->progressive_sequence  = 1;
626     s->picture_structure     = PICT_FRAME;
627
628     s->coded_picture_number  = 0;
629     s->picture_number        = 0;
630     s->input_picture_number  = 0;
631
632     s->picture_in_gop_number = 0;
633
634     s->f_code                = 1;
635     s->b_code                = 1;
636
637     s->picture_range_start   = 0;
638     s->picture_range_end     = MAX_PICTURE_COUNT;
639
640     s->slice_context_count   = 1;
641 }
642
643 /**
644  * Set the given MpegEncContext to defaults for decoding.
645  * the changed fields will not depend upon
646  * the prior state of the MpegEncContext.
647  */
648 void ff_MPV_decode_defaults(MpegEncContext *s)
649 {
650     ff_MPV_common_defaults(s);
651 }
652
653 /**
654  * init common structure for both encoder and decoder.
655  * this assumes that some variables like width/height are already set
656  */
657 av_cold int ff_MPV_common_init(MpegEncContext *s)
658 {
659     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
660     int nb_slices = (HAVE_THREADS &&
661                      s->avctx->active_thread_type & FF_THREAD_SLICE) ?
662                     s->avctx->thread_count : 1;
663
664     if (s->encoding && s->avctx->slices)
665         nb_slices = s->avctx->slices;
666
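        /* For interlaced MPEG-2 the frame height is rounded up to a multiple
         * of 32 so that each field covers a whole number of 16-pixel
         * macroblock rows and mb_height stays even. */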
667     if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
668         s->mb_height = (s->height + 31) / 32 * 2;
669     else if (s->codec_id != AV_CODEC_ID_H264)
670         s->mb_height = (s->height + 15) / 16;
671
672     if (s->avctx->pix_fmt == PIX_FMT_NONE) {
673         av_log(s->avctx, AV_LOG_ERROR,
674                "decoding to PIX_FMT_NONE is not supported.\n");
675         return -1;
676     }
677
678     if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
679         int max_slices;
680         if (s->mb_height)
681             max_slices = FFMIN(MAX_THREADS, s->mb_height);
682         else
683             max_slices = MAX_THREADS;
684         av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
685                " reducing to %d\n", nb_slices, max_slices);
686         nb_slices = max_slices;
687     }
688
689     if ((s->width || s->height) &&
690         av_image_check_size(s->width, s->height, 0, s->avctx))
691         return -1;
692
693     ff_dct_common_init(s);
694
695     s->flags  = s->avctx->flags;
696     s->flags2 = s->avctx->flags2;
697
698     if (s->width && s->height) {
699         s->mb_width   = (s->width + 15) / 16;
700         s->mb_stride  = s->mb_width + 1;
701         s->b8_stride  = s->mb_width * 2 + 1;
702         s->b4_stride  = s->mb_width * 4 + 1;
703         mb_array_size = s->mb_height * s->mb_stride;
704         mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
705
706         /* set chroma shifts */
707         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
708                                       &s->chroma_y_shift);
709
710         /* set default edge pos, will be overridden
711          * in decode_header if needed */
712         s->h_edge_pos = s->mb_width * 16;
713         s->v_edge_pos = s->mb_height * 16;
714
715         s->mb_num     = s->mb_width * s->mb_height;
716
717         s->block_wrap[0] =
718         s->block_wrap[1] =
719         s->block_wrap[2] =
720         s->block_wrap[3] = s->b8_stride;
721         s->block_wrap[4] =
722         s->block_wrap[5] = s->mb_stride;
723
724         y_size  = s->b8_stride * (2 * s->mb_height + 1);
725         c_size  = s->mb_stride * (s->mb_height + 1);
726         yc_size = y_size + 2   * c_size;
727
728         /* convert fourcc to upper case */
729         s->codec_tag          = avpriv_toupper4(s->avctx->codec_tag);
730
731         s->stream_codec_tag   = avpriv_toupper4(s->avctx->stream_codec_tag);
732
733         s->avctx->coded_frame = &s->current_picture.f;
734
735         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
736                           fail); // error resilience code looks cleaner with this
737         for (y = 0; y < s->mb_height; y++)
738             for (x = 0; x < s->mb_width; x++)
739                 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
740
741         s->mb_index2xy[s->mb_height * s->mb_width] =
742                        (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
743
744         if (s->encoding) {
745             /* Allocate MV tables */
746             FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
747                               mv_table_size * 2 * sizeof(int16_t), fail);
748             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
749                               mv_table_size * 2 * sizeof(int16_t), fail);
750             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
751                               mv_table_size * 2 * sizeof(int16_t), fail);
752             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
753                               mv_table_size * 2 * sizeof(int16_t), fail);
754             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
755                               mv_table_size * 2 * sizeof(int16_t), fail);
756             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
757                               mv_table_size * 2 * sizeof(int16_t), fail);
758             s->p_mv_table            = s->p_mv_table_base +
759                                        s->mb_stride + 1;
760             s->b_forw_mv_table       = s->b_forw_mv_table_base +
761                                        s->mb_stride + 1;
762             s->b_back_mv_table       = s->b_back_mv_table_base +
763                                        s->mb_stride + 1;
764             s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
765                                        s->mb_stride + 1;
766             s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
767                                        s->mb_stride + 1;
768             s->b_direct_mv_table     = s->b_direct_mv_table_base +
769                                        s->mb_stride + 1;
770
771             if (s->msmpeg4_version) {
772                 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
773                                   2 * 2 * (MAX_LEVEL + 1) *
774                                   (MAX_RUN + 1) * 2 * sizeof(int), fail);
775             }
776             FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
777
778             /* Allocate MB type table */
779             FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
780                               sizeof(uint16_t), fail); // needed for encoding
781
782             FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
783                               sizeof(int), fail);
784
785             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
786                               64 * 32   * sizeof(int), fail);
787             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
788                               64 * 32   * sizeof(int), fail);
789             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
790                               64 * 32 * 2 * sizeof(uint16_t), fail);
791             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
792                               64 * 32 * 2 * sizeof(uint16_t), fail);
793             FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
794                               MAX_PICTURE_COUNT * sizeof(Picture *), fail);
795             FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
796                               MAX_PICTURE_COUNT * sizeof(Picture *), fail);
797
798             if (s->avctx->noise_reduction) {
799                 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
800                                   2 * 64 * sizeof(uint16_t), fail);
801             }
802
803             FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
804                              mb_array_size * sizeof(float), fail);
805             FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
806                              mb_array_size * sizeof(float), fail);
807         }
808     }
809
810     s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
811     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
812                       s->picture_count * sizeof(Picture), fail);
813     for (i = 0; i < s->picture_count; i++) {
814         avcodec_get_frame_defaults(&s->picture[i].f);
815     }
816
817     if (s->width && s->height) {
818         FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
819                          mb_array_size * sizeof(uint8_t), fail);
820         FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
821                           mb_array_size * sizeof(uint8_t), fail);
822
823         if (s->codec_id == AV_CODEC_ID_MPEG4 ||
824             (s->flags & CODEC_FLAG_INTERLACED_ME)) {
825             /* interlaced direct mode decoding tables */
826             for (i = 0; i < 2; i++) {
827                 int j, k;
828                 for (j = 0; j < 2; j++) {
829                     for (k = 0; k < 2; k++) {
830                         FF_ALLOCZ_OR_GOTO(s->avctx,
831                                           s->b_field_mv_table_base[i][j][k],
832                                           mv_table_size * 2 * sizeof(int16_t),
833                                           fail);
834                         s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
835                                                        s->mb_stride + 1;
836                     }
837                     FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
838                                       mb_array_size * 2 * sizeof(uint8_t),
839                                       fail);
840                     FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
841                                       mv_table_size * 2 * sizeof(int16_t),
842                                       fail);
843                     s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
844                                                 + s->mb_stride + 1;
845                 }
846                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
847                                   mb_array_size * 2 * sizeof(uint8_t),
848                                   fail);
849             }
850         }
851         if (s->out_format == FMT_H263) {
852             /* cbp values */
853             FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
854             s->coded_block = s->coded_block_base + s->b8_stride + 1;
855
856             /* cbp, ac_pred, pred_dir */
857             FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
858                               mb_array_size * sizeof(uint8_t), fail);
859             FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
860                               mb_array_size * sizeof(uint8_t), fail);
861         }
862
863         if (s->h263_pred || s->h263_plus || !s->encoding) {
864             /* dc values */
865             // MN: we need these for error resilience of intra-frames
866             FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
867                               yc_size * sizeof(int16_t), fail);
868             s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
869             s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
870             s->dc_val[2] = s->dc_val[1] + c_size;
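                /* Reset all DC predictors to 1024 (128 << 3), the value used
                 * for "no prediction available" in H.263/MPEG-4 style DC
                 * prediction. */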
871             for (i = 0; i < yc_size; i++)
872                 s->dc_val_base[i] = 1024;
873         }
874
875         /* which mb is an intra block */
876         FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
877         memset(s->mbintra_table, 1, mb_array_size);
878
879         /* init macroblock skip table */
880         FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
881         // Note: the + 2 is for a quicker mpeg4 slice_end detection
882
883         s->parse_context.state = -1;
884         if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
885             s->avctx->debug_mv) {
886             s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
887                         2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
888             s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
889                         2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
890             s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
891                         2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
892         }
893     }
894
895     s->context_initialized = 1;
896     s->thread_context[0]   = s;
897
898     if (s->width && s->height) {
899         if (nb_slices > 1) {
900             for (i = 1; i < nb_slices; i++) {
901                 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
902                 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
903             }
904
905             for (i = 0; i < nb_slices; i++) {
906                 if (init_duplicate_context(s->thread_context[i], s) < 0)
907                     goto fail;
908                 s->thread_context[i]->start_mb_y =
909                     (s->mb_height * (i) + nb_slices / 2) / nb_slices;
910                 s->thread_context[i]->end_mb_y   =
911                     (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
912             }
913         } else {
914             if (init_duplicate_context(s, s) < 0)
915                 goto fail;
916             s->start_mb_y = 0;
917             s->end_mb_y   = s->mb_height;
918         }
919         s->slice_context_count = nb_slices;
920     }
921
922     return 0;
923  fail:
924     ff_MPV_common_end(s);
925     return -1;
926 }
927
928 /* init common structure for both encoder and decoder */
929 void ff_MPV_common_end(MpegEncContext *s)
930 {
931     int i, j, k;
932
933     if (s->slice_context_count > 1) {
934         for (i = 0; i < s->slice_context_count; i++) {
935             free_duplicate_context(s->thread_context[i]);
936         }
937         for (i = 1; i < s->slice_context_count; i++) {
938             av_freep(&s->thread_context[i]);
939         }
940         s->slice_context_count = 1;
941     } else free_duplicate_context(s);
942
943     av_freep(&s->parse_context.buffer);
944     s->parse_context.buffer_size = 0;
945
946     av_freep(&s->mb_type);
947     av_freep(&s->p_mv_table_base);
948     av_freep(&s->b_forw_mv_table_base);
949     av_freep(&s->b_back_mv_table_base);
950     av_freep(&s->b_bidir_forw_mv_table_base);
951     av_freep(&s->b_bidir_back_mv_table_base);
952     av_freep(&s->b_direct_mv_table_base);
953     s->p_mv_table            = NULL;
954     s->b_forw_mv_table       = NULL;
955     s->b_back_mv_table       = NULL;
956     s->b_bidir_forw_mv_table = NULL;
957     s->b_bidir_back_mv_table = NULL;
958     s->b_direct_mv_table     = NULL;
959     for (i = 0; i < 2; i++) {
960         for (j = 0; j < 2; j++) {
961             for (k = 0; k < 2; k++) {
962                 av_freep(&s->b_field_mv_table_base[i][j][k]);
963                 s->b_field_mv_table[i][j][k] = NULL;
964             }
965             av_freep(&s->b_field_select_table[i][j]);
966             av_freep(&s->p_field_mv_table_base[i][j]);
967             s->p_field_mv_table[i][j] = NULL;
968         }
969         av_freep(&s->p_field_select_table[i]);
970     }
971
972     av_freep(&s->dc_val_base);
973     av_freep(&s->coded_block_base);
974     av_freep(&s->mbintra_table);
975     av_freep(&s->cbp_table);
976     av_freep(&s->pred_dir_table);
977
978     av_freep(&s->mbskip_table);
979     av_freep(&s->bitstream_buffer);
980     s->allocated_bitstream_buffer_size = 0;
981
982     av_freep(&s->avctx->stats_out);
983     av_freep(&s->ac_stats);
984     av_freep(&s->error_status_table);
985     av_freep(&s->er_temp_buffer);
986     av_freep(&s->mb_index2xy);
987     av_freep(&s->lambda_table);
988     av_freep(&s->q_intra_matrix);
989     av_freep(&s->q_inter_matrix);
990     av_freep(&s->q_intra_matrix16);
991     av_freep(&s->q_inter_matrix16);
992     av_freep(&s->input_picture);
993     av_freep(&s->reordered_input_picture);
994     av_freep(&s->dct_offset);
995     av_freep(&s->cplx_tab);
996     av_freep(&s->bits_tab);
997
998     if (s->picture && !s->avctx->internal->is_copy) {
999         for (i = 0; i < s->picture_count; i++) {
1000             free_picture(s, &s->picture[i]);
1001         }
1002     }
1003     av_freep(&s->picture);
1004     s->context_initialized      = 0;
1005     s->last_picture_ptr         =
1006     s->next_picture_ptr         =
1007     s->current_picture_ptr      = NULL;
1008     s->linesize = s->uvlinesize = 0;
1009
1010     for (i = 0; i < 3; i++)
1011         av_freep(&s->visualization_buffer[i]);
1012
1013     if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
1014         avcodec_default_free_buffers(s->avctx);
1015 }
1016
1017 void ff_init_rl(RLTable *rl,
1018                 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1019 {
1020     int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1021     uint8_t index_run[MAX_RUN + 1];
1022     int last, run, level, start, end, i;
1023
1024     /* If table is static, we can quit if rl->max_level[0] is not NULL */
1025     if (static_store && rl->max_level[0])
1026         return;
1027
1028     /* compute max_level[], max_run[] and index_run[] */
1029     for (last = 0; last < 2; last++) {
1030         if (last == 0) {
1031             start = 0;
1032             end = rl->last;
1033         } else {
1034             start = rl->last;
1035             end = rl->n;
1036         }
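             /* Codes [0, rl->last) are run/level pairs followed by more
              * coefficients; codes [rl->last, rl->n) terminate the block, so
              * the tables are built separately for the two classes. */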
1037
1038         memset(max_level, 0, MAX_RUN + 1);
1039         memset(max_run, 0, MAX_LEVEL + 1);
1040         memset(index_run, rl->n, MAX_RUN + 1);
1041         for (i = start; i < end; i++) {
1042             run   = rl->table_run[i];
1043             level = rl->table_level[i];
1044             if (index_run[run] == rl->n)
1045                 index_run[run] = i;
1046             if (level > max_level[run])
1047                 max_level[run] = level;
1048             if (run > max_run[level])
1049                 max_run[level] = run;
1050         }
1051         if (static_store)
1052             rl->max_level[last] = static_store[last];
1053         else
1054             rl->max_level[last] = av_malloc(MAX_RUN + 1);
1055         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1056         if (static_store)
1057             rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
1058         else
1059             rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
1060         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1061         if (static_store)
1062             rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1063         else
1064             rl->index_run[last] = av_malloc(MAX_RUN + 1);
1065         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1066     }
1067 }
1068
1069 void ff_init_vlc_rl(RLTable *rl)
1070 {
1071     int i, q;
1072
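         /* Precompute one RL_VLC table per qscale with the H.263-style
          * inverse quantization (level * 2 * qscale + ((qscale - 1) | 1))
          * folded into the stored level, so decoding needs only one lookup. */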
1073     for (q = 0; q < 32; q++) {
1074         int qmul = q * 2;
1075         int qadd = (q - 1) | 1;
1076
1077         if (q == 0) {
1078             qmul = 1;
1079             qadd = 0;
1080         }
1081         for (i = 0; i < rl->vlc.table_size; i++) {
1082             int code = rl->vlc.table[i][0];
1083             int len  = rl->vlc.table[i][1];
1084             int level, run;
1085
1086             if (len == 0) { // illegal code
1087                 run   = 66;
1088                 level = MAX_LEVEL;
1089             } else if (len < 0) { // more bits needed
1090                 run   = 0;
1091                 level = code;
1092             } else {
1093                 if (code == rl->n) { // esc
1094                     run   = 66;
1095                     level =  0;
1096                 } else {
1097                     run   = rl->table_run[code] + 1;
1098                     level = rl->table_level[code] * qmul + qadd;
1099                     if (code >= rl->last) run += 192;
1100                 }
1101             }
1102             rl->rl_vlc[q][i].len   = len;
1103             rl->rl_vlc[q][i].level = level;
1104             rl->rl_vlc[q][i].run   = run;
1105         }
1106     }
1107 }
1108
1109 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1110 {
1111     int i;
1112
1113     /* release non reference frames */
1114     for (i = 0; i < s->picture_count; i++) {
1115         if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1116             (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1117             (remove_current || &s->picture[i] !=  s->current_picture_ptr)
1118             /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1119             free_frame_buffer(s, &s->picture[i]);
1120         }
1121     }
1122 }
1123
1124 int ff_find_unused_picture(MpegEncContext *s, int shared)
1125 {
1126     int i;
1127
1128     if (shared) {
1129         for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1130             if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1131                 return i;
1132         }
1133     } else {
1134         for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1135             if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
1136                 return i; // FIXME
1137         }
1138         for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1139             if (s->picture[i].f.data[0] == NULL)
1140                 return i;
1141         }
1142     }
1143
1144     return AVERROR_INVALIDDATA;
1145 }
1146
1147 static void update_noise_reduction(MpegEncContext *s)
1148 {
1149     int intra, i;
1150
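         /* dct_error_sum[intra][i] accumulates the absolute value seen at
          * each coefficient position; dct_offset then becomes roughly
          * noise_reduction * dct_count / dct_error_sum, so positions that are
          * usually small are pulled towards zero more aggressively before
          * quantization. The halving below keeps the sums bounded while
          * preserving their ratio. */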
1151     for (intra = 0; intra < 2; intra++) {
1152         if (s->dct_count[intra] > (1 << 16)) {
1153             for (i = 0; i < 64; i++) {
1154                 s->dct_error_sum[intra][i] >>= 1;
1155             }
1156             s->dct_count[intra] >>= 1;
1157         }
1158
1159         for (i = 0; i < 64; i++) {
1160             s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1161                                        s->dct_count[intra] +
1162                                        s->dct_error_sum[intra][i] / 2) /
1163                                       (s->dct_error_sum[intra][i] + 1);
1164         }
1165     }
1166 }
1167
1168 /**
1169  * generic function for encode/decode called after coding/decoding
1170  * the header and before a frame is coded/decoded.
1171  */
1172 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1173 {
1174     int i;
1175     Picture *pic;
1176     s->mb_skipped = 0;
1177
1178     /* mark & release old frames */
1179     if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
1180         if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1181             s->last_picture_ptr != s->next_picture_ptr &&
1182             s->last_picture_ptr->f.data[0]) {
1183             if (s->last_picture_ptr->owner2 == s)
1184                 free_frame_buffer(s, s->last_picture_ptr);
1185         }
1186
1187         /* release forgotten pictures */
1188         /* if (mpeg124/h263) */
1189         if (!s->encoding) {
1190             for (i = 0; i < s->picture_count; i++) {
1191                 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1192                     &s->picture[i] != s->last_picture_ptr &&
1193                     &s->picture[i] != s->next_picture_ptr &&
1194                     s->picture[i].f.reference) {
1195                     if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1196                         av_log(avctx, AV_LOG_ERROR,
1197                                "releasing zombie picture\n");
1198                     free_frame_buffer(s, &s->picture[i]);
1199                 }
1200             }
1201         }
1202     }
1203
1204     if (!s->encoding) {
1205         ff_release_unused_pictures(s, 1);
1206
1207         if (s->current_picture_ptr &&
1208             s->current_picture_ptr->f.data[0] == NULL) {
1209             // we already have an unused image
1210             // (maybe it was set before reading the header)
1211             pic = s->current_picture_ptr;
1212         } else {
1213             i   = ff_find_unused_picture(s, 0);
1214             pic = &s->picture[i];
1215         }
1216
1217         pic->f.reference = 0;
1218         if (!s->dropable) {
1219             if (s->codec_id == AV_CODEC_ID_H264)
1220                 pic->f.reference = s->picture_structure;
1221             else if (s->pict_type != AV_PICTURE_TYPE_B)
1222                 pic->f.reference = 3;
1223         }
1224
1225         pic->f.coded_picture_number = s->coded_picture_number++;
1226
1227         if (ff_alloc_picture(s, pic, 0) < 0)
1228             return -1;
1229
1230         s->current_picture_ptr = pic;
1231         // FIXME use only the vars from current_pic
1232         s->current_picture_ptr->f.top_field_first = s->top_field_first;
1233         if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1234             s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1235             if (s->picture_structure != PICT_FRAME)
1236                 s->current_picture_ptr->f.top_field_first =
1237                     (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1238         }
1239         s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1240                                                      !s->progressive_sequence;
1241         s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
1242     }
1243
1244     s->current_picture_ptr->f.pict_type = s->pict_type;
1245     // if (s->flags && CODEC_FLAG_QSCALE)
1246     //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1247     s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1248
1249     ff_copy_picture(&s->current_picture, s->current_picture_ptr);
1250
1251     if (s->pict_type != AV_PICTURE_TYPE_B) {
1252         s->last_picture_ptr = s->next_picture_ptr;
1253         if (!s->dropable)
1254             s->next_picture_ptr = s->current_picture_ptr;
1255     }
1256     /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1257            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1258            s->last_picture_ptr    ? s->last_picture_ptr->f.data[0]    : NULL,
1259            s->next_picture_ptr    ? s->next_picture_ptr->f.data[0]    : NULL,
1260            s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1261            s->pict_type, s->dropable); */
1262
1263     if (s->codec_id != AV_CODEC_ID_H264) {
1264         if ((s->last_picture_ptr == NULL ||
1265              s->last_picture_ptr->f.data[0] == NULL) &&
1266             (s->pict_type != AV_PICTURE_TYPE_I ||
1267              s->picture_structure != PICT_FRAME)) {
1268             if (s->pict_type != AV_PICTURE_TYPE_I)
1269                 av_log(avctx, AV_LOG_ERROR,
1270                        "warning: first frame is no keyframe\n");
1271             else if (s->picture_structure != PICT_FRAME)
1272                 av_log(avctx, AV_LOG_INFO,
1273                        "allocate dummy last picture for field based first keyframe\n");
1274
1275             /* Allocate a dummy frame */
1276             i = ff_find_unused_picture(s, 0);
1277             s->last_picture_ptr = &s->picture[i];
1278             if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1279                 s->last_picture_ptr = NULL;
1280                 return -1;
1281             }
1282             ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
1283             ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
1284             s->last_picture_ptr->f.reference = 3;
1285         }
1286         if ((s->next_picture_ptr == NULL ||
1287              s->next_picture_ptr->f.data[0] == NULL) &&
1288             s->pict_type == AV_PICTURE_TYPE_B) {
1289             /* Allocate a dummy frame */
1290             i = ff_find_unused_picture(s, 0);
1291             s->next_picture_ptr = &s->picture[i];
1292             if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1293                 s->next_picture_ptr = NULL;
1294                 return -1;
1295             }
1296             ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
1297             ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
1298             s->next_picture_ptr->f.reference = 3;
1299         }
1300     }
1301
1302     if (s->last_picture_ptr)
1303         ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1304     if (s->next_picture_ptr)
1305         ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1306
1307     if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
1308         (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3)) {
1309         if (s->next_picture_ptr)
1310             s->next_picture_ptr->owner2 = s;
1311         if (s->last_picture_ptr)
1312             s->last_picture_ptr->owner2 = s;
1313     }
1314
1315     assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1316                                                  s->last_picture_ptr->f.data[0]));
1317
1318     if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1319         int i;
1320         for (i = 0; i < 4; i++) {
1321             if (s->picture_structure == PICT_BOTTOM_FIELD) {
1322                 s->current_picture.f.data[i] +=
1323                     s->current_picture.f.linesize[i];
1324             }
1325             s->current_picture.f.linesize[i] *= 2;
1326             s->last_picture.f.linesize[i]    *= 2;
1327             s->next_picture.f.linesize[i]    *= 2;
1328         }
1329     }
1330
1331     s->err_recognition = avctx->err_recognition;
1332
1333     /* set dequantizer, we can't do it during init as
1334      * it might change for mpeg4 and we can't do it in the header
1335      * decode as init is not called for mpeg4 there yet */
1336     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1337         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1338         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1339     } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1340         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1341         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1342     } else {
1343         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1344         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1345     }
1346
1347     if (s->dct_error_sum) {
1348         assert(s->avctx->noise_reduction && s->encoding);
1349         update_noise_reduction(s);
1350     }
1351
1352     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1353         return ff_xvmc_field_start(s, avctx);
1354
1355     return 0;
1356 }
1357
1358 /* generic function for encode/decode called after a
1359  * frame has been coded/decoded. */
1360 void ff_MPV_frame_end(MpegEncContext *s)
1361 {
1362     int i;
1363     /* redraw edges for the frame if decoding didn't complete */
1364     // just to make sure that all data is rendered.
1365     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1366         ff_xvmc_field_end(s);
1367     } else if ((s->error_count || s->encoding) &&
1368               !s->avctx->hwaccel &&
1369               !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1370               s->unrestricted_mv &&
1371               s->current_picture.f.reference &&
1372               !s->intra_only &&
1373               !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1374         int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1375         int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
1376         s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1377                           s->h_edge_pos, s->v_edge_pos,
1378                           EDGE_WIDTH, EDGE_WIDTH,
1379                           EDGE_TOP | EDGE_BOTTOM);
1380         s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1381                           s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1382                           EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1383                           EDGE_TOP | EDGE_BOTTOM);
1384         s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1385                           s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1386                           EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1387                           EDGE_TOP | EDGE_BOTTOM);
1388     }
1389
1390     emms_c();
1391
1392     s->last_pict_type                 = s->pict_type;
1393     s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1394     if (s->pict_type!= AV_PICTURE_TYPE_B) {
1395         s->last_non_b_pict_type = s->pict_type;
1396     }
1397 #if 0
1398     /* copy back current_picture variables */
1399     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1400         if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1401             s->picture[i] = s->current_picture;
1402             break;
1403         }
1404     }
1405     assert(i < MAX_PICTURE_COUNT);
1406 #endif
1407
1408     if (s->encoding) {
1409         /* release non-reference frames */
1410         for (i = 0; i < s->picture_count; i++) {
1411             if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1412                 /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1413                 free_frame_buffer(s, &s->picture[i]);
1414             }
1415         }
1416     }
1417     // clear copies, to avoid confusion
1418 #if 0
1419     memset(&s->last_picture,    0, sizeof(Picture));
1420     memset(&s->next_picture,    0, sizeof(Picture));
1421     memset(&s->current_picture, 0, sizeof(Picture));
1422 #endif
1423     s->avctx->coded_frame = &s->current_picture_ptr->f;
1424
1425     if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
1426         ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
1427     }
1428 }
1429
1430 /**
1431  * Draw a line from (sx, sy) to (ex, ey).
1432  * @param w width of the image
1433  * @param h height of the image
1434  * @param stride stride/linesize of the image
1435  * @param color color of the line
1436  */
1437 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1438                       int w, int h, int stride, int color)
1439 {
1440     int x, y, fr, f;
1441
1442     sx = av_clip(sx, 0, w - 1);
1443     sy = av_clip(sy, 0, h - 1);
1444     ex = av_clip(ex, 0, w - 1);
1445     ey = av_clip(ey, 0, h - 1);
1446
1447     buf[sy * stride + sx] += color;
1448
1449     if (FFABS(ex - sx) > FFABS(ey - sy)) {
1450         if (sx > ex) {
1451             FFSWAP(int, sx, ex);
1452             FFSWAP(int, sy, ey);
1453         }
1454         buf += sx + sy * stride;
1455         ex  -= sx;
1456         f    = ((ey - sy) << 16) / ex;
1457         for (x = 0; x <= ex; x++) {
1458             y  = (x * f) >> 16;
1459             fr = (x * f) & 0xFFFF;
1460             buf[y * stride + x]       += (color * (0x10000 - fr)) >> 16;
1461             buf[(y + 1) * stride + x] += (color *            fr ) >> 16;
1462         }
1463     } else {
1464         if (sy > ey) {
1465             FFSWAP(int, sx, ex);
1466             FFSWAP(int, sy, ey);
1467         }
1468         buf += sx + sy * stride;
1469         ey  -= sy;
1470         if (ey)
1471             f  = ((ex - sx) << 16) / ey;
1472         else
1473             f = 0;
1474         for (y = 0; y <= ey; y++) {
1475             x  = (y * f) >> 16;
1476             fr = (y * f) & 0xFFFF;
1477             buf[y * stride + x]     += (color * (0x10000 - fr)) >> 16;
1478             buf[y * stride + x + 1] += (color *            fr ) >> 16;
1479         }
1480     }
1481 }
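/* Note on draw_line() above: it steps with a 16.16 fixed-point slope. In the
 * x-major branch, f = ((ey - sy) << 16) / ex is the per-column slope; the
 * integer part of x * f picks the row while the fractional part fr splits the
 * color between two vertically adjacent pixels, giving cheap anti-aliasing.
 * A minimal usage sketch (illustrative only and kept out of the build; the
 * buffer name and sizes are made up for the example):
 */
#if 0
static void draw_line_example(void)
{
    /* hypothetical 64x64 grayscale scratch image, not part of the decoder */
    uint8_t canvas[64 * 64] = { 0 };

    /* line from (2, 3) to (50, 20); the stride equals the width here */
    draw_line(canvas, 2, 3, 50, 20, 64, 64, 64, 100);
}
#endif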
1482
1483 /**
1484  * Draw an arrow from (ex, ey) -> (sx, sy).
1485  * @param w width of the image
1486  * @param h height of the image
1487  * @param stride stride/linesize of the image
1488  * @param color color of the arrow
1489  */
1490 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1491                        int ey, int w, int h, int stride, int color)
1492 {
1493     int dx,dy;
1494
1495     sx = av_clip(sx, -100, w + 100);
1496     sy = av_clip(sy, -100, h + 100);
1497     ex = av_clip(ex, -100, w + 100);
1498     ey = av_clip(ey, -100, h + 100);
1499
1500     dx = ex - sx;
1501     dy = ey - sy;
1502
1503     if (dx * dx + dy * dy > 3 * 3) {
1504         int rx =  dx + dy;
1505         int ry = -dx + dy;
1506         int length = ff_sqrt((rx * rx + ry * ry) << 8);
1507
1508         // FIXME subpixel accuracy
1509         rx = ROUNDED_DIV(rx * 3 << 4, length);
1510         ry = ROUNDED_DIV(ry * 3 << 4, length);
1511
1512         draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1513         draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1514     }
1515     draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1516 }
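/* In draw_arrow() above, (rx, ry) = (dx + dy, -dx + dy) is the shaft
 * direction rotated by 45 degrees; after dividing by the <<8-scaled
 * ff_sqrt() length, the two short strokes are roughly three pixels long and
 * form the arrowhead at (sx, sy). */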
1517
1518 /**
1519  * Print debugging info for the given picture.
1520  */
1521 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
1522 {
1523     if (s->avctx->hwaccel || !pict || !pict->mb_type)
1524         return;
1525
1526     if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1527         int x,y;
1528
1529         av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1530         switch (pict->pict_type) {
1531         case AV_PICTURE_TYPE_I:
1532             av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1533             break;
1534         case AV_PICTURE_TYPE_P:
1535             av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1536             break;
1537         case AV_PICTURE_TYPE_B:
1538             av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1539             break;
1540         case AV_PICTURE_TYPE_S:
1541             av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1542             break;
1543         case AV_PICTURE_TYPE_SI:
1544             av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1545             break;
1546         case AV_PICTURE_TYPE_SP:
1547             av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1548             break;
1549         }
1550         for (y = 0; y < s->mb_height; y++) {
1551             for (x = 0; x < s->mb_width; x++) {
1552                 if (s->avctx->debug & FF_DEBUG_SKIP) {
1553                     int count = s->mbskip_table[x + y * s->mb_stride];
1554                     if (count > 9)
1555                         count = 9;
1556                     av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1557                 }
1558                 if (s->avctx->debug & FF_DEBUG_QP) {
1559                     av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1560                            pict->qscale_table[x + y * s->mb_stride]);
1561                 }
1562                 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1563                     int mb_type = pict->mb_type[x + y * s->mb_stride];
1564                     // Type & MV direction
1565                     if (IS_PCM(mb_type))
1566                         av_log(s->avctx, AV_LOG_DEBUG, "P");
1567                     else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1568                         av_log(s->avctx, AV_LOG_DEBUG, "A");
1569                     else if (IS_INTRA4x4(mb_type))
1570                         av_log(s->avctx, AV_LOG_DEBUG, "i");
1571                     else if (IS_INTRA16x16(mb_type))
1572                         av_log(s->avctx, AV_LOG_DEBUG, "I");
1573                     else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1574                         av_log(s->avctx, AV_LOG_DEBUG, "d");
1575                     else if (IS_DIRECT(mb_type))
1576                         av_log(s->avctx, AV_LOG_DEBUG, "D");
1577                     else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1578                         av_log(s->avctx, AV_LOG_DEBUG, "g");
1579                     else if (IS_GMC(mb_type))
1580                         av_log(s->avctx, AV_LOG_DEBUG, "G");
1581                     else if (IS_SKIP(mb_type))
1582                         av_log(s->avctx, AV_LOG_DEBUG, "S");
1583                     else if (!USES_LIST(mb_type, 1))
1584                         av_log(s->avctx, AV_LOG_DEBUG, ">");
1585                     else if (!USES_LIST(mb_type, 0))
1586                         av_log(s->avctx, AV_LOG_DEBUG, "<");
1587                     else {
1588                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1589                         av_log(s->avctx, AV_LOG_DEBUG, "X");
1590                     }
1591
1592                     // segmentation
1593                     if (IS_8X8(mb_type))
1594                         av_log(s->avctx, AV_LOG_DEBUG, "+");
1595                     else if (IS_16X8(mb_type))
1596                         av_log(s->avctx, AV_LOG_DEBUG, "-");
1597                     else if (IS_8X16(mb_type))
1598                         av_log(s->avctx, AV_LOG_DEBUG, "|");
1599                     else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1600                         av_log(s->avctx, AV_LOG_DEBUG, " ");
1601                     else
1602                         av_log(s->avctx, AV_LOG_DEBUG, "?");
1603
1604
1605                     if (IS_INTERLACED(mb_type))
1606                         av_log(s->avctx, AV_LOG_DEBUG, "=");
1607                     else
1608                         av_log(s->avctx, AV_LOG_DEBUG, " ");
1609                 }
1610                 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1611             }
1612             av_log(s->avctx, AV_LOG_DEBUG, "\n");
1613         }
1614     }
1615
1616     if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1617         (s->avctx->debug_mv)) {
1618         const int shift = 1 + s->quarter_sample;
1619         int mb_y;
1620         uint8_t *ptr;
1621         int i;
1622         int h_chroma_shift, v_chroma_shift, block_height;
1623         const int width          = s->avctx->width;
1624         const int height         = s->avctx->height;
1625         const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1626         const int mv_stride      = (s->mb_width << mv_sample_log2) +
1627                                    (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
1628         s->low_delay = 0; // needed to see the vectors without trashing the buffers
1629
1630         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1631                                       &h_chroma_shift, &v_chroma_shift);
1632         for (i = 0; i < 3; i++) {
1633             memcpy(s->visualization_buffer[i], pict->data[i],
1634                    (i == 0) ? pict->linesize[i] * height:
1635                               pict->linesize[i] * height >> v_chroma_shift);
1636             pict->data[i] = s->visualization_buffer[i];
1637         }
1638         pict->type   = FF_BUFFER_TYPE_COPY;
1639         ptr          = pict->data[0];
1640         block_height = 16 >> v_chroma_shift;
1641
1642         for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1643             int mb_x;
1644             for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1645                 const int mb_index = mb_x + mb_y * s->mb_stride;
1646                 if ((s->avctx->debug_mv) && pict->motion_val) {
1647                     int type;
1648                     for (type = 0; type < 3; type++) {
1649                         int direction = 0;
1650                         switch (type) {
1651                         case 0:
1652                             if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1653                                 (pict->pict_type!= AV_PICTURE_TYPE_P))
1654                                 continue;
1655                             direction = 0;
1656                             break;
1657                         case 1:
1658                             if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1659                                 (pict->pict_type!= AV_PICTURE_TYPE_B))
1660                                 continue;
1661                             direction = 0;
1662                             break;
1663                         case 2:
1664                             if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1665                                 (pict->pict_type!= AV_PICTURE_TYPE_B))
1666                                 continue;
1667                             direction = 1;
1668                             break;
1669                         }
1670                         if (!USES_LIST(pict->mb_type[mb_index], direction))
1671                             continue;
1672
1673                         if (IS_8X8(pict->mb_type[mb_index])) {
1674                             int i;
1675                             for (i = 0; i < 4; i++) {
1676                                 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1677                                 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1678                                 int xy = (mb_x * 2 + (i & 1) +
1679                                           (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1680                                 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1681                                 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1682                                 draw_arrow(ptr, sx, sy, mx, my, width,
1683                                            height, s->linesize, 100);
1684                             }
1685                         } else if (IS_16X8(pict->mb_type[mb_index])) {
1686                             int i;
1687                             for (i = 0; i < 2; i++) {
1688                                 int sx = mb_x * 16 + 8;
1689                                 int sy = mb_y * 16 + 4 + 8 * i;
1690                                 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1691                                 int mx = (pict->motion_val[direction][xy][0] >> shift);
1692                                 int my = (pict->motion_val[direction][xy][1] >> shift);
1693
1694                                 if (IS_INTERLACED(pict->mb_type[mb_index]))
1695                                     my *= 2;
1696
1697                                 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1698                                            height, s->linesize, 100);
1699                             }
1700                         } else if (IS_8X16(pict->mb_type[mb_index])) {
1701                             int i;
1702                             for (i = 0; i < 2; i++) {
1703                                 int sx = mb_x * 16 + 4 + 8 * i;
1704                                 int sy = mb_y * 16 + 8;
1705                                 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1706                                 int mx = pict->motion_val[direction][xy][0] >> shift;
1707                                 int my = pict->motion_val[direction][xy][1] >> shift;
1708
1709                                 if (IS_INTERLACED(pict->mb_type[mb_index]))
1710                                     my *= 2;
1711
1712                                 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1713                                            height, s->linesize, 100);
1714                             }
1715                         } else {
1716                               int sx = mb_x * 16 + 8;
1717                               int sy = mb_y * 16 + 8;
1718                               int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
1719                               int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1720                               int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1721                               draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1722                         }
1723                     }
1724                 }
1725                 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1726                     uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1727                                  0x0101010101010101ULL;
1728                     int y;
1729                     for (y = 0; y < block_height; y++) {
1730                         *(uint64_t *)(pict->data[1] + 8 * mb_x +
1731                                       (block_height * mb_y + y) *
1732                                       pict->linesize[1]) = c;
1733                         *(uint64_t *)(pict->data[2] + 8 * mb_x +
1734                                       (block_height * mb_y + y) *
1735                                       pict->linesize[2]) = c;
1736                     }
1737                 }
1738                 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1739                     pict->motion_val) {
1740                     int mb_type = pict->mb_type[mb_index];
1741                     uint64_t u,v;
1742                     int y;
1743 #define COLOR(theta, r) \
1744     u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1745     v = (int)(128 + r * sin(theta * 3.141592 / 180));
1746
1747
1748                     u = v = 128;
1749                     if (IS_PCM(mb_type)) {
1750                         COLOR(120, 48)
1751                     } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1752                                IS_INTRA16x16(mb_type)) {
1753                         COLOR(30, 48)
1754                     } else if (IS_INTRA4x4(mb_type)) {
1755                         COLOR(90, 48)
1756                     } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1757                         // COLOR(120, 48)
1758                     } else if (IS_DIRECT(mb_type)) {
1759                         COLOR(150, 48)
1760                     } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1761                         COLOR(170, 48)
1762                     } else if (IS_GMC(mb_type)) {
1763                         COLOR(190, 48)
1764                     } else if (IS_SKIP(mb_type)) {
1765                         // COLOR(180, 48)
1766                     } else if (!USES_LIST(mb_type, 1)) {
1767                         COLOR(240, 48)
1768                     } else if (!USES_LIST(mb_type, 0)) {
1769                         COLOR(0, 48)
1770                     } else {
1771                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1772                         COLOR(300,48)
1773                     }
1774
1775                     u *= 0x0101010101010101ULL;
1776                     v *= 0x0101010101010101ULL;
1777                     for (y = 0; y < block_height; y++) {
1778                         *(uint64_t *)(pict->data[1] + 8 * mb_x +
1779                                       (block_height * mb_y + y) * pict->linesize[1]) = u;
1780                         *(uint64_t *)(pict->data[2] + 8 * mb_x +
1781                                       (block_height * mb_y + y) * pict->linesize[2]) = v;
1782                     }
1783
1784                     // segmentation
1785                     if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
1786                         *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
1787                                       (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1788                         *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
1789                                       (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1790                     }
1791                     if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
1792                         for (y = 0; y < 16; y++)
1793                             pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
1794                                           pict->linesize[0]] ^= 0x80;
1795                     }
1796                     if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
1797                         int dm = 1 << (mv_sample_log2 - 2);
1798                         for (i = 0; i < 4; i++) {
1799                             int sx = mb_x * 16 + 8 * (i & 1);
1800                             int sy = mb_y * 16 + 8 * (i >> 1);
1801                             int xy = (mb_x * 2 + (i & 1) +
1802                                      (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1803                             // FIXME bidir
1804                             int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
1805                             if (mv[0] != mv[dm] ||
1806                                 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
1807                                 for (y = 0; y < 8; y++)
1808                                     pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
1809                             if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
1810                                 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
1811                                               pict->linesize[0]) ^= 0x8080808080808080ULL;
1812                         }
1813                     }
1814
1815                     if (IS_INTERLACED(mb_type) &&
1816                         s->codec_id == AV_CODEC_ID_H264) {
1817                         // hmm
1818                     }
1819                 }
1820                 s->mbskip_table[mb_index] = 0;
1821             }
1822         }
1823     }
1824 }
1825
1826 /**
1827  * Find the lowest MB row referenced in the MVs.
1828  */
1829 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1830 {
1831     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1832     int my, off, i, mvs;
1833
1834     if (s->picture_structure != PICT_FRAME) goto unhandled;
1835
1836     switch (s->mv_type) {
1837         case MV_TYPE_16X16:
1838             mvs = 1;
1839             break;
1840         case MV_TYPE_16X8:
1841             mvs = 2;
1842             break;
1843         case MV_TYPE_8X8:
1844             mvs = 4;
1845             break;
1846         default:
1847             goto unhandled;
1848     }
1849
1850     for (i = 0; i < mvs; i++) {
1851         my = s->mv[dir][i][1]<<qpel_shift;
1852         my_max = FFMAX(my_max, my);
1853         my_min = FFMIN(my_min, my);
1854     }
1855
1856     off = (FFMAX(-my_min, my_max) + 63) >> 6;
1857
1858     return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1859 unhandled:
1860     return s->mb_height-1;
1861 }
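/* Unit check for the function above: motion vectors are stored in half-pel
 * units (quarter-pel when s->quarter_sample is set), so "<< qpel_shift"
 * normalizes them to quarter-pel. 64 quarter-pel units span 16 luma pixels,
 * i.e. one macroblock row, hence the "(... + 63) >> 6" round-up; e.g. a
 * vertical component of -70 quarter-pel gives off = (70 + 63) >> 6 = 2 rows. */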
1862
1863 /* put block[] to dest[] */
1864 static inline void put_dct(MpegEncContext *s,
1865                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1866 {
1867     s->dct_unquantize_intra(s, block, i, qscale);
1868     s->dsp.idct_put (dest, line_size, block);
1869 }
1870
1871 /* add block[] to dest[] */
1872 static inline void add_dct(MpegEncContext *s,
1873                            DCTELEM *block, int i, uint8_t *dest, int line_size)
1874 {
1875     if (s->block_last_index[i] >= 0) {
1876         s->dsp.idct_add (dest, line_size, block);
1877     }
1878 }
1879
1880 static inline void add_dequant_dct(MpegEncContext *s,
1881                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1882 {
1883     if (s->block_last_index[i] >= 0) {
1884         s->dct_unquantize_inter(s, block, i, qscale);
1885
1886         s->dsp.idct_add (dest, line_size, block);
1887     }
1888 }
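/* In the helpers above, block_last_index[i] is the position of the last
 * nonzero coefficient in scan order; a negative value means the block carries
 * no residual at all, so the dequantization and the IDCT add are skipped. */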
1889
1890 /**
1891  * Clean dc, ac, coded_block for the current non-intra MB.
1892  */
1893 void ff_clean_intra_table_entries(MpegEncContext *s)
1894 {
1895     int wrap = s->b8_stride;
1896     int xy = s->block_index[0];
1897
1898     s->dc_val[0][xy           ] =
1899     s->dc_val[0][xy + 1       ] =
1900     s->dc_val[0][xy     + wrap] =
1901     s->dc_val[0][xy + 1 + wrap] = 1024;
1902     /* ac pred */
1903     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
1904     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1905     if (s->msmpeg4_version>=3) {
1906         s->coded_block[xy           ] =
1907         s->coded_block[xy + 1       ] =
1908         s->coded_block[xy     + wrap] =
1909         s->coded_block[xy + 1 + wrap] = 0;
1910     }
1911     /* chroma */
1912     wrap = s->mb_stride;
1913     xy = s->mb_x + s->mb_y * wrap;
1914     s->dc_val[1][xy] =
1915     s->dc_val[2][xy] = 1024;
1916     /* ac pred */
1917     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1918     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1919
1920     s->mbintra_table[xy]= 0;
1921 }
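/* 1024 is the neutral reset value of the H.263/MPEG-4 style DC prediction
 * (roughly mid-grey, 128, at the default DC scale of 8); clearing ac_val and
 * coded_block alongside it keeps the intra predictors consistent once this
 * macroblock is known to be non-intra. */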
1922
1923 /* generic function called after a macroblock has been parsed by the
1924    decoder or after it has been encoded by the encoder.
1925
1926    Important variables used:
1927    s->mb_intra : true if intra macroblock
1928    s->mv_dir   : motion vector direction
1929    s->mv_type  : motion vector type
1930    s->mv       : motion vector
1931    s->interlaced_dct : true if interlaced dct used (mpeg2)
1932  */
1933 static av_always_inline
1934 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
1935                             int is_mpeg12)
1936 {
1937     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1938     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1939         ff_xvmc_decode_mb(s);//xvmc uses pblocks
1940         return;
1941     }
1942
1943     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1944        /* save DCT coefficients */
1945        int i,j;
1946        DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
1947        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1948        for(i=0; i<6; i++){
1949            for(j=0; j<64; j++){
1950                *dct++ = block[i][s->dsp.idct_permutation[j]];
1951                av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
1952            }
1953            av_log(s->avctx, AV_LOG_DEBUG, "\n");
1954        }
1955     }
1956
1957     s->current_picture.f.qscale_table[mb_xy] = s->qscale;
1958
1959     /* update DC predictors for P macroblocks */
1960     if (!s->mb_intra) {
1961         if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1962             if(s->mbintra_table[mb_xy])
1963                 ff_clean_intra_table_entries(s);
1964         } else {
1965             s->last_dc[0] =
1966             s->last_dc[1] =
1967             s->last_dc[2] = 128 << s->intra_dc_precision;
1968         }
1969     }
1970     else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1971         s->mbintra_table[mb_xy]=1;
1972
1973     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
1974         uint8_t *dest_y, *dest_cb, *dest_cr;
1975         int dct_linesize, dct_offset;
1976         op_pixels_func (*op_pix)[4];
1977         qpel_mc_func (*op_qpix)[16];
1978         const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
1979         const int uvlinesize = s->current_picture.f.linesize[1];
1980         const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
1981         const int block_size = 8;
1982
1983         /* avoid copy if macroblock skipped in last frame too */
1984         /* only do this while decoding, since encoding may trash the buffers a bit */
1985         if(!s->encoding){
1986             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1987
1988             if (s->mb_skipped) {
1989                 s->mb_skipped= 0;
1990                 assert(s->pict_type!=AV_PICTURE_TYPE_I);
1991                 *mbskip_ptr = 1;
1992             } else if(!s->current_picture.f.reference) {
1993                 *mbskip_ptr = 1;
1994             } else{
1995                 *mbskip_ptr = 0; /* not skipped */
1996             }
1997         }
1998
1999         dct_linesize = linesize << s->interlaced_dct;
2000         dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;
2001
2002         if(readable){
2003             dest_y=  s->dest[0];
2004             dest_cb= s->dest[1];
2005             dest_cr= s->dest[2];
2006         }else{
2007             dest_y = s->b_scratchpad;
2008             dest_cb= s->b_scratchpad+16*linesize;
2009             dest_cr= s->b_scratchpad+32*linesize;
2010         }
2011
2012         if (!s->mb_intra) {
2013             /* motion handling */
2014             /* decoding or more than one mb_type (MC was already done otherwise) */
2015             if(!s->encoding){
2016
2017                 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2018                     if (s->mv_dir & MV_DIR_FORWARD) {
2019                         ff_thread_await_progress(&s->last_picture_ptr->f,
2020                                                  ff_MPV_lowest_referenced_row(s, 0),
2021                                                  0);
2022                     }
2023                     if (s->mv_dir & MV_DIR_BACKWARD) {
2024                         ff_thread_await_progress(&s->next_picture_ptr->f,
2025                                                  ff_MPV_lowest_referenced_row(s, 1),
2026                                                  0);
2027                     }
2028                 }
2029
2030                 op_qpix= s->me.qpel_put;
2031                 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2032                     op_pix = s->dsp.put_pixels_tab;
2033                 }else{
2034                     op_pix = s->dsp.put_no_rnd_pixels_tab;
2035                 }
2036                 if (s->mv_dir & MV_DIR_FORWARD) {
2037                     ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2038                     op_pix = s->dsp.avg_pixels_tab;
2039                     op_qpix= s->me.qpel_avg;
2040                 }
2041                 if (s->mv_dir & MV_DIR_BACKWARD) {
2042                     ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2043                 }
2044             }
2045
2046             /* skip dequant / idct if we are really late ;) */
2047             if(s->avctx->skip_idct){
2048                 if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2049                    ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2050                    || s->avctx->skip_idct >= AVDISCARD_ALL)
2051                     goto skip_idct;
2052             }
2053
2054             /* add dct residue */
2055             if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2056                                 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2057                 add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
2058                 add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
2059                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
2060                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2061
2062                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2063                     if (s->chroma_y_shift){
2064                         add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2065                         add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2066                     }else{
2067                         dct_linesize >>= 1;
2068                         dct_offset >>=1;
2069                         add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
2070                         add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
2071                         add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2072                         add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2073                     }
2074                 }
2075             } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2076                 add_dct(s, block[0], 0, dest_y                          , dct_linesize);
2077                 add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
2078                 add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
2079                 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2080
2081                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2082                     if(s->chroma_y_shift){//Chroma420
2083                         add_dct(s, block[4], 4, dest_cb, uvlinesize);
2084                         add_dct(s, block[5], 5, dest_cr, uvlinesize);
2085                     }else{
2086                         //chroma422
2087                         dct_linesize = uvlinesize << s->interlaced_dct;
2088                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2089
2090                         add_dct(s, block[4], 4, dest_cb, dct_linesize);
2091                         add_dct(s, block[5], 5, dest_cr, dct_linesize);
2092                         add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2093                         add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2094                         if(!s->chroma_x_shift){//Chroma444
2095                             add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2096                             add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2097                             add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2098                             add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2099                         }
2100                     }
2101                 }//fi gray
2102             }
2103             else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2104                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2105             }
2106         } else {
2107             /* dct only in intra block */
2108             if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2109                 put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
2110                 put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
2111                 put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
2112                 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2113
2114                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2115                     if(s->chroma_y_shift){
2116                         put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2117                         put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2118                     }else{
2119                         dct_offset >>=1;
2120                         dct_linesize >>=1;
2121                         put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
2122                         put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
2123                         put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2124                         put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2125                     }
2126                 }
2127             }else{
2128                 s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
2129                 s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
2130                 s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
2131                 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2132
2133                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2134                     if(s->chroma_y_shift){
2135                         s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2136                         s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2137                     }else{
2138
2139                         dct_linesize = uvlinesize << s->interlaced_dct;
2140                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2141
2142                         s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
2143                         s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
2144                         s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2145                         s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2146                         if(!s->chroma_x_shift){//Chroma444
2147                             s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
2148                             s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
2149                             s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2150                             s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2151                         }
2152                     }
2153                 }//gray
2154             }
2155         }
2156 skip_idct:
2157         if(!readable){
2158             s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
2159             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2160             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2161         }
2162     }
2163 }
2164
2165 void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2166 #if !CONFIG_SMALL
2167     if(s->out_format == FMT_MPEG1) {
2168         MPV_decode_mb_internal(s, block, 1);
2169     } else
2170 #endif
2171         MPV_decode_mb_internal(s, block, 0);
2172 }
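/* Unless CONFIG_SMALL is set, MPV_decode_mb_internal() is instantiated twice
 * with a compile-time constant is_mpeg12, so the codec-family branches inside
 * it can be folded away; with CONFIG_SMALL a single generic copy is kept to
 * save code size. */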
2173
2174 /**
2175  * @param h the normal height; it is reduced automatically if needed for the last row
2176  */
2177 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2178     const int field_pic= s->picture_structure != PICT_FRAME;
2179     if(field_pic){
2180         h <<= 1;
2181         y <<= 1;
2182     }
2183
2184     if (!s->avctx->hwaccel
2185        && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2186        && s->unrestricted_mv
2187        && s->current_picture.f.reference
2188        && !s->intra_only
2189        && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2190         int sides = 0, edge_h;
2191         int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2192         int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
2193         if (y==0) sides |= EDGE_TOP;
2194         if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2195
2196         edge_h= FFMIN(h, s->v_edge_pos - y);
2197
2198         s->dsp.draw_edges(s->current_picture_ptr->f.data[0] +  y         *s->linesize,
2199                           s->linesize,           s->h_edge_pos,         edge_h,
2200                           EDGE_WIDTH,            EDGE_WIDTH,            sides);
2201         s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2202                           s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
2203                           EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
2204         s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2205                           s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
2206                           EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
2207     }
2208
2209     h= FFMIN(h, s->avctx->height - y);
2210
2211     if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2212
2213     if (s->avctx->draw_horiz_band) {
2214         AVFrame *src;
2215         int offset[AV_NUM_DATA_POINTERS];
2216         int i;
2217
2218         if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2219             src = &s->current_picture_ptr->f;
2220         else if(s->last_picture_ptr)
2221             src = &s->last_picture_ptr->f;
2222         else
2223             return;
2224
2225         if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2226             for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2227                 offset[i] = 0;
2228         }else{
2229             offset[0]= y * s->linesize;
2230             offset[1]=
2231             offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2232             for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2233                 offset[i] = 0;
2234         }
2235
2236         emms_c();
2237
2238         s->avctx->draw_horiz_band(s->avctx, src, offset,
2239                                   y, s->picture_structure, h);
2240     }
2241 }
2242
2243 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2244     const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2245     const int uvlinesize = s->current_picture.f.linesize[1];
2246     const int mb_size= 4;
2247
2248     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
2249     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
2250     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2251     s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2252     s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2253     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2254     //block_index is not used by mpeg2, so it is not affected by chroma_format
2255
2256     s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
2257     s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2258     s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2259
2260     if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2261     {
2262         if(s->picture_structure==PICT_FRAME){
2263             s->dest[0] += s->mb_y *   linesize << mb_size;
2264             s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2265             s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2266         }else{
2267             s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
2268             s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2269             s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2270             assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2271         }
2272     }
2273 }
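/* The "- 2"/"- 1" terms and the "(s->mb_x - 1)" bias above place the block
 * indices and the dest pointers one macroblock to the left of mb_x; they are
 * presumably advanced macroblock by macroblock as the row is processed.
 * block_index[0..3] address the four luma 8x8 blocks in the b8_stride grid,
 * block_index[4] and [5] the two chroma blocks. */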
2274
2275 void ff_mpeg_flush(AVCodecContext *avctx){
2276     int i;
2277     MpegEncContext *s = avctx->priv_data;
2278
2279     if(s==NULL || s->picture==NULL)
2280         return;
2281
2282     for(i=0; i<s->picture_count; i++){
2283        if (s->picture[i].f.data[0] &&
2284            (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2285             s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2286         free_frame_buffer(s, &s->picture[i]);
2287     }
2288     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2289
2290     s->mb_x= s->mb_y= 0;
2291
2292     s->parse_context.state= -1;
2293     s->parse_context.frame_start_found= 0;
2294     s->parse_context.overread= 0;
2295     s->parse_context.overread_index= 0;
2296     s->parse_context.index= 0;
2297     s->parse_context.last_index= 0;
2298     s->bitstream_buffer_size=0;
2299     s->pp_time=0;
2300 }
2301
2302 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2303                                    DCTELEM *block, int n, int qscale)
2304 {
2305     int i, level, nCoeffs;
2306     const uint16_t *quant_matrix;
2307
2308     nCoeffs= s->block_last_index[n];
2309
2310     if (n < 4)
2311         block[0] = block[0] * s->y_dc_scale;
2312     else
2313         block[0] = block[0] * s->c_dc_scale;
2314     /* XXX: only mpeg1 */
2315     quant_matrix = s->intra_matrix;
2316     for(i=1;i<=nCoeffs;i++) {
2317         int j= s->intra_scantable.permutated[i];
2318         level = block[j];
2319         if (level) {
2320             if (level < 0) {
2321                 level = -level;
2322                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2323                 level = (level - 1) | 1;
2324                 level = -level;
2325             } else {
2326                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2327                 level = (level - 1) | 1;
2328             }
2329             block[j] = level;
2330         }
2331     }
2332 }
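/* Worked example for the intra reconstruction above: level = 3, qscale = 4
 * and a matrix entry of 16 give (3 * 4 * 16) >> 3 = 24; the "(level - 1) | 1"
 * step then forces the value odd, yielding 23. This oddification is MPEG-1's
 * mismatch control. */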
2333
2334 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2335                                    DCTELEM *block, int n, int qscale)
2336 {
2337     int i, level, nCoeffs;
2338     const uint16_t *quant_matrix;
2339
2340     nCoeffs= s->block_last_index[n];
2341
2342     quant_matrix = s->inter_matrix;
2343     for(i=0; i<=nCoeffs; i++) {
2344         int j= s->intra_scantable.permutated[i];
2345         level = block[j];
2346         if (level) {
2347             if (level < 0) {
2348                 level = -level;
2349                 level = (((level << 1) + 1) * qscale *
2350                          ((int) (quant_matrix[j]))) >> 4;
2351                 level = (level - 1) | 1;
2352                 level = -level;
2353             } else {
2354                 level = (((level << 1) + 1) * qscale *
2355                          ((int) (quant_matrix[j]))) >> 4;
2356                 level = (level - 1) | 1;
2357             }
2358             block[j] = level;
2359         }
2360     }
2361 }
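/* The inter path above uses ((2 * |level| + 1) * qscale * matrix) >> 4, i.e.
 * the MPEG-1 inter reconstruction, with the same forced-odd step as in the
 * intra case. */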
2362
2363 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2364                                    DCTELEM *block, int n, int qscale)
2365 {
2366     int i, level, nCoeffs;
2367     const uint16_t *quant_matrix;
2368
2369     if(s->alternate_scan) nCoeffs= 63;
2370     else nCoeffs= s->block_last_index[n];
2371
2372     if (n < 4)
2373         block[0] = block[0] * s->y_dc_scale;
2374     else
2375         block[0] = block[0] * s->c_dc_scale;
2376     quant_matrix = s->intra_matrix;
2377     for(i=1;i<=nCoeffs;i++) {
2378         int j= s->intra_scantable.permutated[i];
2379         level = block[j];
2380         if (level) {
2381             if (level < 0) {
2382                 level = -level;
2383                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2384                 level = -level;
2385             } else {
2386                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2387             }
2388             block[j] = level;
2389         }
2390     }
2391 }
2392
2393 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2394                                    DCTELEM *block, int n, int qscale)
2395 {
2396     int i, level, nCoeffs;
2397     const uint16_t *quant_matrix;
2398     int sum=-1;
2399
2400     if(s->alternate_scan) nCoeffs= 63;
2401     else nCoeffs= s->block_last_index[n];
2402
2403     if (n < 4)
2404         block[0] = block[0] * s->y_dc_scale;
2405     else
2406         block[0] = block[0] * s->c_dc_scale;
2407     quant_matrix = s->intra_matrix;
2408     for(i=1;i<=nCoeffs;i++) {
2409         int j= s->intra_scantable.permutated[i];
2410         level = block[j];
2411         if (level) {
2412             if (level < 0) {
2413                 level = -level;
2414                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2415                 level = -level;
2416             } else {
2417                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2418             }
2419             block[j] = level;
2420             sum+=level;
2421         }
2422     }
2423     block[63]^=sum&1;
2424 }
2425
2426 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2427                                    DCTELEM *block, int n, int qscale)
2428 {
2429     int i, level, nCoeffs;
2430     const uint16_t *quant_matrix;
2431     int sum=-1;
2432
2433     if(s->alternate_scan) nCoeffs= 63;
2434     else nCoeffs= s->block_last_index[n];
2435
2436     quant_matrix = s->inter_matrix;
2437     for(i=0; i<=nCoeffs; i++) {
2438         int j= s->intra_scantable.permutated[i];
2439         level = block[j];
2440         if (level) {
2441             if (level < 0) {
2442                 level = -level;
2443                 level = (((level << 1) + 1) * qscale *
2444                          ((int) (quant_matrix[j]))) >> 4;
2445                 level = -level;
2446             } else {
2447                 level = (((level << 1) + 1) * qscale *
2448                          ((int) (quant_matrix[j]))) >> 4;
2449             }
2450             block[j] = level;
2451             sum+=level;
2452         }
2453     }
2454     block[63]^=sum&1;
2455 }
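/* In the two MPEG-2 dequantizers above that track "sum", the per-coefficient
 * oddification is replaced by a single mismatch control on the last
 * coefficient: sum is seeded with -1 so that "sum & 1" is set exactly when
 * the reconstructed levels add up to an even number, and "block[63] ^= sum & 1"
 * then toggles the LSB of coefficient 63 to make the total odd. */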
2456
2457 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2458                                   DCTELEM *block, int n, int qscale)
2459 {
2460     int i, level, qmul, qadd;
2461     int nCoeffs;
2462
2463     assert(s->block_last_index[n]>=0);
2464
2465     qmul = qscale << 1;
2466
2467     if (!s->h263_aic) {
2468         if (n < 4)
2469             block[0] = block[0] * s->y_dc_scale;
2470         else
2471             block[0] = block[0] * s->c_dc_scale;
2472         qadd = (qscale - 1) | 1;
2473     }else{
2474         qadd = 0;
2475     }
2476     if(s->ac_pred)
2477         nCoeffs=63;
2478     else
2479         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2480
2481     for(i=1; i<=nCoeffs; i++) {
2482         level = block[i];
2483         if (level) {
2484             if (level < 0) {
2485                 level = level * qmul - qadd;
2486             } else {
2487                 level = level * qmul + qadd;
2488             }
2489             block[i] = level;
2490         }
2491     }
2492 }
2493
2494 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2495                                   DCTELEM *block, int n, int qscale)
2496 {
2497     int i, level, qmul, qadd;
2498     int nCoeffs;
2499
2500     assert(s->block_last_index[n]>=0);
2501
2502     qadd = (qscale - 1) | 1;
2503     qmul = qscale << 1;
2504
2505     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2506
2507     for(i=0; i<=nCoeffs; i++) {
2508         level = block[i];
2509         if (level) {
2510             if (level < 0) {
2511                 level = level * qmul - qadd;
2512             } else {
2513                 level = level * qmul + qadd;
2514             }
2515             block[i] = level;
2516         }
2517     }
2518 }
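/* The two H.263-style dequantizers above are matrix-free: |rec| is
 * 2 * qscale * |level| plus the odd offset qadd = (qscale - 1) | 1 (in the
 * intra path qadd is zero when h263_aic is set). For instance, level 5 at
 * qscale 6 reconstructs to 5 * 12 + 5 = 65. A hypothetical single-level
 * helper, kept out of the build and written only to illustrate the formula:
 */
#if 0
/* hypothetical helper, not part of the codebase */
static int h263_dequant_one_level(int level, int qscale)
{
    int qmul = qscale << 1;
    int qadd = (qscale - 1) | 1;

    if (!level)
        return 0;                     /* zero coefficients stay zero */
    return level < 0 ? level * qmul - qadd
                     : level * qmul + qadd;
}
#endif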
2519
2520 /**
2521  * Set qscale and update the qscale-dependent variables.
2522  */
2523 void ff_set_qscale(MpegEncContext * s, int qscale)
2524 {
2525     if (qscale < 1)
2526         qscale = 1;
2527     else if (qscale > 31)
2528         qscale = 31;
2529
2530     s->qscale = qscale;
2531     s->chroma_qscale= s->chroma_qscale_table[qscale];
2532
2533     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2534     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2535 }
2536
2537 void ff_MPV_report_decode_progress(MpegEncContext *s)
2538 {
2539     if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2540         ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);
2541 }