mpegvideo: Move Picture-related functions to a separate file
[ffmpeg.git] / libavcodec / mpegvideo.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
35 #include "avcodec.h"
36 #include "blockdsp.h"
37 #include "idctdsp.h"
38 #include "internal.h"
39 #include "mathops.h"
40 #include "mpegutils.h"
41 #include "mpegvideo.h"
42 #include "mpegvideodata.h"
43 #include "mjpegenc.h"
44 #include "msmpeg4.h"
45 #include "qpeldsp.h"
46 #include "xvmc_internal.h"
47 #include "thread.h"
48 #include "wmv2.h"
49 #include <limits.h>
50
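/* MPEG-1 intra inverse quantisation (a rough sketch of what the loop below
 * does): each AC coefficient is scaled by qscale and the intra weighting
 * matrix, level * qscale * W[j] >> 3, and then forced to an odd value with
 * (level - 1) | 1 ("oddification", MPEG-1's mismatch control).  For example,
 * level = 2, qscale = 8, W[j] = 16: 2 * 8 * 16 = 256, 256 >> 3 = 32,
 * (32 - 1) | 1 = 31.  The DC coefficient is handled separately via the
 * y_dc_scale/c_dc_scale factors. */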
51 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
52                                    int16_t *block, int n, int qscale)
53 {
54     int i, level, nCoeffs;
55     const uint16_t *quant_matrix;
56
57     nCoeffs= s->block_last_index[n];
58
59     if (n < 4)
60         block[0] = block[0] * s->y_dc_scale;
61     else
62         block[0] = block[0] * s->c_dc_scale;
63     /* XXX: only mpeg1 */
64     quant_matrix = s->intra_matrix;
65     for(i=1;i<=nCoeffs;i++) {
66         int j= s->intra_scantable.permutated[i];
67         level = block[j];
68         if (level) {
69             if (level < 0) {
70                 level = -level;
71                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
72                 level = (level - 1) | 1;
73                 level = -level;
74             } else {
75                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
76                 level = (level - 1) | 1;
77             }
78             block[j] = level;
79         }
80     }
81 }
82
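/* MPEG-1 inter inverse quantisation: non-intra levels carry a half-step
 * offset, so the reconstruction below is roughly
 * ((2 * |level| + 1) * qscale * W[j]) >> 4, again forced odd for mismatch
 * control and with the sign restored afterwards. */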
83 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
84                                    int16_t *block, int n, int qscale)
85 {
86     int i, level, nCoeffs;
87     const uint16_t *quant_matrix;
88
89     nCoeffs= s->block_last_index[n];
90
91     quant_matrix = s->inter_matrix;
92     for(i=0; i<=nCoeffs; i++) {
93         int j= s->intra_scantable.permutated[i];
94         level = block[j];
95         if (level) {
96             if (level < 0) {
97                 level = -level;
98                 level = (((level << 1) + 1) * qscale *
99                          ((int) (quant_matrix[j]))) >> 4;
100                 level = (level - 1) | 1;
101                 level = -level;
102             } else {
103                 level = (((level << 1) + 1) * qscale *
104                          ((int) (quant_matrix[j]))) >> 4;
105                 level = (level - 1) | 1;
106             }
107             block[j] = level;
108         }
109     }
110 }
111
112 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
113                                    int16_t *block, int n, int qscale)
114 {
115     int i, level, nCoeffs;
116     const uint16_t *quant_matrix;
117
118     if(s->alternate_scan) nCoeffs= 63;
119     else nCoeffs= s->block_last_index[n];
120
121     if (n < 4)
122         block[0] = block[0] * s->y_dc_scale;
123     else
124         block[0] = block[0] * s->c_dc_scale;
125     quant_matrix = s->intra_matrix;
126     for(i=1;i<=nCoeffs;i++) {
127         int j= s->intra_scantable.permutated[i];
128         level = block[j];
129         if (level) {
130             if (level < 0) {
131                 level = -level;
132                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
133                 level = -level;
134             } else {
135                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
136             }
137             block[j] = level;
138         }
139     }
140 }
141
142 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
143                                    int16_t *block, int n, int qscale)
144 {
145     int i, level, nCoeffs;
146     const uint16_t *quant_matrix;
147     int sum=-1;
148
149     if(s->alternate_scan) nCoeffs= 63;
150     else nCoeffs= s->block_last_index[n];
151
152     if (n < 4)
153         block[0] = block[0] * s->y_dc_scale;
154     else
155         block[0] = block[0] * s->c_dc_scale;
156     quant_matrix = s->intra_matrix;
157     for(i=1;i<=nCoeffs;i++) {
158         int j= s->intra_scantable.permutated[i];
159         level = block[j];
160         if (level) {
161             if (level < 0) {
162                 level = -level;
163                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
164                 level = -level;
165             } else {
166                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
167             }
168             block[j] = level;
169             sum+=level;
170         }
171     }
172     block[63]^=sum&1;
173 }
174
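/* MPEG-2 replaces MPEG-1's per-coefficient oddification with a single
 * mismatch-control step: the bit-exact intra path above and the inter path
 * below track the parity of the reconstructed coefficients in `sum` and fold
 * it into the LSB of block[63], which keeps encoder/decoder IDCT drift
 * bounded. */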
175 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
176                                    int16_t *block, int n, int qscale)
177 {
178     int i, level, nCoeffs;
179     const uint16_t *quant_matrix;
180     int sum=-1;
181
182     if(s->alternate_scan) nCoeffs= 63;
183     else nCoeffs= s->block_last_index[n];
184
185     quant_matrix = s->inter_matrix;
186     for(i=0; i<=nCoeffs; i++) {
187         int j= s->intra_scantable.permutated[i];
188         level = block[j];
189         if (level) {
190             if (level < 0) {
191                 level = -level;
192                 level = (((level << 1) + 1) * qscale *
193                          ((int) (quant_matrix[j]))) >> 4;
194                 level = -level;
195             } else {
196                 level = (((level << 1) + 1) * qscale *
197                          ((int) (quant_matrix[j]))) >> 4;
198             }
199             block[j] = level;
200             sum+=level;
201         }
202     }
203     block[63]^=sum&1;
204 }
205
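/* H.263/MPEG-4 inverse quantisation is purely scalar:
 * |level'| = 2 * qscale * |level| + qadd, with qadd = (qscale - 1) | 1
 * (i.e. qscale if odd, qscale - 1 if even).  For example, qscale = 6 and
 * level = 3 gives qmul = 12, qadd = 5, so 3 * 12 + 5 = 41.  With H.263
 * advanced intra coding (AIC) no such offset is applied, hence qadd = 0 in
 * the intra path below. */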
206 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
207                                   int16_t *block, int n, int qscale)
208 {
209     int i, level, qmul, qadd;
210     int nCoeffs;
211
212     assert(s->block_last_index[n]>=0);
213
214     qmul = qscale << 1;
215
216     if (!s->h263_aic) {
217         if (n < 4)
218             block[0] = block[0] * s->y_dc_scale;
219         else
220             block[0] = block[0] * s->c_dc_scale;
221         qadd = (qscale - 1) | 1;
222     }else{
223         qadd = 0;
224     }
225     if(s->ac_pred)
226         nCoeffs=63;
227     else
228         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
229
230     for(i=1; i<=nCoeffs; i++) {
231         level = block[i];
232         if (level) {
233             if (level < 0) {
234                 level = level * qmul - qadd;
235             } else {
236                 level = level * qmul + qadd;
237             }
238             block[i] = level;
239         }
240     }
241 }
242
243 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
244                                   int16_t *block, int n, int qscale)
245 {
246     int i, level, qmul, qadd;
247     int nCoeffs;
248
249     assert(s->block_last_index[n]>=0);
250
251     qadd = (qscale - 1) | 1;
252     qmul = qscale << 1;
253
254     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
255
256     for(i=0; i<=nCoeffs; i++) {
257         level = block[i];
258         if (level) {
259             if (level < 0) {
260                 level = level * qmul - qadd;
261             } else {
262                 level = level * qmul + qadd;
263             }
264             block[i] = level;
265         }
266     }
267 }
268
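/* Error-resilience callback: the ER code conceals or re-decodes damaged
 * macroblocks by filling in the motion/intra state it has estimated,
 * pointing dest[] at the right place in the current picture and then
 * running the normal macroblock reconstruction via ff_mpv_decode_mb(). */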
269 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
270                               int (*mv)[2][4][2],
271                               int mb_x, int mb_y, int mb_intra, int mb_skipped)
272 {
273     MpegEncContext *s = opaque;
274
275     s->mv_dir     = mv_dir;
276     s->mv_type    = mv_type;
277     s->mb_intra   = mb_intra;
278     s->mb_skipped = mb_skipped;
279     s->mb_x       = mb_x;
280     s->mb_y       = mb_y;
281     memcpy(s->mv, mv, sizeof(*mv));
282
283     ff_init_block_index(s);
284     ff_update_block_index(s);
285
286     s->bdsp.clear_blocks(s->block[0]);
287
288     s->dest[0] = s->current_picture.f->data[0] + (s->mb_y *  16                       * s->linesize)   + s->mb_x *  16;
289     s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
290     s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
291
292     if (ref)
293         av_log(s->avctx, AV_LOG_DEBUG,
294                "Interlaced error concealment is not fully implemented\n");
295     ff_mpv_decode_mb(s, s->block);
296 }
297
298 /* init common dct for both encoder and decoder */
299 static av_cold int dct_init(MpegEncContext *s)
300 {
301     ff_blockdsp_init(&s->bdsp, s->avctx);
302     ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
303     ff_mpegvideodsp_init(&s->mdsp);
304     ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
305
306     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
307     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
308     s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
309     s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
310     s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
311     if (s->avctx->flags & CODEC_FLAG_BITEXACT)
312         s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
313     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
314
315     if (HAVE_INTRINSICS_NEON)
316         ff_mpv_common_init_neon(s);
317
318     if (ARCH_ARM)
319         ff_mpv_common_init_arm(s);
320     if (ARCH_PPC)
321         ff_mpv_common_init_ppc(s);
322     if (ARCH_X86)
323         ff_mpv_common_init_x86(s);
324
325     return 0;
326 }
327
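/* The scan tables are stored pre-permuted: ff_init_scantable() applies the
 * IDCT's coefficient permutation (idsp.idct_permutation) to the standard
 * zigzag/alternate orders, so coefficients can be written straight into the
 * layout the selected IDCT implementation expects. */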
328 av_cold void ff_mpv_idct_init(MpegEncContext *s)
329 {
330     ff_idctdsp_init(&s->idsp, s->avctx);
331
332     /* Load & permute the scan tables.
333      * Note: only WMV uses different ones.
334      */
335     if (s->alternate_scan) {
336         ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
337         ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
338     } else {
339         ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
340         ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
341     }
342     ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
343     ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
344 }
345
346 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
347 {
348     return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 0,
349                             s->chroma_x_shift, s->chroma_y_shift, s->out_format,
350                             s->mb_stride, s->mb_height, s->b8_stride,
351                             &s->linesize, &s->uvlinesize);
352 }
353
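/* Per-slice-thread state: each slice context gets its own scratch buffers,
 * motion-estimation maps and DCT block storage.  y_size counts 8x8 luma
 * blocks (two block rows per MB row, plus one extra row for prediction),
 * c_size counts chroma on macroblock granularity. */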
354 static int init_duplicate_context(MpegEncContext *s)
355 {
356     int y_size = s->b8_stride * (2 * s->mb_height + 1);
357     int c_size = s->mb_stride * (s->mb_height + 1);
358     int yc_size = y_size + 2 * c_size;
359     int i;
360
361     s->sc.edge_emu_buffer =
362     s->me.scratchpad   =
363     s->me.temp         =
364     s->sc.rd_scratchpad   =
365     s->sc.b_scratchpad    =
366     s->sc.obmc_scratchpad = NULL;
367
368     if (s->encoding) {
369         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
370                           ME_MAP_SIZE * sizeof(uint32_t), fail)
371         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
372                           ME_MAP_SIZE * sizeof(uint32_t), fail)
373         if (s->avctx->noise_reduction) {
374             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
375                               2 * 64 * sizeof(int), fail)
376         }
377     }
378     FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
379     s->block = s->blocks[0];
380
381     for (i = 0; i < 12; i++) {
382         s->pblocks[i] = &s->block[i];
383     }
384     if (s->avctx->codec_tag == AV_RL32("VCR2")) {
385         // exchange uv
386         int16_t (*tmp)[64];
387         tmp           = s->pblocks[4];
388         s->pblocks[4] = s->pblocks[5];
389         s->pblocks[5] = tmp;
390     }
391
392     if (s->out_format == FMT_H263) {
393         /* ac values */
394         FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
395                           yc_size * sizeof(int16_t) * 16, fail);
396         s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
397         s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
398         s->ac_val[2] = s->ac_val[1] + c_size;
399     }
400
401     return 0;
402 fail:
403     return -1; // free() through ff_mpv_common_end()
404 }
405
406 static void free_duplicate_context(MpegEncContext *s)
407 {
408     if (!s)
409         return;
410
411     av_freep(&s->sc.edge_emu_buffer);
412     av_freep(&s->me.scratchpad);
413     s->me.temp =
414     s->sc.rd_scratchpad =
415     s->sc.b_scratchpad =
416     s->sc.obmc_scratchpad = NULL;
417
418     av_freep(&s->dct_error_sum);
419     av_freep(&s->me.map);
420     av_freep(&s->me.score_map);
421     av_freep(&s->blocks);
422     av_freep(&s->ac_val_base);
423     s->block = NULL;
424 }
425
426 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
427 {
428 #define COPY(a) bak->a = src->a
429     COPY(sc.edge_emu_buffer);
430     COPY(me.scratchpad);
431     COPY(me.temp);
432     COPY(sc.rd_scratchpad);
433     COPY(sc.b_scratchpad);
434     COPY(sc.obmc_scratchpad);
435     COPY(me.map);
436     COPY(me.score_map);
437     COPY(blocks);
438     COPY(block);
439     COPY(start_mb_y);
440     COPY(end_mb_y);
441     COPY(me.map_generation);
442     COPY(pb);
443     COPY(dct_error_sum);
444     COPY(dct_count[0]);
445     COPY(dct_count[1]);
446     COPY(ac_val_base);
447     COPY(ac_val[0]);
448     COPY(ac_val[1]);
449     COPY(ac_val[2]);
450 #undef COPY
451 }
452
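/* Propagate shared state from the main context into a slice context:
 * everything is copied wholesale with memcpy(), then the per-thread
 * pointers saved by backup_duplicate_context() are restored so each slice
 * keeps its own scratch buffers and block storage. */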
453 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
454 {
455     MpegEncContext bak;
456     int i, ret;
457     // FIXME copy only needed parts
458     // START_TIMER
459     backup_duplicate_context(&bak, dst);
460     memcpy(dst, src, sizeof(MpegEncContext));
461     backup_duplicate_context(dst, &bak);
462     for (i = 0; i < 12; i++) {
463         dst->pblocks[i] = &dst->block[i];
464     }
465     if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
466         // exchange uv
467         int16_t (*tmp)[64];
468         tmp             = dst->pblocks[4];
469         dst->pblocks[4] = dst->pblocks[5];
470         dst->pblocks[5] = tmp;
471     }
472     if (!dst->sc.edge_emu_buffer &&
473         (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me,
474                                        &dst->sc, dst->linesize)) < 0) {
475         av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
476                "scratch buffers.\n");
477         return ret;
478     }
479     // STOP_TIMER("update_duplicate_context")
480     // about 10k cycles / 0.01 sec for 1000 frames at 1 GHz with 2 threads
481     return 0;
482 }
483
484 int ff_mpeg_update_thread_context(AVCodecContext *dst,
485                                   const AVCodecContext *src)
486 {
487     int i, ret;
488     MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
489
490     if (dst == src || !s1->context_initialized)
491         return 0;
492
493     // FIXME can parameters change on I-frames?
494     // in that case dst may need a reinit
495     if (!s->context_initialized) {
496         int err;
497         memcpy(s, s1, sizeof(MpegEncContext));
498
499         s->avctx                 = dst;
500         s->bitstream_buffer      = NULL;
501         s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
502
503         ff_mpv_idct_init(s);
504         if ((err = ff_mpv_common_init(s)) < 0)
505             return err;
506     }
507
508     if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
509         int err;
510         s->context_reinit = 0;
511         s->height = s1->height;
512         s->width  = s1->width;
513         if ((err = ff_mpv_common_frame_size_change(s)) < 0)
514             return err;
515     }
516
517     s->avctx->coded_height  = s1->avctx->coded_height;
518     s->avctx->coded_width   = s1->avctx->coded_width;
519     s->avctx->width         = s1->avctx->width;
520     s->avctx->height        = s1->avctx->height;
521
522     s->coded_picture_number = s1->coded_picture_number;
523     s->picture_number       = s1->picture_number;
524
525     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
526         ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
527         if (s1->picture[i].f->buf[0] &&
528             (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
529             return ret;
530     }
531
532 #define UPDATE_PICTURE(pic)\
533 do {\
534     ff_mpeg_unref_picture(s->avctx, &s->pic);\
535     if (s1->pic.f->buf[0])\
536         ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
537     else\
538         ret = ff_update_picture_tables(&s->pic, &s1->pic);\
539     if (ret < 0)\
540         return ret;\
541 } while (0)
542
543     UPDATE_PICTURE(current_picture);
544     UPDATE_PICTURE(last_picture);
545     UPDATE_PICTURE(next_picture);
546
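/* REBASE_PICTURE translates a Picture pointer from the source thread's
 * picture[] array into the corresponding slot of the destination thread's
 * array (same index, different base), or NULL if it does not point into the
 * array at all. */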
547 #define REBASE_PICTURE(pic, new_ctx, old_ctx)                                 \
548     ((pic && pic >= old_ctx->picture &&                                       \
549       pic < old_ctx->picture + MAX_PICTURE_COUNT) ?                           \
550         &new_ctx->picture[pic - old_ctx->picture] : NULL)
551
552     s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
553     s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
554     s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
555
556     // Error/bug resilience
557     s->next_p_frame_damaged = s1->next_p_frame_damaged;
558     s->workaround_bugs      = s1->workaround_bugs;
559
560     // MPEG4 timing info
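    // Copies every field located between last_time_base and pb_field_time
    // in one block; this relies on their declaration order in
    // MpegEncContext staying unchanged.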
561     memcpy(&s->last_time_base, &s1->last_time_base,
562            (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
563            (char *) &s1->last_time_base);
564
565     // B-frame info
566     s->max_b_frames = s1->max_b_frames;
567     s->low_delay    = s1->low_delay;
568     s->droppable    = s1->droppable;
569
570     // DivX handling (doesn't work)
571     s->divx_packed  = s1->divx_packed;
572
573     if (s1->bitstream_buffer) {
574         if (s1->bitstream_buffer_size +
575             FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
576             av_fast_malloc(&s->bitstream_buffer,
577                            &s->allocated_bitstream_buffer_size,
578                            s1->allocated_bitstream_buffer_size);
579         s->bitstream_buffer_size = s1->bitstream_buffer_size;
580         memcpy(s->bitstream_buffer, s1->bitstream_buffer,
581                s1->bitstream_buffer_size);
582         memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
583                FF_INPUT_BUFFER_PADDING_SIZE);
584     }
585
586     // linesize-dependent scratch buffer allocation
587     if (!s->sc.edge_emu_buffer)
588         if (s1->linesize) {
589             if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
590                                         &s->sc, s1->linesize) < 0) {
591                 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
592                        "scratch buffers.\n");
593                 return AVERROR(ENOMEM);
594             }
595         } else {
596             av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
597                    "be allocated due to unknown size.\n");
598             return AVERROR_BUG;
599         }
600
601     // MPEG2/interlacing info
602     memcpy(&s->progressive_sequence, &s1->progressive_sequence,
603            (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
604
605     if (!s1->first_field) {
606         s->last_pict_type = s1->pict_type;
607         if (s1->current_picture_ptr)
608             s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
609     }
610
611     return 0;
612 }
613
614 /**
615  * Set the given MpegEncContext to common defaults
616  * (same for encoding and decoding).
617  * The changed fields will not depend upon the
618  * prior state of the MpegEncContext.
619  */
620 void ff_mpv_common_defaults(MpegEncContext *s)
621 {
622     s->y_dc_scale_table      =
623     s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
624     s->chroma_qscale_table   = ff_default_chroma_qscale_table;
625     s->progressive_frame     = 1;
626     s->progressive_sequence  = 1;
627     s->picture_structure     = PICT_FRAME;
628
629     s->coded_picture_number  = 0;
630     s->picture_number        = 0;
631
632     s->f_code                = 1;
633     s->b_code                = 1;
634
635     s->slice_context_count   = 1;
636 }
637
638 /**
639  * Set the given MpegEncContext to defaults for decoding.
640  * The changed fields will not depend upon
641  * the prior state of the MpegEncContext.
642  */
643 void ff_mpv_decode_defaults(MpegEncContext *s)
644 {
645     ff_mpv_common_defaults(s);
646 }
647
648 static int init_er(MpegEncContext *s)
649 {
650     ERContext *er = &s->er;
651     int mb_array_size = s->mb_height * s->mb_stride;
652     int i;
653
654     er->avctx       = s->avctx;
655
656     er->mb_index2xy = s->mb_index2xy;
657     er->mb_num      = s->mb_num;
658     er->mb_width    = s->mb_width;
659     er->mb_height   = s->mb_height;
660     er->mb_stride   = s->mb_stride;
661     er->b8_stride   = s->b8_stride;
662
663     er->er_temp_buffer     = av_malloc(s->mb_height * s->mb_stride);
664     er->error_status_table = av_mallocz(mb_array_size);
665     if (!er->er_temp_buffer || !er->error_status_table)
666         goto fail;
667
668     er->mbskip_table  = s->mbskip_table;
669     er->mbintra_table = s->mbintra_table;
670
671     for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
672         er->dc_val[i] = s->dc_val[i];
673
674     er->decode_mb = mpeg_er_decode_mb;
675     er->opaque    = s;
676
677     return 0;
678 fail:
679     av_freep(&er->er_temp_buffer);
680     av_freep(&er->error_status_table);
681     return AVERROR(ENOMEM);
682 }
683
684 /**
685  * Initialize and allocate MpegEncContext fields dependent on the resolution.
686  */
687 static int init_context_frame(MpegEncContext *s)
688 {
689     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
690
691     s->mb_width   = (s->width + 15) / 16;
692     s->mb_stride  = s->mb_width + 1;
693     s->b8_stride  = s->mb_width * 2 + 1;
694     mb_array_size = s->mb_height * s->mb_stride;
695     mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
696
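    /* mb_stride and b8_stride include one extra column so that predictors
     * for the first macroblock/block of a row can address a valid (dummy)
     * left neighbour without special-casing the picture edge. */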
697     /* set default edge pos; will be overridden
698      * in decode_header if needed */
699     s->h_edge_pos = s->mb_width * 16;
700     s->v_edge_pos = s->mb_height * 16;
701
702     s->mb_num     = s->mb_width * s->mb_height;
703
704     s->block_wrap[0] =
705     s->block_wrap[1] =
706     s->block_wrap[2] =
707     s->block_wrap[3] = s->b8_stride;
708     s->block_wrap[4] =
709     s->block_wrap[5] = s->mb_stride;
710
711     y_size  = s->b8_stride * (2 * s->mb_height + 1);
712     c_size  = s->mb_stride * (s->mb_height + 1);
713     yc_size = y_size + 2   * c_size;
714
715     FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
716                       fail); // error resilience code looks cleaner with this
717     for (y = 0; y < s->mb_height; y++)
718         for (x = 0; x < s->mb_width; x++)
719             s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
720
721     s->mb_index2xy[s->mb_height * s->mb_width] =
722         (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
723
724     if (s->encoding) {
725         /* Allocate MV tables */
726         FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
727                           mv_table_size * 2 * sizeof(int16_t), fail);
728         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
729                           mv_table_size * 2 * sizeof(int16_t), fail);
730         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
731                           mv_table_size * 2 * sizeof(int16_t), fail);
732         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
733                           mv_table_size * 2 * sizeof(int16_t), fail);
734         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
735                           mv_table_size * 2 * sizeof(int16_t), fail);
736         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
737                           mv_table_size * 2 * sizeof(int16_t), fail);
738         s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
739         s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
740         s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
741         s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
742                                    s->mb_stride + 1;
743         s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
744                                    s->mb_stride + 1;
745         s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;
746
747         /* Allocate MB type table */
748         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
749                           sizeof(uint16_t), fail); // needed for encoding
750
751         FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
752                           sizeof(int), fail);
753
754         FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
755                          mb_array_size * sizeof(float), fail);
756         FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
757                          mb_array_size * sizeof(float), fail);
758
759     }
760
761     if (s->codec_id == AV_CODEC_ID_MPEG4 ||
762         (s->avctx->flags & CODEC_FLAG_INTERLACED_ME)) {
763         /* interlaced direct mode decoding tables */
764         for (i = 0; i < 2; i++) {
765             int j, k;
766             for (j = 0; j < 2; j++) {
767                 for (k = 0; k < 2; k++) {
768                     FF_ALLOCZ_OR_GOTO(s->avctx,
769                                       s->b_field_mv_table_base[i][j][k],
770                                       mv_table_size * 2 * sizeof(int16_t),
771                                       fail);
772                     s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
773                                                    s->mb_stride + 1;
774                 }
775                 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
776                                   mb_array_size * 2 * sizeof(uint8_t), fail);
777                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
778                                   mv_table_size * 2 * sizeof(int16_t), fail);
779                 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
780                                             + s->mb_stride + 1;
781             }
782             FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
783                               mb_array_size * 2 * sizeof(uint8_t), fail);
784         }
785     }
786     if (s->out_format == FMT_H263) {
787         /* cbp values */
788         FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
789         s->coded_block = s->coded_block_base + s->b8_stride + 1;
790
791         /* cbp, ac_pred, pred_dir */
792         FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
793                           mb_array_size * sizeof(uint8_t), fail);
794         FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
795                           mb_array_size * sizeof(uint8_t), fail);
796     }
797
798     if (s->h263_pred || s->h263_plus || !s->encoding) {
799         /* dc values */
800         // MN: we need these for error resilience of intra frames
801         FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
802                           yc_size * sizeof(int16_t), fail);
803         s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
804         s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
805         s->dc_val[2] = s->dc_val[1] + c_size;
806         for (i = 0; i < yc_size; i++)
807             s->dc_val_base[i] = 1024;
808     }
809
810     /* which mb is an intra block */
811     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
812     memset(s->mbintra_table, 1, mb_array_size);
813
814     /* init macroblock skip table */
815     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
816     // Note: the extra padding is for quicker mpeg4 slice_end detection
817
818     return init_er(s);
819 fail:
820     return AVERROR(ENOMEM);
821 }
822
823 /**
824  * Initialize the common structure for both encoder and decoder.
825  * This assumes that some variables like width/height are already set.
826  */
827 av_cold int ff_mpv_common_init(MpegEncContext *s)
828 {
829     int i;
830     int nb_slices = (HAVE_THREADS &&
831                      s->avctx->active_thread_type & FF_THREAD_SLICE) ?
832                     s->avctx->thread_count : 1;
833
834     if (s->encoding && s->avctx->slices)
835         nb_slices = s->avctx->slices;
836
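    /* For interlaced MPEG-2 the height is rounded up to a multiple of 32 so
     * that each field contains a whole number of 16-pixel macroblock rows,
     * e.g. height = 272 gives 2 * 9 = 18 MB rows instead of 17. */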
837     if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
838         s->mb_height = (s->height + 31) / 32 * 2;
839     else
840         s->mb_height = (s->height + 15) / 16;
841
842     if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
843         av_log(s->avctx, AV_LOG_ERROR,
844                "decoding to AV_PIX_FMT_NONE is not supported.\n");
845         return -1;
846     }
847
848     if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
849         int max_slices;
850         if (s->mb_height)
851             max_slices = FFMIN(MAX_THREADS, s->mb_height);
852         else
853             max_slices = MAX_THREADS;
854         av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
855                " reducing to %d\n", nb_slices, max_slices);
856         nb_slices = max_slices;
857     }
858
859     if ((s->width || s->height) &&
860         av_image_check_size(s->width, s->height, 0, s->avctx))
861         return -1;
862
863     dct_init(s);
864
865     /* set chroma shifts */
866     av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
867                                      &s->chroma_x_shift,
868                                      &s->chroma_y_shift);
869
870     /* convert fourcc to upper case */
871     s->codec_tag          = avpriv_toupper4(s->avctx->codec_tag);
872
873     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
874                       MAX_PICTURE_COUNT * sizeof(Picture), fail);
875     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
876         s->picture[i].f = av_frame_alloc();
877         if (!s->picture[i].f)
878             goto fail;
879     }
880     memset(&s->next_picture, 0, sizeof(s->next_picture));
881     memset(&s->last_picture, 0, sizeof(s->last_picture));
882     memset(&s->current_picture, 0, sizeof(s->current_picture));
883     memset(&s->new_picture, 0, sizeof(s->new_picture));
884     s->next_picture.f = av_frame_alloc();
885     if (!s->next_picture.f)
886         goto fail;
887     s->last_picture.f = av_frame_alloc();
888     if (!s->last_picture.f)
889         goto fail;
890     s->current_picture.f = av_frame_alloc();
891     if (!s->current_picture.f)
892         goto fail;
893     s->new_picture.f = av_frame_alloc();
894     if (!s->new_picture.f)
895         goto fail;
896
897     if (s->width && s->height) {
898         if (init_context_frame(s))
899             goto fail;
900
901         s->parse_context.state = -1;
902     }
903
904     s->context_initialized = 1;
905     s->thread_context[0]   = s;
906
907     if (s->width && s->height) {
908         if (nb_slices > 1) {
909             for (i = 1; i < nb_slices; i++) {
910                 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
911                 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
912             }
913
914             for (i = 0; i < nb_slices; i++) {
915                 if (init_duplicate_context(s->thread_context[i]) < 0)
916                     goto fail;
917                 s->thread_context[i]->start_mb_y =
918                     (s->mb_height * (i) + nb_slices / 2) / nb_slices;
919                 s->thread_context[i]->end_mb_y   =
920                     (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
921             }
922         } else {
923             if (init_duplicate_context(s) < 0)
924                 goto fail;
925             s->start_mb_y = 0;
926             s->end_mb_y   = s->mb_height;
927         }
928         s->slice_context_count = nb_slices;
929     }
930
931     return 0;
932  fail:
933     ff_mpv_common_end(s);
934     return -1;
935 }
936
937 /**
938  * Free and reset MpegEncContext fields depending on the resolution.
939  * Used during resolution changes to avoid a full reinitialization of the
940  * codec.
941  */
942 static void free_context_frame(MpegEncContext *s)
943 {
944     int i, j, k;
945
946     av_freep(&s->mb_type);
947     av_freep(&s->p_mv_table_base);
948     av_freep(&s->b_forw_mv_table_base);
949     av_freep(&s->b_back_mv_table_base);
950     av_freep(&s->b_bidir_forw_mv_table_base);
951     av_freep(&s->b_bidir_back_mv_table_base);
952     av_freep(&s->b_direct_mv_table_base);
953     s->p_mv_table            = NULL;
954     s->b_forw_mv_table       = NULL;
955     s->b_back_mv_table       = NULL;
956     s->b_bidir_forw_mv_table = NULL;
957     s->b_bidir_back_mv_table = NULL;
958     s->b_direct_mv_table     = NULL;
959     for (i = 0; i < 2; i++) {
960         for (j = 0; j < 2; j++) {
961             for (k = 0; k < 2; k++) {
962                 av_freep(&s->b_field_mv_table_base[i][j][k]);
963                 s->b_field_mv_table[i][j][k] = NULL;
964             }
965             av_freep(&s->b_field_select_table[i][j]);
966             av_freep(&s->p_field_mv_table_base[i][j]);
967             s->p_field_mv_table[i][j] = NULL;
968         }
969         av_freep(&s->p_field_select_table[i]);
970     }
971
972     av_freep(&s->dc_val_base);
973     av_freep(&s->coded_block_base);
974     av_freep(&s->mbintra_table);
975     av_freep(&s->cbp_table);
976     av_freep(&s->pred_dir_table);
977
978     av_freep(&s->mbskip_table);
979
980     av_freep(&s->er.error_status_table);
981     av_freep(&s->er.er_temp_buffer);
982     av_freep(&s->mb_index2xy);
983     av_freep(&s->lambda_table);
984     av_freep(&s->cplx_tab);
985     av_freep(&s->bits_tab);
986
987     s->linesize = s->uvlinesize = 0;
988 }
989
990 int ff_mpv_common_frame_size_change(MpegEncContext *s)
991 {
992     int i, err = 0;
993
994     if (s->slice_context_count > 1) {
995         for (i = 0; i < s->slice_context_count; i++) {
996             free_duplicate_context(s->thread_context[i]);
997         }
998         for (i = 1; i < s->slice_context_count; i++) {
999             av_freep(&s->thread_context[i]);
1000         }
1001     } else
1002         free_duplicate_context(s);
1003
1004     free_context_frame(s);
1005
1006     if (s->picture)
1007         for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1008                 s->picture[i].needs_realloc = 1;
1009         }
1010
1011     s->last_picture_ptr         =
1012     s->next_picture_ptr         =
1013     s->current_picture_ptr      = NULL;
1014
1015     // init
1016     if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1017         s->mb_height = (s->height + 31) / 32 * 2;
1018     else
1019         s->mb_height = (s->height + 15) / 16;
1020
1021     if ((s->width || s->height) &&
1022         (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
1023         goto fail;
1024
1025     if ((err = init_context_frame(s)))
1026         goto fail;
1027
1028     s->thread_context[0]   = s;
1029
1030     if (s->width && s->height) {
1031         int nb_slices = s->slice_context_count;
1032         if (nb_slices > 1) {
1033             for (i = 1; i < nb_slices; i++) {
1034                 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1035                 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1036             }
1037
1038             for (i = 0; i < nb_slices; i++) {
1039                 if ((err = init_duplicate_context(s->thread_context[i])) < 0)
1040                     goto fail;
1041                 s->thread_context[i]->start_mb_y =
1042                     (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1043                 s->thread_context[i]->end_mb_y   =
1044                     (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1045             }
1046         } else {
1047             if (init_duplicate_context(s) < 0)
1048                 goto fail;
1049             s->start_mb_y = 0;
1050             s->end_mb_y   = s->mb_height;
1051         }
1052         s->slice_context_count = nb_slices;
1053     }
1054
1055     return 0;
1056  fail:
1057     ff_mpv_common_end(s);
1058     return err;
1059 }
1060
1061 /* init common structure for both encoder and decoder */
1062 void ff_mpv_common_end(MpegEncContext *s)
1063 {
1064     int i;
1065
1066     if (s->slice_context_count > 1) {
1067         for (i = 0; i < s->slice_context_count; i++) {
1068             free_duplicate_context(s->thread_context[i]);
1069         }
1070         for (i = 1; i < s->slice_context_count; i++) {
1071             av_freep(&s->thread_context[i]);
1072         }
1073         s->slice_context_count = 1;
1074     } else free_duplicate_context(s);
1075
1076     av_freep(&s->parse_context.buffer);
1077     s->parse_context.buffer_size = 0;
1078
1079     av_freep(&s->bitstream_buffer);
1080     s->allocated_bitstream_buffer_size = 0;
1081
1082     if (s->picture) {
1083         for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1084             ff_free_picture_tables(&s->picture[i]);
1085             ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1086             av_frame_free(&s->picture[i].f);
1087         }
1088     }
1089     av_freep(&s->picture);
1090     ff_free_picture_tables(&s->last_picture);
1091     ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1092     av_frame_free(&s->last_picture.f);
1093     ff_free_picture_tables(&s->current_picture);
1094     ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1095     av_frame_free(&s->current_picture.f);
1096     ff_free_picture_tables(&s->next_picture);
1097     ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1098     av_frame_free(&s->next_picture.f);
1099     ff_free_picture_tables(&s->new_picture);
1100     ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1101     av_frame_free(&s->new_picture.f);
1102
1103     free_context_frame(s);
1104
1105     s->context_initialized      = 0;
1106     s->last_picture_ptr         =
1107     s->next_picture_ptr         =
1108     s->current_picture_ptr      = NULL;
1109     s->linesize = s->uvlinesize = 0;
1110 }
1111
1112 static void release_unused_pictures(AVCodecContext *avctx, Picture *picture)
1113 {
1114     int i;
1115
1116     /* release non reference frames */
1117     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1118         if (!picture[i].reference)
1119             ff_mpeg_unref_picture(avctx, &picture[i]);
1120     }
1121 }
1122
1123 /**
1124  * generic function called after decoding
1125  * the header and before a frame is decoded.
1126  */
1127 int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1128 {
1129     int i, ret;
1130     Picture *pic;
1131     s->mb_skipped = 0;
1132
1133     /* mark & release old frames */
1134     if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1135         s->last_picture_ptr != s->next_picture_ptr &&
1136         s->last_picture_ptr->f->buf[0]) {
1137         ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1138     }
1139
1140     /* release forgotten pictures */
1141     /* if (mpeg124/h263) */
1142     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1143         if (&s->picture[i] != s->last_picture_ptr &&
1144             &s->picture[i] != s->next_picture_ptr &&
1145             s->picture[i].reference && !s->picture[i].needs_realloc) {
1146             if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1147                 av_log(avctx, AV_LOG_ERROR,
1148                        "releasing zombie picture\n");
1149             ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1150         }
1151     }
1152
1153     ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1154
1155     release_unused_pictures(s->avctx, s->picture);
1156
1157     if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1158         // we already have an unused image
1159         // (maybe it was set before reading the header)
1160         pic = s->current_picture_ptr;
1161     } else {
1162         i   = ff_find_unused_picture(s->avctx, s->picture, 0);
1163         if (i < 0) {
1164             av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1165             return i;
1166         }
1167         pic = &s->picture[i];
1168     }
1169
1170     pic->reference = 0;
1171     if (!s->droppable) {
1172         if (s->pict_type != AV_PICTURE_TYPE_B)
1173             pic->reference = 3;
1174     }
1175
1176     pic->f->coded_picture_number = s->coded_picture_number++;
1177
1178     if (alloc_picture(s, pic, 0) < 0)
1179         return -1;
1180
1181     s->current_picture_ptr = pic;
1182     // FIXME use only the vars from current_pic
1183     s->current_picture_ptr->f->top_field_first = s->top_field_first;
1184     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1185         s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1186         if (s->picture_structure != PICT_FRAME)
1187             s->current_picture_ptr->f->top_field_first =
1188                 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1189     }
1190     s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1191                                                  !s->progressive_sequence;
1192     s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
1193
1194     s->current_picture_ptr->f->pict_type = s->pict_type;
1195     // if (s->avctx->flags && CODEC_FLAG_QSCALE)
1196     //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1197     s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1198
1199     if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1200                                    s->current_picture_ptr)) < 0)
1201         return ret;
1202
1203     if (s->pict_type != AV_PICTURE_TYPE_B) {
1204         s->last_picture_ptr = s->next_picture_ptr;
1205         if (!s->droppable)
1206             s->next_picture_ptr = s->current_picture_ptr;
1207     }
1208     ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1209             s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1210             s->last_picture_ptr    ? s->last_picture_ptr->f->data[0]    : NULL,
1211             s->next_picture_ptr    ? s->next_picture_ptr->f->data[0]    : NULL,
1212             s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1213             s->pict_type, s->droppable);
1214
1215     if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1216         (s->pict_type != AV_PICTURE_TYPE_I ||
1217          s->picture_structure != PICT_FRAME)) {
1218         int h_chroma_shift, v_chroma_shift;
1219         av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1220                                          &h_chroma_shift, &v_chroma_shift);
1221         if (s->pict_type != AV_PICTURE_TYPE_I)
1222             av_log(avctx, AV_LOG_ERROR,
1223                    "warning: first frame is no keyframe\n");
1224         else if (s->picture_structure != PICT_FRAME)
1225             av_log(avctx, AV_LOG_INFO,
1226                    "allocate dummy last picture for field based first keyframe\n");
1227
1228         /* Allocate a dummy frame */
1229         i = ff_find_unused_picture(s->avctx, s->picture, 0);
1230         if (i < 0) {
1231             av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1232             return i;
1233         }
1234         s->last_picture_ptr = &s->picture[i];
1235
1236         s->last_picture_ptr->reference   = 3;
1237         s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
1238
1239         if (alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1240             s->last_picture_ptr = NULL;
1241             return -1;
1242         }
1243
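        /* Paint the dummy reference black: luma 0 and chroma 0x80 (the
         * unsigned 8-bit neutral value), so any prediction from it yields a
         * flat, well-defined picture rather than uninitialized memory. */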
1244         memset(s->last_picture_ptr->f->data[0], 0,
1245                avctx->height * s->last_picture_ptr->f->linesize[0]);
1246         memset(s->last_picture_ptr->f->data[1], 0x80,
1247                (avctx->height >> v_chroma_shift) *
1248                s->last_picture_ptr->f->linesize[1]);
1249         memset(s->last_picture_ptr->f->data[2], 0x80,
1250                (avctx->height >> v_chroma_shift) *
1251                s->last_picture_ptr->f->linesize[2]);
1252
1253         ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1254         ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1255     }
1256     if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1257         s->pict_type == AV_PICTURE_TYPE_B) {
1258         /* Allocate a dummy frame */
1259         i = ff_find_unused_picture(s->avctx, s->picture, 0);
1260         if (i < 0) {
1261             av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1262             return i;
1263         }
1264         s->next_picture_ptr = &s->picture[i];
1265
1266         s->next_picture_ptr->reference   = 3;
1267         s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
1268
1269         if (alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1270             s->next_picture_ptr = NULL;
1271             return -1;
1272         }
1273         ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1274         ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1275     }
1276
1277     if (s->last_picture_ptr) {
1278         ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1279         if (s->last_picture_ptr->f->buf[0] &&
1280             (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1281                                        s->last_picture_ptr)) < 0)
1282             return ret;
1283     }
1284     if (s->next_picture_ptr) {
1285         ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1286         if (s->next_picture_ptr->f->buf[0] &&
1287             (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1288                                        s->next_picture_ptr)) < 0)
1289             return ret;
1290     }
1291
1292     if (s->pict_type != AV_PICTURE_TYPE_I &&
1293         !(s->last_picture_ptr && s->last_picture_ptr->f->buf[0])) {
1294         av_log(s, AV_LOG_ERROR,
1295                "Non-reference picture received and no reference available\n");
1296         return AVERROR_INVALIDDATA;
1297     }
1298
1299     if (s->picture_structure!= PICT_FRAME) {
1300         int i;
1301         for (i = 0; i < 4; i++) {
1302             if (s->picture_structure == PICT_BOTTOM_FIELD) {
1303                 s->current_picture.f->data[i] +=
1304                     s->current_picture.f->linesize[i];
1305             }
1306             s->current_picture.f->linesize[i] *= 2;
1307             s->last_picture.f->linesize[i]    *= 2;
1308             s->next_picture.f->linesize[i]    *= 2;
1309         }
1310     }
1311
1312     /* Set the dequantizer; we can't do it during init because it may
1313      * change for MPEG-4, and we can't do it during header decoding
1314      * because init is not called for MPEG-4 there yet. */
1315     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1316         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1317         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1318     } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1319         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1320         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1321     } else {
1322         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1323         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1324     }
1325
1326 #if FF_API_XVMC
1327 FF_DISABLE_DEPRECATION_WARNINGS
1328     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1329         return ff_xvmc_field_start(s, avctx);
1330 FF_ENABLE_DEPRECATION_WARNINGS
1331 #endif /* FF_API_XVMC */
1332
1333     return 0;
1334 }
1335
1336 /* called after a frame has been decoded. */
1337 void ff_mpv_frame_end(MpegEncContext *s)
1338 {
1339 #if FF_API_XVMC
1340 FF_DISABLE_DEPRECATION_WARNINGS
1341     /* redraw edges for the frame if decoding didn't complete */
1342     // just to make sure that all data is rendered.
1343     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1344         ff_xvmc_field_end(s);
1345     } else
1346 FF_ENABLE_DEPRECATION_WARNINGS
1347 #endif /* FF_API_XVMC */
1348
1349     emms_c();
1350
1351     if (s->current_picture.reference)
1352         ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1353 }
1354
1355 /**
1356  * Print debugging info for the given picture.
1357  */
1358 void ff_print_debug_info(MpegEncContext *s, Picture *p)
1359 {
1360     AVFrame *pict;
1361     if (s->avctx->hwaccel || !p || !p->mb_type)
1362         return;
1363     pict = p->f;
1364
1365     if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1366         int x,y;
1367
1368         av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1369         switch (pict->pict_type) {
1370         case AV_PICTURE_TYPE_I:
1371             av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1372             break;
1373         case AV_PICTURE_TYPE_P:
1374             av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1375             break;
1376         case AV_PICTURE_TYPE_B:
1377             av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1378             break;
1379         case AV_PICTURE_TYPE_S:
1380             av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1381             break;
1382         case AV_PICTURE_TYPE_SI:
1383             av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1384             break;
1385         case AV_PICTURE_TYPE_SP:
1386             av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1387             break;
1388         }
1389         for (y = 0; y < s->mb_height; y++) {
1390             for (x = 0; x < s->mb_width; x++) {
1391                 if (s->avctx->debug & FF_DEBUG_SKIP) {
1392                     int count = s->mbskip_table[x + y * s->mb_stride];
1393                     if (count > 9)
1394                         count = 9;
1395                     av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1396                 }
1397                 if (s->avctx->debug & FF_DEBUG_QP) {
1398                     av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1399                            p->qscale_table[x + y * s->mb_stride]);
1400                 }
1401                 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1402                     int mb_type = p->mb_type[x + y * s->mb_stride];
1403                     // Type & MV direction
1404                     if (IS_PCM(mb_type))
1405                         av_log(s->avctx, AV_LOG_DEBUG, "P");
1406                     else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1407                         av_log(s->avctx, AV_LOG_DEBUG, "A");
1408                     else if (IS_INTRA4x4(mb_type))
1409                         av_log(s->avctx, AV_LOG_DEBUG, "i");
1410                     else if (IS_INTRA16x16(mb_type))
1411                         av_log(s->avctx, AV_LOG_DEBUG, "I");
1412                     else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1413                         av_log(s->avctx, AV_LOG_DEBUG, "d");
1414                     else if (IS_DIRECT(mb_type))
1415                         av_log(s->avctx, AV_LOG_DEBUG, "D");
1416                     else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1417                         av_log(s->avctx, AV_LOG_DEBUG, "g");
1418                     else if (IS_GMC(mb_type))
1419                         av_log(s->avctx, AV_LOG_DEBUG, "G");
1420                     else if (IS_SKIP(mb_type))
1421                         av_log(s->avctx, AV_LOG_DEBUG, "S");
1422                     else if (!USES_LIST(mb_type, 1))
1423                         av_log(s->avctx, AV_LOG_DEBUG, ">");
1424                     else if (!USES_LIST(mb_type, 0))
1425                         av_log(s->avctx, AV_LOG_DEBUG, "<");
1426                     else {
1427                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1428                         av_log(s->avctx, AV_LOG_DEBUG, "X");
1429                     }
1430
1431                     // segmentation
1432                     if (IS_8X8(mb_type))
1433                         av_log(s->avctx, AV_LOG_DEBUG, "+");
1434                     else if (IS_16X8(mb_type))
1435                         av_log(s->avctx, AV_LOG_DEBUG, "-");
1436                     else if (IS_8X16(mb_type))
1437                         av_log(s->avctx, AV_LOG_DEBUG, "|");
1438                     else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1439                         av_log(s->avctx, AV_LOG_DEBUG, " ");
1440                     else
1441                         av_log(s->avctx, AV_LOG_DEBUG, "?");
1442
1443
1444                     if (IS_INTERLACED(mb_type))
1445                         av_log(s->avctx, AV_LOG_DEBUG, "=");
1446                     else
1447                         av_log(s->avctx, AV_LOG_DEBUG, " ");
1448                 }
1449             }
1450             av_log(s->avctx, AV_LOG_DEBUG, "\n");
1451         }
1452     }
1453 }
1454
1455 /**
1456  * find the lowest MB row referenced in the MVs
1457  */
1458 int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
1459 {
1460     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1461     int my, off, i, mvs;
1462
1463     if (s->picture_structure != PICT_FRAME || s->mcsel)
1464         goto unhandled;
1465
1466     switch (s->mv_type) {
1467         case MV_TYPE_16X16:
1468             mvs = 1;
1469             break;
1470         case MV_TYPE_16X8:
1471             mvs = 2;
1472             break;
1473         case MV_TYPE_8X8:
1474             mvs = 4;
1475             break;
1476         default:
1477             goto unhandled;
1478     }
1479
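    /* MVs are normalised to quarter-pel units here (half-pel vectors are
     * shifted left by one), so 64 units correspond to one 16-pixel MB row;
     * the (value + 63) >> 6 below rounds the furthest displacement up to
     * whole MB rows. */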
1480     for (i = 0; i < mvs; i++) {
1481         my = s->mv[dir][i][1]<<qpel_shift;
1482         my_max = FFMAX(my_max, my);
1483         my_min = FFMIN(my_min, my);
1484     }
1485
1486     off = (FFMAX(-my_min, my_max) + 63) >> 6;
1487
1488     return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1489 unhandled:
1490     return s->mb_height-1;
1491 }
1492
1493 /* put block[] to dest[] */
1494 static inline void put_dct(MpegEncContext *s,
1495                            int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1496 {
1497     s->dct_unquantize_intra(s, block, i, qscale);
1498     s->idsp.idct_put(dest, line_size, block);
1499 }
1500
1501 /* add block[] to dest[] */
1502 static inline void add_dct(MpegEncContext *s,
1503                            int16_t *block, int i, uint8_t *dest, int line_size)
1504 {
1505     if (s->block_last_index[i] >= 0) {
1506         s->idsp.idct_add(dest, line_size, block);
1507     }
1508 }
1509
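     /* dequantize inter block[] and add it to dest[] */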
1510 static inline void add_dequant_dct(MpegEncContext *s,
1511                            int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1512 {
1513     if (s->block_last_index[i] >= 0) {
1514         s->dct_unquantize_inter(s, block, i, qscale);
1515
1516         s->idsp.idct_add(dest, line_size, block);
1517     }
1518 }
1519
1520 /**
1521  * Clean the DC, AC and coded_block prediction tables for the current non-intra MB.
1522  */
1523 void ff_clean_intra_table_entries(MpegEncContext *s)
1524 {
1525     int wrap = s->b8_stride;
1526     int xy = s->block_index[0];
1527
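         /* reset the four luma DC predictors of this MB to the default
          * prediction value (1024) */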
1528     s->dc_val[0][xy           ] =
1529     s->dc_val[0][xy + 1       ] =
1530     s->dc_val[0][xy     + wrap] =
1531     s->dc_val[0][xy + 1 + wrap] = 1024;
1532     /* ac pred */
1533     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
1534     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1535     if (s->msmpeg4_version>=3) {
1536         s->coded_block[xy           ] =
1537         s->coded_block[xy + 1       ] =
1538         s->coded_block[xy     + wrap] =
1539         s->coded_block[xy + 1 + wrap] = 0;
1540     }
1541     /* chroma */
1542     wrap = s->mb_stride;
1543     xy = s->mb_x + s->mb_y * wrap;
1544     s->dc_val[1][xy] =
1545     s->dc_val[2][xy] = 1024;
1546     /* ac pred */
1547     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1548     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1549
1550     s->mbintra_table[xy]= 0;
1551 }
1552
1553 /* generic function called after a macroblock has been parsed by the
1554    decoder or after it has been encoded by the encoder.
1555
1556    Important variables used:
1557    s->mb_intra : true if intra macroblock
1558    s->mv_dir   : motion vector direction
1559    s->mv_type  : motion vector type
1560    s->mv       : motion vector
1561    s->interlaced_dct : true if an interlaced DCT is used (MPEG-2)
1562  */
1563 static av_always_inline
1564 void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
1565                             int is_mpeg12)
1566 {
1567     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1568
1569 #if FF_API_XVMC
1570 FF_DISABLE_DEPRECATION_WARNINGS
1571     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1572         ff_xvmc_decode_mb(s); // XvMC uses pblocks
1573         return;
1574     }
1575 FF_ENABLE_DEPRECATION_WARNINGS
1576 #endif /* FF_API_XVMC */
1577
1578     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1579        /* print DCT coefficients */
1580        int i,j;
1581        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1582        for(i=0; i<6; i++){
1583            for(j=0; j<64; j++){
1584                av_log(s->avctx, AV_LOG_DEBUG, "%5d",
1585                       block[i][s->idsp.idct_permutation[j]]);
1586            }
1587            av_log(s->avctx, AV_LOG_DEBUG, "\n");
1588        }
1589     }
1590
1591     s->current_picture.qscale_table[mb_xy] = s->qscale;
1592
1593     /* update DC predictors for P macroblocks */
1594     if (!s->mb_intra) {
1595         if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1596             if(s->mbintra_table[mb_xy])
1597                 ff_clean_intra_table_entries(s);
1598         } else {
1599             s->last_dc[0] =
1600             s->last_dc[1] =
1601             s->last_dc[2] = 128 << s->intra_dc_precision;
1602         }
1603     }
1604     else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1605         s->mbintra_table[mb_xy]=1;
1606
1607     if ((s->avctx->flags & CODEC_FLAG_PSNR) ||
1608         !(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
1609           s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
1610         uint8_t *dest_y, *dest_cb, *dest_cr;
1611         int dct_linesize, dct_offset;
1612         op_pixels_func (*op_pix)[4];
1613         qpel_mc_func (*op_qpix)[16];
1614         const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
1615         const int uvlinesize = s->current_picture.f->linesize[1];
1616         const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
1617         const int block_size = 8;
1618
1619         /* avoid the copy if the macroblock was also skipped in the last frame */
1620         /* only done while decoding, as we might trash the buffers a bit during encoding */
1621         if(!s->encoding){
1622             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1623
1624             if (s->mb_skipped) {
1625                 s->mb_skipped= 0;
1626                 assert(s->pict_type!=AV_PICTURE_TYPE_I);
1627                 *mbskip_ptr = 1;
1628             } else if(!s->current_picture.reference) {
1629                 *mbskip_ptr = 1;
1630             } else{
1631                 *mbskip_ptr = 0; /* not skipped */
1632             }
1633         }
1634
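             /* with an interlaced DCT the 8x8 blocks cover alternating field
              * lines: double the stride and start the lower blocks one frame
              * line down instead of eight */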
1635         dct_linesize = linesize << s->interlaced_dct;
1636         dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;
1637
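             /* when decoding a B-frame without draw_horiz_band, the MB is
              * reconstructed into a scratchpad and copied to the frame in one
              * pass after skip_idct, presumably to avoid read-modify-write
              * cycles on what may be a slow direct-rendering buffer */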
1638         if(readable){
1639             dest_y=  s->dest[0];
1640             dest_cb= s->dest[1];
1641             dest_cr= s->dest[2];
1642         }else{
1643             dest_y = s->sc.b_scratchpad;
1644             dest_cb= s->sc.b_scratchpad+16*linesize;
1645             dest_cr= s->sc.b_scratchpad+32*linesize;
1646         }
1647
1648         if (!s->mb_intra) {
1649             /* motion handling */
1650             /* decoding or more than one mb_type (MC was already done otherwise) */
1651             if(!s->encoding){
1652
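                 /* with frame-level multithreading, wait until the reference
                  * frames have decoded at least the rows our MVs point into */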
1653                 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
1654                     if (s->mv_dir & MV_DIR_FORWARD) {
1655                         ff_thread_await_progress(&s->last_picture_ptr->tf,
1656                                                  ff_mpv_lowest_referenced_row(s, 0),
1657                                                  0);
1658                     }
1659                     if (s->mv_dir & MV_DIR_BACKWARD) {
1660                         ff_thread_await_progress(&s->next_picture_ptr->tf,
1661                                                  ff_mpv_lowest_referenced_row(s, 1),
1662                                                  0);
1663                     }
1664                 }
1665
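                 /* the first prediction direction overwrites dest (put); a
                  * second one is averaged on top (avg); no_rounding selects the
                  * no-rounding halfpel functions, except for B-frames */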
1666                 op_qpix= s->me.qpel_put;
1667                 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
1668                     op_pix = s->hdsp.put_pixels_tab;
1669                 }else{
1670                     op_pix = s->hdsp.put_no_rnd_pixels_tab;
1671                 }
1672                 if (s->mv_dir & MV_DIR_FORWARD) {
1673                     ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
1674                     op_pix = s->hdsp.avg_pixels_tab;
1675                     op_qpix= s->me.qpel_avg;
1676                 }
1677                 if (s->mv_dir & MV_DIR_BACKWARD) {
1678                     ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
1679                 }
1680             }
1681
1682             /* skip dequant / idct if we are really late ;) */
1683             if(s->avctx->skip_idct){
1684                 if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
1685                    ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
1686                    || s->avctx->skip_idct >= AVDISCARD_ALL)
1687                     goto skip_idct;
1688             }
1689
1690             /* add dct residue */
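             /* the codecs listed in the condition below dequantize their
              * coefficients while parsing, so only IDCT+add remains; everything
              * else (and the encoder) dequantizes inter blocks here */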
1691             if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
1692                                 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
1693                 add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
1694                 add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
1695                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
1696                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1697
1698                 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
1699                     if (s->chroma_y_shift){
1700                         add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1701                         add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1702                     }else{
1703                         dct_linesize >>= 1;
1704                         dct_offset >>=1;
1705                         add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
1706                         add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
1707                         add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1708                         add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1709                     }
1710                 }
1711             } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
1712                 add_dct(s, block[0], 0, dest_y                          , dct_linesize);
1713                 add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
1714                 add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
1715                 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
1716
1717                 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
1718                     if(s->chroma_y_shift){//Chroma420
1719                         add_dct(s, block[4], 4, dest_cb, uvlinesize);
1720                         add_dct(s, block[5], 5, dest_cr, uvlinesize);
1721                     }else{
1722                         //chroma422
1723                         dct_linesize = uvlinesize << s->interlaced_dct;
1724                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
1725
1726                         add_dct(s, block[4], 4, dest_cb, dct_linesize);
1727                         add_dct(s, block[5], 5, dest_cr, dct_linesize);
1728                         add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
1729                         add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
1730                         if(!s->chroma_x_shift){//Chroma444
1731                             add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
1732                             add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
1733                             add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
1734                             add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
1735                         }
1736                     }
1737                 } // end of !CODEC_FLAG_GRAY handling
1738             }
1739             else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
1740                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
1741             }
1742         } else {
1743             /* intra block: no motion compensation, only the inverse DCT */
1744             if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
1745                 put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
1746                 put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
1747                 put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
1748                 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1749
1750                 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
1751                     if(s->chroma_y_shift){
1752                         put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1753                         put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1754                     }else{
1755                         dct_offset >>=1;
1756                         dct_linesize >>=1;
1757                         put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
1758                         put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
1759                         put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1760                         put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1761                     }
1762                 }
1763             }else{
1764                 s->idsp.idct_put(dest_y,                           dct_linesize, block[0]);
1765                 s->idsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
1766                 s->idsp.idct_put(dest_y + dct_offset,              dct_linesize, block[2]);
1767                 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
1768
1769                 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
1770                     if(s->chroma_y_shift){
1771                         s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
1772                         s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
1773                     }else{
1774
1775                         dct_linesize = uvlinesize << s->interlaced_dct;
1776                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
1777
1778                         s->idsp.idct_put(dest_cb,              dct_linesize, block[4]);
1779                         s->idsp.idct_put(dest_cr,              dct_linesize, block[5]);
1780                         s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
1781                         s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
1782                         if(!s->chroma_x_shift){//Chroma444
1783                             s->idsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
1784                             s->idsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
1785                             s->idsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
1786                             s->idsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
1787                         }
1788                     }
1789                 } // end of !CODEC_FLAG_GRAY handling
1790             }
1791         }
1792 skip_idct:
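         /* if the MB was reconstructed into the scratchpad, copy it to its real
          * destination in one pass */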
1793         if(!readable){
1794             s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
1795             s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
1796             s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
1797         }
1798     }
1799 }
1800
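     /* mpv_decode_mb_internal() is av_always_inline; unless CONFIG_SMALL is set
        it is instantiated twice so that the constant is_mpeg12 argument lets the
        compiler drop the branches the respective codecs never take. */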
1801 void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
1802 {
1803 #if !CONFIG_SMALL
1804     if(s->out_format == FMT_MPEG1) {
1805         mpv_decode_mb_internal(s, block, 1);
1806     } else
1807 #endif
1808         mpv_decode_mb_internal(s, block, 0);
1809 }
1810
1811 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
1812 {
1813     ff_draw_horiz_band(s->avctx, s->current_picture.f,
1814                        s->last_picture.f, y, h, s->picture_structure,
1815                        s->first_field, s->low_delay);
1816 }
1817
1818 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
1819     const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
1820     const int uvlinesize = s->current_picture.f->linesize[1];
1821     const int mb_size= 4;
1822
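     /* block_index[0..3] address the four luma 8x8 blocks in the b8 grid,
        block_index[4..5] the two chroma blocks stored after the luma entries;
        like dest[], they start one MB to the left and are advanced per
        macroblock (mb_size is log2 of the 16-pixel MB size) */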
1823     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
1824     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
1825     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
1826     s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
1827     s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
1828     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
1829     //block_index is not used by mpeg2, so it is not affected by chroma_format
1830
1831     s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) <<  mb_size);
1832     s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
1833     s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
1834
1835     if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
1836     {
1837         if(s->picture_structure==PICT_FRAME){
1838             s->dest[0] += s->mb_y *   linesize << mb_size;
1839             s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
1840             s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
1841         }else{
1842             s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
1843             s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
1844             s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
1845             assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
1846         }
1847     }
1848 }
1849
1850 /**
1851  * Permute an 8x8 block.
1852  * @param block the block which will be permuted according to the given permutation vector
1853  * @param permutation the permutation vector
1854  * @param last the last non-zero coefficient in scantable order, used to speed the permutation up
1855  * @param scantable the scantable in use; only needed to speed the permutation up, the block is not
1856  *                  (inverse) permuted into scantable order!
1857  */
1858 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
1859 {
1860     int i;
1861     int16_t temp[64];
1862
1863     if(last<=0) return;
1864     //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
1865
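     /* two passes: save the coefficients up to 'last' into temp[] while
        clearing them in block[], then scatter them to their permuted positions;
        clearing first makes the in-place permutation safe and leaves positions
        that receive no coefficient at zero */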
1866     for(i=0; i<=last; i++){
1867         const int j= scantable[i];
1868         temp[j]= block[j];
1869         block[j]=0;
1870     }
1871
1872     for(i=0; i<=last; i++){
1873         const int j= scantable[i];
1874         const int perm_j= permutation[j];
1875         block[perm_j]= temp[j];
1876     }
1877 }
1878
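     /* Drop every buffered picture and reset the parse context; called when the
        decoder is flushed, e.g. on seeking. */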
1879 void ff_mpeg_flush(AVCodecContext *avctx){
1880     int i;
1881     MpegEncContext *s = avctx->priv_data;
1882
1883     if (!s || !s->picture)
1884         return;
1885
1886     for (i = 0; i < MAX_PICTURE_COUNT; i++)
1887         ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1888     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
1889
1890     ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1891     ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1892     ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1893
1894     s->mb_x= s->mb_y= 0;
1895
1896     s->parse_context.state= -1;
1897     s->parse_context.frame_start_found= 0;
1898     s->parse_context.overread= 0;
1899     s->parse_context.overread_index= 0;
1900     s->parse_context.index= 0;
1901     s->parse_context.last_index= 0;
1902     s->bitstream_buffer_size=0;
1903     s->pp_time=0;
1904 }
1905
1906 /**
1907  * Set qscale and update the qscale-dependent variables.
1908  */
1909 void ff_set_qscale(MpegEncContext * s, int qscale)
1910 {
1911     if (qscale < 1)
1912         qscale = 1;
1913     else if (qscale > 31)
1914         qscale = 31;
1915
1916     s->qscale = qscale;
1917     s->chroma_qscale= s->chroma_qscale_table[qscale];
1918
1919     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
1920     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
1921 }
1922
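     /* Report decode progress on the current picture to other frame threads.
        Progress is not reported for B-frames (which are never used as
        references), for partitioned frames, or after an error. */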
1923 void ff_mpv_report_decode_progress(MpegEncContext *s)
1924 {
1925     if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
1926         ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
1927 }