mpegvideo_enc: add chroma/luma_elim_threshold private options.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29
30 #include "libavutil/intmath.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/opt.h"
33 #include "avcodec.h"
34 #include "dsputil.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
37 #include "h263.h"
38 #include "mjpegenc.h"
39 #include "msmpeg4.h"
40 #include "faandct.h"
41 #include "thread.h"
42 #include "aandcttab.h"
43 #include "flv.h"
44 #include "mpeg4video.h"
45 #include "internal.h"
46 #include <limits.h>
47
48 //#undef NDEBUG
49 //#include <assert.h>
50
51 static int encode_picture(MpegEncContext *s, int picture_number);
52 static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
53 static int sse_mb(MpegEncContext *s);
54 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
55 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
56
57 /* enable all paranoid tests for rounding, overflows, etc... */
58 //#define PARANOID
59
60 //#define DEBUG
61
62 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
63 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
64
65 const AVOption ff_mpv_generic_options[] = {
66     FF_MPV_COMMON_OPTS
67     { NULL },
68 };
69
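/* Rough sketch of what ff_convert_matrix() does: for every qscale in
 * [qmin, qmax] it precomputes reciprocal quantization factors so the
 * quantizers can replace the per-coefficient division
 *     level = coeff / (qscale * quant_matrix[i])
 * with, in effect, a multiply and shift:
 *     level = (coeff * qmat[qscale][i]) >> QMAT_SHIFT;
 * The AAN/ifast branch additionally folds ff_aanscales[] into the factor,
 * and qmat16[][0]/[1] hold a 16-bit multiplier plus rounding bias for the
 * SIMD quantizer. */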
70 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
71                        uint16_t (*qmat16)[2][64],
72                        const uint16_t *quant_matrix,
73                        int bias, int qmin, int qmax, int intra)
74 {
75     int qscale;
76     int shift = 0;
77
78     for (qscale = qmin; qscale <= qmax; qscale++) {
79         int i;
80         if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
81             dsp->fdct == ff_jpeg_fdct_islow_10
82 #ifdef FAAN_POSTSCALE
83             || dsp->fdct == ff_faandct
84 #endif
85             ) {
86             for (i = 0; i < 64; i++) {
87                 const int j = dsp->idct_permutation[i];
88                 /* 16 <= qscale * quant_matrix[i] <= 7905
89                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
90                  *             19952 <=              x  <= 249205026
91                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
92                  *           3444240 >= (1 << 36) / (x) >= 275 */
93
94                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
95                                         (qscale * quant_matrix[j]));
96             }
97         } else if (dsp->fdct == ff_fdct_ifast
98 #ifndef FAAN_POSTSCALE
99                    || dsp->fdct == ff_faandct
100 #endif
101                    ) {
102             for (i = 0; i < 64; i++) {
103                 const int j = dsp->idct_permutation[i];
104                 /* 16 <= qscale * quant_matrix[i] <= 7905
105                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
106                  *             19952 <=              x  <= 249205026
107                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
108                  *           3444240 >= (1 << 36) / (x) >= 275 */
109
110                 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
111                                         (ff_aanscales[i] * qscale *
112                                          quant_matrix[j]));
113             }
114         } else {
115             for (i = 0; i < 64; i++) {
116                 const int j = dsp->idct_permutation[i];
117                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
118                  * Assume x = qscale * quant_matrix[i]
119                  * So             16 <=              x  <= 7905
120                  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
121                  * so          32768 >= (1 << 19) / (x) >= 67 */
122                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
123                                         (qscale * quant_matrix[j]));
124                 //qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) /
125                 //                    (qscale * quant_matrix[i]);
126                 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
127                                        (qscale * quant_matrix[j]);
128
129                 if (qmat16[qscale][0][i] == 0 ||
130                     qmat16[qscale][0][i] == 128 * 256)
131                     qmat16[qscale][0][i] = 128 * 256 - 1;
132                 qmat16[qscale][1][i] =
133                     ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
134                                 qmat16[qscale][0][i]);
135             }
136         }
137
138         for (i = intra; i < 64; i++) {
139             int64_t max = 8191;
140             if (dsp->fdct == ff_fdct_ifast
141 #ifndef FAAN_POSTSCALE
142                 || dsp->fdct == ff_faandct
143 #endif
144                ) {
145                 max = (8191LL * ff_aanscales[i]) >> 14;
146             }
147             while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
148                 shift++;
149             }
150         }
151     }
152     if (shift) {
153         av_log(NULL, AV_LOG_INFO,
154                "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
155                QMAT_SHIFT - shift);
156     }
157 }
158
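/* Note: lambda * 139 >> (FF_LAMBDA_SHIFT + 7) is approximately a rounded
 * division by FF_QP2LAMBDA (~118), i.e. it maps the rate-distortion lambda
 * back to a quantizer scale, which is then clipped to [qmin, qmax]. */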
159 static inline void update_qscale(MpegEncContext *s)
160 {
161     s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
162                 (FF_LAMBDA_SHIFT + 7);
163     s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
164
165     s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
166                  FF_LAMBDA_SHIFT;
167 }
168
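/* Writes an optional custom quantization matrix: a single '1' bit followed
 * by the 64 entries in zigzag scan order (8 bits each), or just a '0' bit
 * when the default matrix is used. */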
169 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
170 {
171     int i;
172
173     if (matrix) {
174         put_bits(pb, 1, 1);
175         for (i = 0; i < 64; i++) {
176             put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
177         }
178     } else
179         put_bits(pb, 1, 0);
180 }
181
182 /**
183  * init s->current_picture.qscale_table from s->lambda_table
184  */
185 void ff_init_qscale_tab(MpegEncContext *s)
186 {
187     int8_t * const qscale_table = s->current_picture.f.qscale_table;
188     int i;
189
190     for (i = 0; i < s->mb_num; i++) {
191         unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
192         int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
193         qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
194                                                   s->avctx->qmax);
195     }
196 }
197
198 static void copy_picture_attributes(MpegEncContext *s,
199                                     AVFrame *dst,
200                                     AVFrame *src)
201 {
202     int i;
203
204     dst->pict_type              = src->pict_type;
205     dst->quality                = src->quality;
206     dst->coded_picture_number   = src->coded_picture_number;
207     dst->display_picture_number = src->display_picture_number;
208     //dst->reference              = src->reference;
209     dst->pts                    = src->pts;
210     dst->interlaced_frame       = src->interlaced_frame;
211     dst->top_field_first        = src->top_field_first;
212
213     if (s->avctx->me_threshold) {
214         if (!src->motion_val[0])
215             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
216         if (!src->mb_type)
217             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
218         if (!src->ref_index[0])
219             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
220         if (src->motion_subsample_log2 != dst->motion_subsample_log2)
221             av_log(s->avctx, AV_LOG_ERROR,
222                    "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
223                    src->motion_subsample_log2, dst->motion_subsample_log2);
224
225         memcpy(dst->mb_type, src->mb_type,
226                s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
227
228         for (i = 0; i < 2; i++) {
229             int stride = ((16 * s->mb_width ) >>
230                           src->motion_subsample_log2) + 1;
231             int height = ((16 * s->mb_height) >> src->motion_subsample_log2);
232
233             if (src->motion_val[i] &&
234                 src->motion_val[i] != dst->motion_val[i]) {
235                 memcpy(dst->motion_val[i], src->motion_val[i],
236                        2 * stride * height * sizeof(int16_t));
237             }
238             if (src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]) {
239                 memcpy(dst->ref_index[i], src->ref_index[i],
240                        s->mb_stride * 4 * s->mb_height * sizeof(int8_t));
241             }
242         }
243     }
244 }
245
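/* Copies the per-frame state that motion estimation and the frame headers
 * depend on; typically called with a per-slice duplicate context as dst so
 * the slice threads encode with consistent pict_type, qscale, lambda, etc. */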
246 static void update_duplicate_context_after_me(MpegEncContext *dst,
247                                               MpegEncContext *src)
248 {
249 #define COPY(a) dst->a= src->a
250     COPY(pict_type);
251     COPY(current_picture);
252     COPY(f_code);
253     COPY(b_code);
254     COPY(qscale);
255     COPY(lambda);
256     COPY(lambda2);
257     COPY(picture_in_gop_number);
258     COPY(gop_picture_number);
259     COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
260     COPY(progressive_frame);    // FIXME don't set in encode_header
261     COPY(partitioned_frame);    // FIXME don't set in encode_header
262 #undef COPY
263 }
264
265 /**
266  * Set the given MpegEncContext to defaults for encoding.
267  * The changed fields will not depend upon the prior state of the MpegEncContext.
268  */
269 static void MPV_encode_defaults(MpegEncContext *s)
270 {
271     int i;
272     ff_MPV_common_defaults(s);
273
274     for (i = -16; i < 16; i++) {
275         default_fcode_tab[i + MAX_MV] = 1;
276     }
277     s->me.mv_penalty = default_mv_penalty;
278     s->fcode_tab     = default_fcode_tab;
279 }
280
281 /* init video encoder */
282 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
283 {
284     MpegEncContext *s = avctx->priv_data;
285     int i;
286     int chroma_h_shift, chroma_v_shift;
287
288     MPV_encode_defaults(s);
289
290     switch (avctx->codec_id) {
291     case CODEC_ID_MPEG2VIDEO:
292         if (avctx->pix_fmt != PIX_FMT_YUV420P &&
293             avctx->pix_fmt != PIX_FMT_YUV422P) {
294             av_log(avctx, AV_LOG_ERROR,
295                    "only YUV420 and YUV422 are supported\n");
296             return -1;
297         }
298         break;
299     case CODEC_ID_LJPEG:
300         if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
301             avctx->pix_fmt != PIX_FMT_YUVJ422P &&
302             avctx->pix_fmt != PIX_FMT_YUVJ444P &&
303             avctx->pix_fmt != PIX_FMT_BGRA     &&
304             ((avctx->pix_fmt != PIX_FMT_YUV420P &&
305               avctx->pix_fmt != PIX_FMT_YUV422P &&
306               avctx->pix_fmt != PIX_FMT_YUV444P) ||
307              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
308             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
309             return -1;
310         }
311         break;
312     case CODEC_ID_MJPEG:
313         if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
314             avctx->pix_fmt != PIX_FMT_YUVJ422P &&
315             ((avctx->pix_fmt != PIX_FMT_YUV420P &&
316               avctx->pix_fmt != PIX_FMT_YUV422P) ||
317              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
318             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
319             return -1;
320         }
321         break;
322     default:
323         if (avctx->pix_fmt != PIX_FMT_YUV420P) {
324             av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
325             return -1;
326         }
327     }
328
329     switch (avctx->pix_fmt) {
330     case PIX_FMT_YUVJ422P:
331     case PIX_FMT_YUV422P:
332         s->chroma_format = CHROMA_422;
333         break;
334     case PIX_FMT_YUVJ420P:
335     case PIX_FMT_YUV420P:
336     default:
337         s->chroma_format = CHROMA_420;
338         break;
339     }
340
341     s->bit_rate = avctx->bit_rate;
342     s->width    = avctx->width;
343     s->height   = avctx->height;
344     if (avctx->gop_size > 600 &&
345         avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
346         av_log(avctx, AV_LOG_ERROR,
347                "Warning, keyframe interval too large! reducing it ...\n");
348         avctx->gop_size = 600;
349     }
350     s->gop_size     = avctx->gop_size;
351     s->avctx        = avctx;
352     s->flags        = avctx->flags;
353     s->flags2       = avctx->flags2;
354     s->max_b_frames = avctx->max_b_frames;
355     s->codec_id     = avctx->codec->id;
356 #if FF_API_MPV_GLOBAL_OPTS
357     if (avctx->luma_elim_threshold)
358         s->luma_elim_threshold   = avctx->luma_elim_threshold;
359     if (avctx->chroma_elim_threshold)
360         s->chroma_elim_threshold = avctx->chroma_elim_threshold;
361 #endif
362     s->strict_std_compliance = avctx->strict_std_compliance;
363     s->quarter_sample     = (avctx->flags & CODEC_FLAG_QPEL) != 0;
364     s->mpeg_quant         = avctx->mpeg_quant;
365     s->rtp_mode           = !!avctx->rtp_payload_size;
366     s->intra_dc_precision = avctx->intra_dc_precision;
367     s->user_specified_pts = AV_NOPTS_VALUE;
368
369     if (s->gop_size <= 1) {
370         s->intra_only = 1;
371         s->gop_size   = 12;
372     } else {
373         s->intra_only = 0;
374     }
375
376     s->me_method = avctx->me_method;
377
378     /* Fixed QSCALE */
379     s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
380
381 #if FF_API_MPV_GLOBAL_OPTS
382     if (s->flags & CODEC_FLAG_QP_RD)
383         s->mpv_flags |= FF_MPV_FLAG_QP_RD;
384 #endif
385
386     s->adaptive_quant = (s->avctx->lumi_masking ||
387                          s->avctx->dark_masking ||
388                          s->avctx->temporal_cplx_masking ||
389                          s->avctx->spatial_cplx_masking  ||
390                          s->avctx->p_masking      ||
391                          s->avctx->border_masking ||
392                          (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
393                         !s->fixed_qscale;
394
395     s->loop_filter      = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
396
397     if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
398         av_log(avctx, AV_LOG_ERROR,
399                "a vbv buffer size is needed "
400                "for encoding with a maximum bitrate\n");
401         return -1;
402     }
403
404     if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
405         av_log(avctx, AV_LOG_INFO,
406                "Warning, min_rate > 0 but min_rate != max_rate isn't recommended!\n");
407     }
408
409     if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
410         av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
411         return -1;
412     }
413
414     if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
415         av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
416         return -1;
417     }
418
419     if (avctx->rc_max_rate &&
420         avctx->rc_max_rate == avctx->bit_rate &&
421         avctx->rc_max_rate != avctx->rc_min_rate) {
422         av_log(avctx, AV_LOG_INFO,
423                "impossible bitrate constraints, this will fail\n");
424     }
425
426     if (avctx->rc_buffer_size &&
427         avctx->bit_rate * (int64_t)avctx->time_base.num >
428             avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
429         av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
430         return -1;
431     }
432
433     if (!s->fixed_qscale &&
434         avctx->bit_rate * av_q2d(avctx->time_base) >
435             avctx->bit_rate_tolerance) {
436         av_log(avctx, AV_LOG_ERROR,
437                "bitrate tolerance too small for bitrate\n");
438         return -1;
439     }
440
441     if (s->avctx->rc_max_rate &&
442         s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
443         (s->codec_id == CODEC_ID_MPEG1VIDEO ||
444          s->codec_id == CODEC_ID_MPEG2VIDEO) &&
445         90000LL * (avctx->rc_buffer_size - 1) >
446             s->avctx->rc_max_rate * 0xFFFFLL) {
447         av_log(avctx, AV_LOG_INFO,
448                "Warning, vbv_delay will be set to 0xFFFF (=VBR) as the "
449                "specified vbv buffer is too large for the given bitrate!\n");
450     }
451
452     if ((s->flags & CODEC_FLAG_4MV)  && s->codec_id != CODEC_ID_MPEG4 &&
453         s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P &&
454         s->codec_id != CODEC_ID_FLV1) {
455         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
456         return -1;
457     }
458
459     if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
460         av_log(avctx, AV_LOG_ERROR,
461                "OBMC is only supported with simple mb decision\n");
462         return -1;
463     }
464
465     if (s->quarter_sample && s->codec_id != CODEC_ID_MPEG4) {
466         av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
467         return -1;
468     }
469
470     if (s->max_b_frames                    &&
471         s->codec_id != CODEC_ID_MPEG4      &&
472         s->codec_id != CODEC_ID_MPEG1VIDEO &&
473         s->codec_id != CODEC_ID_MPEG2VIDEO) {
474         av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
475         return -1;
476     }
477
478     if ((s->codec_id == CODEC_ID_MPEG4 ||
479          s->codec_id == CODEC_ID_H263  ||
480          s->codec_id == CODEC_ID_H263P) &&
481         (avctx->sample_aspect_ratio.num > 255 ||
482          avctx->sample_aspect_ratio.den > 255)) {
483         av_log(avctx, AV_LOG_ERROR,
484                "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
485                avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
486         return -1;
487     }
488
489     if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
490         s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG2VIDEO) {
491         av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
492         return -1;
493     }
494
495     // FIXME mpeg2 uses that too
496     if (s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4) {
497         av_log(avctx, AV_LOG_ERROR,
498                "mpeg2 style quantization not supported by codec\n");
499         return -1;
500     }
501
502 #if FF_API_MPV_GLOBAL_OPTS
503     if (s->flags & CODEC_FLAG_CBP_RD)
504         s->mpv_flags |= FF_MPV_FLAG_CBP_RD;
505 #endif
506
507     if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
508         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
509         return -1;
510     }
511
512     if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
513         s->avctx->mb_decision != FF_MB_DECISION_RD) {
514         av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
515         return -1;
516     }
517
518     if (s->avctx->scenechange_threshold < 1000000000 &&
519         (s->flags & CODEC_FLAG_CLOSED_GOP)) {
520         av_log(avctx, AV_LOG_ERROR,
521                "closed gop with scene change detection is not supported yet, "
522                "set threshold to 1000000000\n");
523         return -1;
524     }
525
526     if (s->flags & CODEC_FLAG_LOW_DELAY) {
527         if (s->codec_id != CODEC_ID_MPEG2VIDEO) {
528             av_log(avctx, AV_LOG_ERROR,
529                   "low delay forcing is only available for mpeg2\n");
530             return -1;
531         }
532         if (s->max_b_frames != 0) {
533             av_log(avctx, AV_LOG_ERROR,
534                    "b frames cannot be used with low delay\n");
535             return -1;
536         }
537     }
538
539     if (s->q_scale_type == 1) {
540         if (avctx->qmax > 12) {
541             av_log(avctx, AV_LOG_ERROR,
542                    "non linear quant only supports qmax <= 12 currently\n");
543             return -1;
544         }
545     }
546
547     if (s->avctx->thread_count > 1         &&
548         s->codec_id != CODEC_ID_MPEG4      &&
549         s->codec_id != CODEC_ID_MPEG1VIDEO &&
550         s->codec_id != CODEC_ID_MPEG2VIDEO &&
551         (s->codec_id != CODEC_ID_H263P)) {
552         av_log(avctx, AV_LOG_ERROR,
553                "multi threaded encoding not supported by codec\n");
554         return -1;
555     }
556
557     if (s->avctx->thread_count < 1) {
558         av_log(avctx, AV_LOG_ERROR,
559                "automatic thread number detection not supported by codec, "
560                "patch welcome\n");
561         return -1;
562     }
563
564     if (s->avctx->thread_count > 1)
565         s->rtp_mode = 1;
566
567     if (!avctx->time_base.den || !avctx->time_base.num) {
568         av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
569         return -1;
570     }
571
572     i = (INT_MAX / 2 + 128) >> 8;
573     if (avctx->me_threshold >= i) {
574         av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n",
575                i - 1);
576         return -1;
577     }
578     if (avctx->mb_threshold >= i) {
579         av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
580                i - 1);
581         return -1;
582     }
583
584     if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
585         av_log(avctx, AV_LOG_INFO,
586                "notice: b_frame_strategy only affects the first pass\n");
587         avctx->b_frame_strategy = 0;
588     }
589
590     i = av_gcd(avctx->time_base.den, avctx->time_base.num);
591     if (i > 1) {
592         av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
593         avctx->time_base.den /= i;
594         avctx->time_base.num /= i;
595         //return -1;
596     }
597
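    /* Default quantizer rounding bias, expressed in units of
     * 1 << QUANT_BIAS_SHIFT: 3 << (QUANT_BIAS_SHIFT - 3) is a +3/8 offset
     * (MPEG-1/2 and MJPEG style quantization), -(1 << (QUANT_BIAS_SHIFT - 2))
     * is a -1/4 offset for the H.263-family inter quantizer. */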
598     if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG1VIDEO ||
599         s->codec_id == CODEC_ID_MPEG2VIDEO || s->codec_id == CODEC_ID_MJPEG) {
600         // (a + x * 3 / 8) / x
601         s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
602         s->inter_quant_bias = 0;
603     } else {
604         s->intra_quant_bias = 0;
605         // (a - x / 4) / x
606         s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
607     }
608
609     if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
610         s->intra_quant_bias = avctx->intra_quant_bias;
611     if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
612         s->inter_quant_bias = avctx->inter_quant_bias;
613
614     avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
615                                   &chroma_v_shift);
616
617     if (avctx->codec_id == CODEC_ID_MPEG4 &&
618         s->avctx->time_base.den > (1 << 16) - 1) {
619         av_log(avctx, AV_LOG_ERROR,
620                "timebase %d/%d not supported by MPEG 4 standard, "
621                "the maximum admitted value for the timebase denominator "
622                "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
623                (1 << 16) - 1);
624         return -1;
625     }
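    /* Number of bits needed to represent time_base.den - 1; MPEG-4 uses this
     * as the width of vop_time_increment in the headers. */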
626     s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
627
628 #if FF_API_MPV_GLOBAL_OPTS
629     if (avctx->flags2 & CODEC_FLAG2_SKIP_RD)
630         s->mpv_flags |= FF_MPV_FLAG_SKIP_RD;
631     if (avctx->flags2 & CODEC_FLAG2_STRICT_GOP)
632         s->mpv_flags |= FF_MPV_FLAG_STRICT_GOP;
633 #endif
634
635     switch (avctx->codec->id) {
636     case CODEC_ID_MPEG1VIDEO:
637         s->out_format = FMT_MPEG1;
638         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
639         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
640         break;
641     case CODEC_ID_MPEG2VIDEO:
642         s->out_format = FMT_MPEG1;
643         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
644         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
645         s->rtp_mode   = 1;
646         break;
647     case CODEC_ID_LJPEG:
648     case CODEC_ID_MJPEG:
649         s->out_format = FMT_MJPEG;
650         s->intra_only = 1; /* force intra only for jpeg */
651         if (avctx->codec->id == CODEC_ID_LJPEG &&
652             avctx->pix_fmt   == PIX_FMT_BGRA) {
653             s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
654             s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
655             s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
656         } else {
657             s->mjpeg_vsample[0] = 2;
658             s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
659             s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
660             s->mjpeg_hsample[0] = 2;
661             s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
662             s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
663         }
664         if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
665             ff_mjpeg_encode_init(s) < 0)
666             return -1;
667         avctx->delay = 0;
668         s->low_delay = 1;
669         break;
670     case CODEC_ID_H261:
671         if (!CONFIG_H261_ENCODER)
672             return -1;
673         if (ff_h261_get_picture_format(s->width, s->height) < 0) {
674             av_log(avctx, AV_LOG_ERROR,
675                    "The specified picture size of %dx%d is not valid for the "
676                    "H.261 codec.\nValid sizes are 176x144, 352x288\n",
677                     s->width, s->height);
678             return -1;
679         }
680         s->out_format = FMT_H261;
681         avctx->delay  = 0;
682         s->low_delay  = 1;
683         break;
684     case CODEC_ID_H263:
685         if (!CONFIG_H263_ENCODER)
686             return -1;
687         if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
688                              s->width, s->height) == 8) {
689             av_log(avctx, AV_LOG_INFO,
690                    "The specified picture size of %dx%d is not valid for "
691                    "the H.263 codec.\nValid sizes are 128x96, 176x144, "
692                    "352x288, 704x576, and 1408x1152. "
693                    "Try H.263+.\n", s->width, s->height);
694             return -1;
695         }
696         s->out_format = FMT_H263;
697         avctx->delay  = 0;
698         s->low_delay  = 1;
699         break;
700     case CODEC_ID_H263P:
701         s->out_format = FMT_H263;
702         s->h263_plus  = 1;
703         /* Fx */
704         s->h263_aic        = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
705         s->modified_quant  = s->h263_aic;
706         s->loop_filter     = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
707         s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
708
709         /* /Fx */
710         /* These are just to be sure */
711         avctx->delay = 0;
712         s->low_delay = 1;
713         break;
714     case CODEC_ID_FLV1:
715         s->out_format      = FMT_H263;
716         s->h263_flv        = 2; /* format = 1; 11-bit codes */
717         s->unrestricted_mv = 1;
718         s->rtp_mode  = 0; /* don't allow GOB */
719         avctx->delay = 0;
720         s->low_delay = 1;
721         break;
722     case CODEC_ID_RV10:
723         s->out_format = FMT_H263;
724         avctx->delay  = 0;
725         s->low_delay  = 1;
726         break;
727     case CODEC_ID_RV20:
728         s->out_format      = FMT_H263;
729         avctx->delay       = 0;
730         s->low_delay       = 1;
731         s->modified_quant  = 1;
732         s->h263_aic        = 1;
733         s->h263_plus       = 1;
734         s->loop_filter     = 1;
735         s->unrestricted_mv = 0;
736         break;
737     case CODEC_ID_MPEG4:
738         s->out_format      = FMT_H263;
739         s->h263_pred       = 1;
740         s->unrestricted_mv = 1;
741         s->low_delay       = s->max_b_frames ? 0 : 1;
742         avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
743         break;
744     case CODEC_ID_MSMPEG4V2:
745         s->out_format      = FMT_H263;
746         s->h263_pred       = 1;
747         s->unrestricted_mv = 1;
748         s->msmpeg4_version = 2;
749         avctx->delay       = 0;
750         s->low_delay       = 1;
751         break;
752     case CODEC_ID_MSMPEG4V3:
753         s->out_format        = FMT_H263;
754         s->h263_pred         = 1;
755         s->unrestricted_mv   = 1;
756         s->msmpeg4_version   = 3;
757         s->flipflop_rounding = 1;
758         avctx->delay         = 0;
759         s->low_delay         = 1;
760         break;
761     case CODEC_ID_WMV1:
762         s->out_format        = FMT_H263;
763         s->h263_pred         = 1;
764         s->unrestricted_mv   = 1;
765         s->msmpeg4_version   = 4;
766         s->flipflop_rounding = 1;
767         avctx->delay         = 0;
768         s->low_delay         = 1;
769         break;
770     case CODEC_ID_WMV2:
771         s->out_format        = FMT_H263;
772         s->h263_pred         = 1;
773         s->unrestricted_mv   = 1;
774         s->msmpeg4_version   = 5;
775         s->flipflop_rounding = 1;
776         avctx->delay         = 0;
777         s->low_delay         = 1;
778         break;
779     default:
780         return -1;
781     }
782
783     avctx->has_b_frames = !s->low_delay;
784
785     s->encoding = 1;
786
787     s->progressive_frame    =
788     s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
789                                                 CODEC_FLAG_INTERLACED_ME) ||
790                                 s->alternate_scan);
791
792     /* init */
793     if (ff_MPV_common_init(s) < 0)
794         return -1;
795
796     if (!s->dct_quantize)
797         s->dct_quantize = ff_dct_quantize_c;
798     if (!s->denoise_dct)
799         s->denoise_dct  = denoise_dct_c;
800     s->fast_dct_quantize = s->dct_quantize;
801     if (avctx->trellis)
802         s->dct_quantize  = dct_quantize_trellis_c;
803
804     if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
805         s->chroma_qscale_table = ff_h263_chroma_qscale_table;
806
807     s->quant_precision = 5;
808
809     ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
810     ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
811
812     if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
813         ff_h261_encode_init(s);
814     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
815         ff_h263_encode_init(s);
816     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
817         ff_msmpeg4_encode_init(s);
818     if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
819         && s->out_format == FMT_MPEG1)
820         ff_mpeg1_encode_init(s);
821
822     /* init q matrix */
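    /* The matrices are stored pre-permuted with dsp.idct_permutation so the
     * quantizer and the (i)DCT can index coefficients in their native order. */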
823     for (i = 0; i < 64; i++) {
824         int j = s->dsp.idct_permutation[i];
825         if (CONFIG_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4 &&
826             s->mpeg_quant) {
827             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
828             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
829         } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
830             s->intra_matrix[j] =
831             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
832         } else {
833             /* mpeg1/2 */
834             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
835             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
836         }
837         if (s->avctx->intra_matrix)
838             s->intra_matrix[j] = s->avctx->intra_matrix[i];
839         if (s->avctx->inter_matrix)
840             s->inter_matrix[j] = s->avctx->inter_matrix[i];
841     }
842
843     /* precompute matrix */
844     /* for mjpeg, we do include qscale in the matrix */
845     if (s->out_format != FMT_MJPEG) {
846         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
847                           s->intra_matrix, s->intra_quant_bias, avctx->qmin,
848                           31, 1);
849         ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
850                           s->inter_matrix, s->inter_quant_bias, avctx->qmin,
851                           31, 0);
852     }
853
854     if (ff_rate_control_init(s) < 0)
855         return -1;
856
857     return 0;
858 }
859
860 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
861 {
862     MpegEncContext *s = avctx->priv_data;
863
864     ff_rate_control_uninit(s);
865
866     ff_MPV_common_end(s);
867     if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
868         s->out_format == FMT_MJPEG)
869         ff_mjpeg_encode_close(s);
870
871     av_freep(&avctx->extradata);
872
873     return 0;
874 }
875
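/* Sum of absolute differences of a 16x16 block against a constant reference
 * value (the block mean, see get_intra_count() below); a small result means
 * the block is nearly flat and cheap to code as intra. */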
876 static int get_sae(uint8_t *src, int ref, int stride)
877 {
878     int x,y;
879     int acc = 0;
880
881     for (y = 0; y < 16; y++) {
882         for (x = 0; x < 16; x++) {
883             acc += FFABS(src[x + y * stride] - ref);
884         }
885     }
886
887     return acc;
888 }
889
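/* Heuristic used by b_frame_strategy 1: counts the 16x16 blocks whose SAE
 * against their own mean (plus a margin of 500) is smaller than the SAD
 * against the reference frame, i.e. blocks that look cheaper as intra.
 * A high count hints at a scene change and cuts the B-frame run short. */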
890 static int get_intra_count(MpegEncContext *s, uint8_t *src,
891                            uint8_t *ref, int stride)
892 {
893     int x, y, w, h;
894     int acc = 0;
895
896     w = s->width  & ~15;
897     h = s->height & ~15;
898
899     for (y = 0; y < h; y += 16) {
900         for (x = 0; x < w; x += 16) {
901             int offset = x + y * stride;
902             int sad  = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
903                                      16);
904             int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
905             int sae  = get_sae(src + offset, mean, stride);
906
907             acc += sae + 500 < sad;
908         }
909     }
910     return acc;
911 }
912
913
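/* Queues one user-supplied frame at the tail of s->input_picture[]. The
 * caller's buffer is referenced directly when its strides match ours and
 * either there is no encoding delay or CODEC_FLAG_INPUT_PRESERVED is set;
 * otherwise the planes are copied into an internal picture. encoding_delay
 * is the number of frames buffered before output starts (max_b_frames when
 * B-frames are used, otherwise 0 or 1 depending on low_delay). */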
914 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
915 {
916     AVFrame *pic = NULL;
917     int64_t pts;
918     int i;
919     const int encoding_delay = s->max_b_frames ? s->max_b_frames :
920                                                  (s->low_delay ? 0 : 1);
921     int direct = 1;
922
923     if (pic_arg) {
924         pts = pic_arg->pts;
925         pic_arg->display_picture_number = s->input_picture_number++;
926
927         if (pts != AV_NOPTS_VALUE) {
928             if (s->user_specified_pts != AV_NOPTS_VALUE) {
929                 int64_t time = pts;
930                 int64_t last = s->user_specified_pts;
931
932                 if (time <= last) {
933                     av_log(s->avctx, AV_LOG_ERROR,
934                            "Error, Invalid timestamp=%"PRId64", "
935                            "last=%"PRId64"\n", pts, s->user_specified_pts);
936                     return -1;
937                 }
938
939                 if (!s->low_delay && pic_arg->display_picture_number == 1)
940                     s->dts_delta = time - last;
941             }
942             s->user_specified_pts = pts;
943         } else {
944             if (s->user_specified_pts != AV_NOPTS_VALUE) {
945                 s->user_specified_pts =
946                 pts = s->user_specified_pts + 1;
947                 av_log(s->avctx, AV_LOG_INFO,
948                        "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
949                        pts);
950             } else {
951                 pts = pic_arg->display_picture_number;
952             }
953         }
954     }
955
956   if (pic_arg) {
957     if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
958         direct = 0;
959     if (pic_arg->linesize[0] != s->linesize)
960         direct = 0;
961     if (pic_arg->linesize[1] != s->uvlinesize)
962         direct = 0;
963     if (pic_arg->linesize[2] != s->uvlinesize)
964         direct = 0;
965
966     //av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0],
967     //       pic_arg->linesize[1], s->linesize, s->uvlinesize);
968
969     if (direct) {
970         i = ff_find_unused_picture(s, 1);
971         if (i < 0)
972             return i;
973
974         pic = (AVFrame *) &s->picture[i];
975         pic->reference = 3;
976
977         for (i = 0; i < 4; i++) {
978             pic->data[i]     = pic_arg->data[i];
979             pic->linesize[i] = pic_arg->linesize[i];
980         }
981         if (ff_alloc_picture(s, (Picture *) pic, 1) < 0) {
982             return -1;
983         }
984     } else {
985         i = ff_find_unused_picture(s, 0);
986         if (i < 0)
987             return i;
988
989         pic = (AVFrame *) &s->picture[i];
990         pic->reference = 3;
991
992         if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
993             return -1;
994         }
995
996         if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
997             pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
998             pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
999             // empty
1000         } else {
1001             int h_chroma_shift, v_chroma_shift;
1002             avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift,
1003                                           &v_chroma_shift);
1004
1005             for (i = 0; i < 3; i++) {
1006                 int src_stride = pic_arg->linesize[i];
1007                 int dst_stride = i ? s->uvlinesize : s->linesize;
1008                 int h_shift = i ? h_chroma_shift : 0;
1009                 int v_shift = i ? v_chroma_shift : 0;
1010                 int w = s->width  >> h_shift;
1011                 int h = s->height >> v_shift;
1012                 uint8_t *src = pic_arg->data[i];
1013                 uint8_t *dst = pic->data[i];
1014
1015                 if (!s->avctx->rc_buffer_size)
1016                     dst += INPLACE_OFFSET;
1017
1018                 if (src_stride == dst_stride)
1019                     memcpy(dst, src, src_stride * h);
1020                 else {
1021                     while (h--) {
1022                         memcpy(dst, src, w);
1023                         dst += dst_stride;
1024                         src += src_stride;
1025                     }
1026                 }
1027             }
1028         }
1029     }
1030     copy_picture_attributes(s, pic, pic_arg);
1031     pic->pts = pts; // we set this here to avoid modifying pic_arg
1032   }
1033
1034     /* shift buffer entries */
1035     for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1036         s->input_picture[i - 1] = s->input_picture[i];
1037
1038     s->input_picture[encoding_delay] = (Picture*) pic;
1039
1040     return 0;
1041 }
1042
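/* Frame-skip decision for frame_skip_threshold/frame_skip_factor: sums a
 * per-8x8-block comparison (frame_skip_cmp) between the candidate frame and
 * the last reference, weighted according to frame_skip_exp (max, |v|, v^2,
 * |v|^3 or v^4), and returns 1 when the frame is similar enough to drop. */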
1043 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1044 {
1045     int x, y, plane;
1046     int score = 0;
1047     int64_t score64 = 0;
1048
1049     for (plane = 0; plane < 3; plane++) {
1050         const int stride = p->f.linesize[plane];
1051         const int bw = plane ? 1 : 2;
1052         for (y = 0; y < s->mb_height * bw; y++) {
1053             for (x = 0; x < s->mb_width * bw; x++) {
1054                 int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
1055                 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
1056                 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
1057                 int v   = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1058
1059                 switch (s->avctx->frame_skip_exp) {
1060                 case 0: score    =  FFMAX(score, v);          break;
1061                 case 1: score   += FFABS(v);                  break;
1062                 case 2: score   += v * v;                     break;
1063                 case 3: score64 += FFABS(v * v * (int64_t)v); break;
1064                 case 4: score64 += v * v * (int64_t)(v * v);  break;
1065                 }
1066             }
1067         }
1068     }
1069
1070     if (score)
1071         score64 = score;
1072
1073     if (score64 < s->avctx->frame_skip_threshold)
1074         return 1;
1075     if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
1076         return 1;
1077     return 0;
1078 }
1079
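/* b_frame_strategy 2: run a scaled-down test encode (brd_scale) of the
 * buffered lookahead frames for every possible B-frame run length, weight
 * the produced bytes by lambda2 and add the reported SSE, and return the
 * run length with the lowest such rate-distortion cost. */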
1080 static int estimate_best_b_count(MpegEncContext *s)
1081 {
1082     AVCodec *codec    = avcodec_find_encoder(s->avctx->codec_id);
1083     AVCodecContext *c = avcodec_alloc_context3(NULL);
1084     AVFrame input[FF_MAX_B_FRAMES + 2];
1085     const int scale = s->avctx->brd_scale;
1086     int i, j, out_size, p_lambda, b_lambda, lambda2;
1087     int outbuf_size  = s->width * s->height; // FIXME
1088     uint8_t *outbuf  = av_malloc(outbuf_size);
1089     int64_t best_rd  = INT64_MAX;
1090     int best_b_count = -1;
1091
1092     assert(scale >= 0 && scale <= 3);
1093
1094     //emms_c();
1095     //s->next_picture_ptr->quality;
1096     p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1097     //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1098     b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1099     if (!b_lambda) // FIXME we should do this somewhere else
1100         b_lambda = p_lambda;
1101     lambda2  = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1102                FF_LAMBDA_SHIFT;
1103
1104     c->width        = s->width  >> scale;
1105     c->height       = s->height >> scale;
1106     c->flags        = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1107                       CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1108     c->flags       |= s->avctx->flags & CODEC_FLAG_QPEL;
1109     c->mb_decision  = s->avctx->mb_decision;
1110     c->me_cmp       = s->avctx->me_cmp;
1111     c->mb_cmp       = s->avctx->mb_cmp;
1112     c->me_sub_cmp   = s->avctx->me_sub_cmp;
1113     c->pix_fmt      = PIX_FMT_YUV420P;
1114     c->time_base    = s->avctx->time_base;
1115     c->max_b_frames = s->max_b_frames;
1116
1117     if (avcodec_open2(c, codec, NULL) < 0)
1118         return -1;
1119
1120     for (i = 0; i < s->max_b_frames + 2; i++) {
1121         int ysize = c->width * c->height;
1122         int csize = (c->width / 2) * (c->height / 2);
1123         Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1124                                                 s->next_picture_ptr;
1125
1126         avcodec_get_frame_defaults(&input[i]);
1127         input[i].data[0]     = av_malloc(ysize + 2 * csize);
1128         input[i].data[1]     = input[i].data[0] + ysize;
1129         input[i].data[2]     = input[i].data[1] + csize;
1130         input[i].linesize[0] = c->width;
1131         input[i].linesize[1] =
1132         input[i].linesize[2] = c->width / 2;
1133
1134         if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1135             pre_input = *pre_input_ptr;
1136
1137             if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
1138                 pre_input.f.data[0] += INPLACE_OFFSET;
1139                 pre_input.f.data[1] += INPLACE_OFFSET;
1140                 pre_input.f.data[2] += INPLACE_OFFSET;
1141             }
1142
1143             s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
1144                                  pre_input.f.data[0], pre_input.f.linesize[0],
1145                                  c->width,      c->height);
1146             s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
1147                                  pre_input.f.data[1], pre_input.f.linesize[1],
1148                                  c->width >> 1, c->height >> 1);
1149             s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
1150                                  pre_input.f.data[2], pre_input.f.linesize[2],
1151                                  c->width >> 1, c->height >> 1);
1152         }
1153     }
1154
1155     for (j = 0; j < s->max_b_frames + 1; j++) {
1156         int64_t rd = 0;
1157
1158         if (!s->input_picture[j])
1159             break;
1160
1161         c->error[0] = c->error[1] = c->error[2] = 0;
1162
1163         input[0].pict_type = AV_PICTURE_TYPE_I;
1164         input[0].quality   = 1 * FF_QP2LAMBDA;
1165         out_size           = avcodec_encode_video(c, outbuf,
1166                                                   outbuf_size, &input[0]);
1167         //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1168
1169         for (i = 0; i < s->max_b_frames + 1; i++) {
1170             int is_p = i % (j + 1) == j || i == s->max_b_frames;
1171
1172             input[i + 1].pict_type = is_p ?
1173                                      AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1174             input[i + 1].quality   = is_p ? p_lambda : b_lambda;
1175             out_size = avcodec_encode_video(c, outbuf, outbuf_size,
1176                                             &input[i + 1]);
1177             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1178         }
1179
1180         /* get the delayed frames */
1181         while (out_size) {
1182             out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
1183             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1184         }
1185
1186         rd += c->error[0] + c->error[1] + c->error[2];
1187
1188         if (rd < best_rd) {
1189             best_rd = rd;
1190             best_b_count = j;
1191         }
1192     }
1193
1194     av_freep(&outbuf);
1195     avcodec_close(c);
1196     av_freep(&c);
1197
1198     for (i = 0; i < s->max_b_frames + 2; i++) {
1199         av_freep(&input[i].data[0]);
1200     }
1201
1202     return best_b_count;
1203 }
1204
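/* Picks the next picture to code and reorders the input queue: an I-frame
 * when there is no reference yet (or in intra-only mode), otherwise it
 * decides how many buffered frames become B-frames (per b_frame_strategy),
 * optionally drops skippable frames, honours gop_size / CODEC_FLAG_CLOSED_GOP,
 * and assigns coded_picture_number in coding order. */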
1205 static int select_input_picture(MpegEncContext *s)
1206 {
1207     int i;
1208
1209     for (i = 1; i < MAX_PICTURE_COUNT; i++)
1210         s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1211     s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1212
1213     /* set next picture type & ordering */
1214     if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
1215         if (/*s->picture_in_gop_number >= s->gop_size ||*/
1216             s->next_picture_ptr == NULL || s->intra_only) {
1217             s->reordered_input_picture[0] = s->input_picture[0];
1218             s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1219             s->reordered_input_picture[0]->f.coded_picture_number =
1220                 s->coded_picture_number++;
1221         } else {
1222             int b_frames;
1223
1224             if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1225                 if (s->picture_in_gop_number < s->gop_size &&
1226                     skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1227                     // FIXME check that the gop check above is +-1 correct
1228                     //av_log(NULL, AV_LOG_DEBUG, "skip %p %"PRId64"\n",
1229                     //       s->input_picture[0]->f.data[0],
1230                     //       s->input_picture[0]->pts);
1231
1232                     if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
1233                         for (i = 0; i < 4; i++)
1234                             s->input_picture[0]->f.data[i] = NULL;
1235                         s->input_picture[0]->f.type = 0;
1236                     } else {
1237                         assert(s->input_picture[0]->f.type == FF_BUFFER_TYPE_USER ||
1238                                s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);
1239
1240                         s->avctx->release_buffer(s->avctx,
1241                                                  (AVFrame *) s->input_picture[0]);
1242                     }
1243
1244                     emms_c();
1245                     ff_vbv_update(s, 0);
1246
1247                     goto no_output_pic;
1248                 }
1249             }
1250
1251             if (s->flags & CODEC_FLAG_PASS2) {
1252                 for (i = 0; i < s->max_b_frames + 1; i++) {
1253                     int pict_num = s->input_picture[0]->f.display_picture_number + i;
1254
1255                     if (pict_num >= s->rc_context.num_entries)
1256                         break;
1257                     if (!s->input_picture[i]) {
1258                         s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1259                         break;
1260                     }
1261
1262                     s->input_picture[i]->f.pict_type =
1263                         s->rc_context.entry[pict_num].new_pict_type;
1264                 }
1265             }
1266
1267             if (s->avctx->b_frame_strategy == 0) {
1268                 b_frames = s->max_b_frames;
1269                 while (b_frames && !s->input_picture[b_frames])
1270                     b_frames--;
1271             } else if (s->avctx->b_frame_strategy == 1) {
1272                 for (i = 1; i < s->max_b_frames + 1; i++) {
1273                     if (s->input_picture[i] &&
1274                         s->input_picture[i]->b_frame_score == 0) {
1275                         s->input_picture[i]->b_frame_score =
1276                             get_intra_count(s,
1277                                             s->input_picture[i    ]->f.data[0],
1278                                             s->input_picture[i - 1]->f.data[0],
1279                                             s->linesize) + 1;
1280                     }
1281                 }
1282                 for (i = 0; i < s->max_b_frames + 1; i++) {
1283                     if (s->input_picture[i] == NULL ||
1284                         s->input_picture[i]->b_frame_score - 1 >
1285                             s->mb_num / s->avctx->b_sensitivity)
1286                         break;
1287                 }
1288
1289                 b_frames = FFMAX(0, i - 1);
1290
1291                 /* reset scores */
1292                 for (i = 0; i < b_frames + 1; i++) {
1293                     s->input_picture[i]->b_frame_score = 0;
1294                 }
1295             } else if (s->avctx->b_frame_strategy == 2) {
1296                 b_frames = estimate_best_b_count(s);
1297             } else {
1298                 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1299                 b_frames = 0;
1300             }
1301
1302             emms_c();
1303             //static int b_count = 0;
1304             //b_count += b_frames;
1305             //av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
1306
1307             for (i = b_frames - 1; i >= 0; i--) {
1308                 int type = s->input_picture[i]->f.pict_type;
1309                 if (type && type != AV_PICTURE_TYPE_B)
1310                     b_frames = i;
1311             }
1312             if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1313                 b_frames == s->max_b_frames) {
1314                 av_log(s->avctx, AV_LOG_ERROR,
1315                        "warning, too many b frames in a row\n");
1316             }
1317
1318             if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1319                 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1320                     s->gop_size > s->picture_in_gop_number) {
1321                     b_frames = s->gop_size - s->picture_in_gop_number - 1;
1322                 } else {
1323                     if (s->flags & CODEC_FLAG_CLOSED_GOP)
1324                         b_frames = 0;
1325                     s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1326                 }
1327             }
1328
1329             if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1330                 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
1331                 b_frames--;
1332
1333             s->reordered_input_picture[0] = s->input_picture[b_frames];
1334             if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1335                 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1336             s->reordered_input_picture[0]->f.coded_picture_number =
1337                 s->coded_picture_number++;
1338             for (i = 0; i < b_frames; i++) {
1339                 s->reordered_input_picture[i + 1] = s->input_picture[i];
1340                 s->reordered_input_picture[i + 1]->f.pict_type =
1341                     AV_PICTURE_TYPE_B;
1342                 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1343                     s->coded_picture_number++;
1344             }
1345         }
1346     }
1347 no_output_pic:
1348     if (s->reordered_input_picture[0]) {
1349         s->reordered_input_picture[0]->f.reference =
1350            s->reordered_input_picture[0]->f.pict_type !=
1351                AV_PICTURE_TYPE_B ? 3 : 0;
1352
1353         ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
1354
1355         if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED ||
1356             s->avctx->rc_buffer_size) {
1357             // input is a shared pix, so we can't modify it -> alloc a new
1358             // one & ensure that the shared one is reusable
1359
1360             Picture *pic;
1361             int i = ff_find_unused_picture(s, 0);
1362             if (i < 0)
1363                 return i;
1364             pic = &s->picture[i];
1365
1366             pic->f.reference = s->reordered_input_picture[0]->f.reference;
1367             if (ff_alloc_picture(s, pic, 0) < 0) {
1368                 return -1;
1369             }
1370
1371             /* mark us unused / free shared pic */
1372             if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
1373                 s->avctx->release_buffer(s->avctx,
1374                                          (AVFrame *) s->reordered_input_picture[0]);
1375             for (i = 0; i < 4; i++)
1376                 s->reordered_input_picture[0]->f.data[i] = NULL;
1377             s->reordered_input_picture[0]->f.type = 0;
1378
1379             copy_picture_attributes(s, (AVFrame *) pic,
1380                                     (AVFrame *) s->reordered_input_picture[0]);
1381
1382             s->current_picture_ptr = pic;
1383         } else {
1384             // input is not a shared pix -> reuse buffer for current_pix
1385
1386             assert(s->reordered_input_picture[0]->f.type ==
1387                        FF_BUFFER_TYPE_USER ||
1388                    s->reordered_input_picture[0]->f.type ==
1389                        FF_BUFFER_TYPE_INTERNAL);
1390
1391             s->current_picture_ptr = s->reordered_input_picture[0];
1392             for (i = 0; i < 4; i++) {
1393                 s->new_picture.f.data[i] += INPLACE_OFFSET;
1394             }
1395         }
1396         ff_copy_picture(&s->current_picture, s->current_picture_ptr);
1397
1398         s->picture_number = s->new_picture.f.display_picture_number;
1399         //printf("dpn:%d\n", s->picture_number);
1400     } else {
1401         memset(&s->new_picture, 0, sizeof(Picture));
1402     }
1403     return 0;
1404 }
1405
1406 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1407                           const AVFrame *pic_arg, int *got_packet)
1408 {
1409     MpegEncContext *s = avctx->priv_data;
1410     int i, stuffing_count, ret;
1411     int context_count = s->slice_context_count;
1412
1413     s->picture_in_gop_number++;
1414
1415     if (load_input_picture(s, pic_arg) < 0)
1416         return -1;
1417
1418     if (select_input_picture(s) < 0) {
1419         return -1;
1420     }
1421
1422     /* output? */
1423     if (s->new_picture.f.data[0]) {
1424         if (!pkt->data &&
1425             (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
1426             return ret;
1427
1428         for (i = 0; i < context_count; i++) {
1429             int start_y = s->thread_context[i]->start_mb_y;
1430             int   end_y = s->thread_context[i]->  end_mb_y;
1431             int h       = s->mb_height;
1432             uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1433             uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) *   end_y / h);
1434
1435             init_put_bits(&s->thread_context[i]->pb, start, end - start);
1436         }
1437
1438         s->pict_type = s->new_picture.f.pict_type;
1439         //emms_c();
1440         //printf("qs:%f %f %d\n", s->new_picture.quality,
1441         //       s->current_picture.quality, s->qscale);
1442         ff_MPV_frame_start(s, avctx);
1443 vbv_retry:
1444         if (encode_picture(s, s->picture_number) < 0)
1445             return -1;
1446
1447         avctx->header_bits = s->header_bits;
1448         avctx->mv_bits     = s->mv_bits;
1449         avctx->misc_bits   = s->misc_bits;
1450         avctx->i_tex_bits  = s->i_tex_bits;
1451         avctx->p_tex_bits  = s->p_tex_bits;
1452         avctx->i_count     = s->i_count;
1453         // FIXME f/b_count in avctx
1454         avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
1455         avctx->skip_count  = s->skip_count;
1456
1457         ff_MPV_frame_end(s);
1458
1459         if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1460             ff_mjpeg_encode_picture_trailer(s);
1461
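             /* VBV check: if the coded frame came out too large and lambda may
              * still grow, raise lambda (and the per-MB lambda table when
              * adaptive quantization is on), undo the state changes made by
              * encode_picture(), reset the bit writers and encode again. */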
1462         if (avctx->rc_buffer_size) {
1463             RateControlContext *rcc = &s->rc_context;
1464             int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1465
1466             if (put_bits_count(&s->pb) > max_size &&
1467                 s->lambda < s->avctx->lmax) {
1468                 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1469                                        (s->qscale + 1) / s->qscale);
1470                 if (s->adaptive_quant) {
1471                     int i;
1472                     for (i = 0; i < s->mb_height * s->mb_stride; i++)
1473                         s->lambda_table[i] =
1474                             FFMAX(s->lambda_table[i] + 1,
1475                                   s->lambda_table[i] * (s->qscale + 1) /
1476                                   s->qscale);
1477                 }
1478                 s->mb_skipped = 0;        // normally done in MPV_frame_start()
1479                 // undo what encode_picture() changed before running it again
1480                 if (s->pict_type == AV_PICTURE_TYPE_P) {
1481                     if (s->flipflop_rounding          ||
1482                         s->codec_id == CODEC_ID_H263P ||
1483                         s->codec_id == CODEC_ID_MPEG4)
1484                         s->no_rounding ^= 1;
1485                 }
1486                 if (s->pict_type != AV_PICTURE_TYPE_B) {
1487                     s->time_base       = s->last_time_base;
1488                     s->last_non_b_time = s->time - s->pp_time;
1489                 }
1490                 //av_log(NULL, AV_LOG_ERROR, "R:%d ", s->next_lambda);
1491                 for (i = 0; i < context_count; i++) {
1492                     PutBitContext *pb = &s->thread_context[i]->pb;
1493                     init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1494                 }
1495                 goto vbv_retry;
1496             }
1497
1498             assert(s->avctx->rc_max_rate);
1499         }
1500
1501         if (s->flags & CODEC_FLAG_PASS1)
1502             ff_write_pass1_stats(s);
1503
1504         for (i = 0; i < 4; i++) {
1505             s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1506             avctx->error[i] += s->current_picture_ptr->f.error[i];
1507         }
1508
1509         if (s->flags & CODEC_FLAG_PASS1)
1510             assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1511                    avctx->i_tex_bits + avctx->p_tex_bits ==
1512                        put_bits_count(&s->pb));
1513         flush_put_bits(&s->pb);
1514         s->frame_bits  = put_bits_count(&s->pb);
1515
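         /* The rate controller may request stuffing to keep the VBV buffer in
          * range; pad with zero bytes for MPEG-1/2 or a stuffing start code
          * followed by 0xFF bytes for MPEG-4. */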
1516         stuffing_count = ff_vbv_update(s, s->frame_bits);
1517         if (stuffing_count) {
1518             if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1519                     stuffing_count + 50) {
1520                 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1521                 return -1;
1522             }
1523
1524             switch (s->codec_id) {
1525             case CODEC_ID_MPEG1VIDEO:
1526             case CODEC_ID_MPEG2VIDEO:
1527                 while (stuffing_count--) {
1528                     put_bits(&s->pb, 8, 0);
1529                 }
1530             break;
1531             case CODEC_ID_MPEG4:
1532                 put_bits(&s->pb, 16, 0);
1533                 put_bits(&s->pb, 16, 0x1C3);
1534                 stuffing_count -= 4;
1535                 while (stuffing_count--) {
1536                     put_bits(&s->pb, 8, 0xFF);
1537                 }
1538             break;
1539             default:
1540                 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1541             }
1542             flush_put_bits(&s->pb);
1543             s->frame_bits  = put_bits_count(&s->pb);
1544         }
1545
1546         /* update mpeg1/2 vbv_delay for CBR */
1547         if (s->avctx->rc_max_rate                          &&
1548             s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1549             s->out_format == FMT_MPEG1                     &&
1550             90000LL * (avctx->rc_buffer_size - 1) <=
1551                 s->avctx->rc_max_rate * 0xFFFFLL) {
1552             int vbv_delay, min_delay;
1553             double inbits  = s->avctx->rc_max_rate *
1554                              av_q2d(s->avctx->time_base);
1555             int    minbits = s->frame_bits - 8 *
1556                              (s->vbv_delay_ptr - s->pb.buf - 1);
1557             double bits    = s->rc_context.buffer_index + minbits - inbits;
1558
1559             if (bits < 0)
1560                 av_log(s->avctx, AV_LOG_ERROR,
1561                        "Internal error, negative bits\n");
1562
1563             assert(s->repeat_first_field == 0);
1564
1565             vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1566             min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1567                         s->avctx->rc_max_rate;
1568
1569             vbv_delay = FFMAX(vbv_delay, min_delay);
1570
1571             assert(vbv_delay < 0xFFFF);
1572
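             /* splice the 16-bit vbv_delay value into the three picture header
              * bytes (3 + 8 + 5 bits) it spans at vbv_delay_ptr */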
1573             s->vbv_delay_ptr[0] &= 0xF8;
1574             s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1575             s->vbv_delay_ptr[1]  = vbv_delay >> 5;
1576             s->vbv_delay_ptr[2] &= 0x07;
1577             s->vbv_delay_ptr[2] |= vbv_delay << 3;
1578             avctx->vbv_delay     = vbv_delay * 300;
1579         }
1580         s->total_bits     += s->frame_bits;
1581         avctx->frame_bits  = s->frame_bits;
1582
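         /* timestamps: with B-frame delay (low_delay unset) the dts comes from
          * the input pts stored on the previous call (reordered_pts); the very
          * first packet uses pts - dts_delta instead. */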
1583         pkt->pts = s->current_picture.f.pts;
1584         if (!s->low_delay) {
1585             if (!s->current_picture.f.coded_picture_number)
1586                 pkt->dts = pkt->pts - s->dts_delta;
1587             else
1588                 pkt->dts = s->reordered_pts;
1589             s->reordered_pts = s->input_picture[0]->f.pts;
1590         } else
1591             pkt->dts = pkt->pts;
1592         if (s->current_picture.f.key_frame)
1593             pkt->flags |= AV_PKT_FLAG_KEY;
1594     } else {
1595         assert((put_bits_ptr(&s->pb) == s->pb.buf));
1596         s->frame_bits = 0;
1597     }
1598     assert((s->frame_bits & 7) == 0);
1599
1600     pkt->size = s->frame_bits / 8;
1601     *got_packet = !!pkt->size;
1602     return 0;
1603 }
1604
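     /* Zero out block n when it contains nothing but a few isolated +-1
      * coefficients whose position-weighted score (tab[] below) stays under
      * 'threshold'. A negative threshold additionally allows the DC
      * coefficient to be eliminated; this is where the luma_elim_threshold
      * and chroma_elim_threshold values take effect. */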
1605 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1606                                                 int n, int threshold)
1607 {
1608     static const char tab[64] = {
1609         3, 2, 2, 1, 1, 1, 1, 1,
1610         1, 1, 1, 1, 1, 1, 1, 1,
1611         1, 1, 1, 1, 1, 1, 1, 1,
1612         0, 0, 0, 0, 0, 0, 0, 0,
1613         0, 0, 0, 0, 0, 0, 0, 0,
1614         0, 0, 0, 0, 0, 0, 0, 0,
1615         0, 0, 0, 0, 0, 0, 0, 0,
1616         0, 0, 0, 0, 0, 0, 0, 0
1617     };
1618     int score = 0;
1619     int run = 0;
1620     int i;
1621     DCTELEM *block = s->block[n];
1622     const int last_index = s->block_last_index[n];
1623     int skip_dc;
1624
1625     if (threshold < 0) {
1626         skip_dc = 0;
1627         threshold = -threshold;
1628     } else
1629         skip_dc = 1;
1630
1631     /* Is everything we could set to zero already zero? */
1632     if (last_index <= skip_dc - 1)
1633         return;
1634
1635     for (i = 0; i <= last_index; i++) {
1636         const int j = s->intra_scantable.permutated[i];
1637         const int level = FFABS(block[j]);
1638         if (level == 1) {
1639             if (skip_dc && i == 0)
1640                 continue;
1641             score += tab[run];
1642             run = 0;
1643         } else if (level > 1) {
1644             return;
1645         } else {
1646             run++;
1647         }
1648     }
1649     if (score >= threshold)
1650         return;
1651     for (i = skip_dc; i <= last_index; i++) {
1652         const int j = s->intra_scantable.permutated[i];
1653         block[j] = 0;
1654     }
1655     if (block[0])
1656         s->block_last_index[n] = 0;
1657     else
1658         s->block_last_index[n] = -1;
1659 }
1660
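     /* Clamp quantized coefficients to the codec's [min_qcoeff, max_qcoeff]
      * range (the intra DC coefficient is left untouched) and warn when
      * clipping occurred under simple macroblock decision. */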
1661 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block,
1662                                int last_index)
1663 {
1664     int i;
1665     const int maxlevel = s->max_qcoeff;
1666     const int minlevel = s->min_qcoeff;
1667     int overflow = 0;
1668
1669     if (s->mb_intra) {
1670         i = 1; // skip clipping of intra dc
1671     } else
1672         i = 0;
1673
1674     for (; i <= last_index; i++) {
1675         const int j = s->intra_scantable.permutated[i];
1676         int level = block[j];
1677
1678         if (level > maxlevel) {
1679             level = maxlevel;
1680             overflow++;
1681         } else if (level < minlevel) {
1682             level = minlevel;
1683             overflow++;
1684         }
1685
1686         block[j] = level;
1687     }
1688
1689     if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1690         av_log(s->avctx, AV_LOG_INFO,
1691                "warning, clipping %d dct coefficients to %d..%d\n",
1692                overflow, minlevel, maxlevel);
1693 }
1694
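     /* Per-coefficient visual weight: roughly 36 times the standard deviation
      * of each pixel's 3x3 neighbourhood, used by dct_quantize_refine() when
      * quantizer noise shaping is enabled. */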
1695 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1696 {
1697     int x, y;
1698     // FIXME optimize
1699     for (y = 0; y < 8; y++) {
1700         for (x = 0; x < 8; x++) {
1701             int x2, y2;
1702             int sum = 0;
1703             int sqr = 0;
1704             int count = 0;
1705
1706             for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1707                 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1708                     int v = ptr[x2 + y2 * stride];
1709                     sum += v;
1710                     sqr += v * v;
1711                     count++;
1712                 }
1713             }
1714             weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
1715         }
1716     }
1717 }
1718
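     /* Encode one macroblock: apply adaptive quantization, fetch the source
      * pixels (or motion-compensate and take the difference for inter MBs),
      * pick progressive vs. interlaced DCT, transform and quantize each block,
      * run coefficient elimination, then emit the bitstream with the
      * codec-specific macroblock coder. */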
1719 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1720                                                 int motion_x, int motion_y,
1721                                                 int mb_block_height,
1722                                                 int mb_block_count)
1723 {
1724     int16_t weight[8][64];
1725     DCTELEM orig[8][64];
1726     const int mb_x = s->mb_x;
1727     const int mb_y = s->mb_y;
1728     int i;
1729     int skip_dct[8];
1730     int dct_offset = s->linesize * 8; // default for progressive frames
1731     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1732     int wrap_y, wrap_c;
1733
1734     for (i = 0; i < mb_block_count; i++)
1735         skip_dct[i] = s->skipdct;
1736
1737     if (s->adaptive_quant) {
1738         const int last_qp = s->qscale;
1739         const int mb_xy = mb_x + mb_y * s->mb_stride;
1740
1741         s->lambda = s->lambda_table[mb_xy];
1742         update_qscale(s);
1743
1744         if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1745             s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
1746             s->dquant = s->qscale - last_qp;
1747
1748             if (s->out_format == FMT_H263) {
1749                 s->dquant = av_clip(s->dquant, -2, 2);
1750
1751                 if (s->codec_id == CODEC_ID_MPEG4) {
1752                     if (!s->mb_intra) {
1753                         if (s->pict_type == AV_PICTURE_TYPE_B) {
1754                             if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1755                                 s->dquant = 0;
1756                         }
1757                         if (s->mv_type == MV_TYPE_8X8)
1758                             s->dquant = 0;
1759                     }
1760                 }
1761             }
1762         }
1763         ff_set_qscale(s, last_qp + s->dquant);
1764     } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1765         ff_set_qscale(s, s->qscale + s->dquant);
1766
1767     wrap_y = s->linesize;
1768     wrap_c = s->uvlinesize;
1769     ptr_y  = s->new_picture.f.data[0] +
1770              (mb_y * 16 * wrap_y)              + mb_x * 16;
1771     ptr_cb = s->new_picture.f.data[1] +
1772              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1773     ptr_cr = s->new_picture.f.data[2] +
1774              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1775
1776     if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1777         uint8_t *ebuf = s->edge_emu_buffer + 32;
1778         s->dsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
1779                                 mb_y * 16, s->width, s->height);
1780         ptr_y = ebuf;
1781         s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
1782                                 mb_block_height, mb_x * 8, mb_y * 8,
1783                                 s->width >> 1, s->height >> 1);
1784         ptr_cb = ebuf + 18 * wrap_y;
1785         s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
1786                                 mb_block_height, mb_x * 8, mb_y * 8,
1787                                 s->width >> 1, s->height >> 1);
1788         ptr_cr = ebuf + 18 * wrap_y + 8;
1789     }
1790
1791     if (s->mb_intra) {
1792         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1793             int progressive_score, interlaced_score;
1794
1795             s->interlaced_dct = 0;
1796             progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1797                                                     NULL, wrap_y, 8) +
1798                                 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1799                                                     NULL, wrap_y, 8) - 400;
1800
1801             if (progressive_score > 0) {
1802                 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1803                                                        NULL, wrap_y * 2, 8) +
1804                                    s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1805                                                        NULL, wrap_y * 2, 8);
1806                 if (progressive_score > interlaced_score) {
1807                     s->interlaced_dct = 1;
1808
1809                     dct_offset = wrap_y;
1810                     wrap_y <<= 1;
1811                     if (s->chroma_format == CHROMA_422)
1812                         wrap_c <<= 1;
1813                 }
1814             }
1815         }
1816
1817         s->dsp.get_pixels(s->block[0], ptr_y                  , wrap_y);
1818         s->dsp.get_pixels(s->block[1], ptr_y              + 8 , wrap_y);
1819         s->dsp.get_pixels(s->block[2], ptr_y + dct_offset     , wrap_y);
1820         s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1821
1822         if (s->flags & CODEC_FLAG_GRAY) {
1823             skip_dct[4] = 1;
1824             skip_dct[5] = 1;
1825         } else {
1826             s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1827             s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1828             if (!s->chroma_y_shift) { /* 422 */
1829                 s->dsp.get_pixels(s->block[6],
1830                                   ptr_cb + (dct_offset >> 1), wrap_c);
1831                 s->dsp.get_pixels(s->block[7],
1832                                   ptr_cr + (dct_offset >> 1), wrap_c);
1833             }
1834         }
1835     } else {
1836         op_pixels_func (*op_pix)[4];
1837         qpel_mc_func (*op_qpix)[16];
1838         uint8_t *dest_y, *dest_cb, *dest_cr;
1839
1840         dest_y  = s->dest[0];
1841         dest_cb = s->dest[1];
1842         dest_cr = s->dest[2];
1843
1844         if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1845             op_pix  = s->dsp.put_pixels_tab;
1846             op_qpix = s->dsp.put_qpel_pixels_tab;
1847         } else {
1848             op_pix  = s->dsp.put_no_rnd_pixels_tab;
1849             op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1850         }
1851
1852         if (s->mv_dir & MV_DIR_FORWARD) {
1853             MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data,
1854                        op_pix, op_qpix);
1855             op_pix  = s->dsp.avg_pixels_tab;
1856             op_qpix = s->dsp.avg_qpel_pixels_tab;
1857         }
1858         if (s->mv_dir & MV_DIR_BACKWARD) {
1859             MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data,
1860                        op_pix, op_qpix);
1861         }
1862
1863         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1864             int progressive_score, interlaced_score;
1865
1866             s->interlaced_dct = 0;
1867             progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1868                                                     ptr_y,              wrap_y,
1869                                                     8) +
1870                                 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1871                                                     ptr_y + wrap_y * 8, wrap_y,
1872                                                     8) - 400;
1873
1874             if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1875                 progressive_score -= 400;
1876
1877             if (progressive_score > 0) {
1878                 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1879                                                        ptr_y,
1880                                                        wrap_y * 2, 8) +
1881                                    s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1882                                                        ptr_y + wrap_y,
1883                                                        wrap_y * 2, 8);
1884
1885                 if (progressive_score > interlaced_score) {
1886                     s->interlaced_dct = 1;
1887
1888                     dct_offset = wrap_y;
1889                     wrap_y <<= 1;
1890                     if (s->chroma_format == CHROMA_422)
1891                         wrap_c <<= 1;
1892                 }
1893             }
1894         }
1895
1896         s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1897         s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1898         s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1899                            dest_y + dct_offset, wrap_y);
1900         s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1901                            dest_y + dct_offset + 8, wrap_y);
1902
1903         if (s->flags & CODEC_FLAG_GRAY) {
1904             skip_dct[4] = 1;
1905             skip_dct[5] = 1;
1906         } else {
1907             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1908             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1909             if (!s->chroma_y_shift) { /* 422 */
1910                 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1911                                    dest_cb + (dct_offset >> 1), wrap_c);
1912                 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1913                                    dest_cr + (dct_offset >> 1), wrap_c);
1914             }
1915         }
1916         /* pre quantization: skip blocks whose SAD vs. the prediction is small compared to qscale, as they would most likely quantize to all zeros anyway */
1917         if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1918                 2 * s->qscale * s->qscale) {
1919             // FIXME optimize
1920             if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1921                               wrap_y, 8) < 20 * s->qscale)
1922                 skip_dct[0] = 1;
1923             if (s->dsp.sad[1](NULL, ptr_y + 8,
1924                               dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1925                 skip_dct[1] = 1;
1926             if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1927                               dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1928                 skip_dct[2] = 1;
1929             if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1930                               dest_y + dct_offset + 8,
1931                               wrap_y, 8) < 20 * s->qscale)
1932                 skip_dct[3] = 1;
1933             if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1934                               wrap_c, 8) < 20 * s->qscale)
1935                 skip_dct[4] = 1;
1936             if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1937                               wrap_c, 8) < 20 * s->qscale)
1938                 skip_dct[5] = 1;
1939             if (!s->chroma_y_shift) { /* 422 */
1940                 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1941                                   dest_cb + (dct_offset >> 1),
1942                                   wrap_c, 8) < 20 * s->qscale)
1943                     skip_dct[6] = 1;
1944                 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1945                                   dest_cr + (dct_offset >> 1),
1946                                   wrap_c, 8) < 20 * s->qscale)
1947                     skip_dct[7] = 1;
1948             }
1949         }
1950     }
1951
1952     if (s->avctx->quantizer_noise_shaping) {
1953         if (!skip_dct[0])
1954             get_visual_weight(weight[0], ptr_y                 , wrap_y);
1955         if (!skip_dct[1])
1956             get_visual_weight(weight[1], ptr_y              + 8, wrap_y);
1957         if (!skip_dct[2])
1958             get_visual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
1959         if (!skip_dct[3])
1960             get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1961         if (!skip_dct[4])
1962             get_visual_weight(weight[4], ptr_cb                , wrap_c);
1963         if (!skip_dct[5])
1964             get_visual_weight(weight[5], ptr_cr                , wrap_c);
1965         if (!s->chroma_y_shift) { /* 422 */
1966             if (!skip_dct[6])
1967                 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1968                                   wrap_c);
1969             if (!skip_dct[7])
1970                 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
1971                                   wrap_c);
1972         }
1973         memcpy(orig[0], s->block[0], sizeof(DCTELEM) * 64 * mb_block_count);
1974     }
1975
1976     /* DCT & quantize */
1977     assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1978     {
1979         for (i = 0; i < mb_block_count; i++) {
1980             if (!skip_dct[i]) {
1981                 int overflow;
1982                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1983                 // FIXME we could decide to change the quantizer instead of
1984                 // clipping
1985                 // JS: I don't think that would be a good idea, it could lower
1986                 //     quality instead of improving it. Just INTRADC clipping
1987                 //     deserves changes in the quantizer
1988                 if (overflow)
1989                     clip_coeffs(s, s->block[i], s->block_last_index[i]);
1990             } else
1991                 s->block_last_index[i] = -1;
1992         }
1993         if (s->avctx->quantizer_noise_shaping) {
1994             for (i = 0; i < mb_block_count; i++) {
1995                 if (!skip_dct[i]) {
1996                     s->block_last_index[i] =
1997                         dct_quantize_refine(s, s->block[i], weight[i],
1998                                             orig[i], i, s->qscale);
1999                 }
2000             }
2001         }
2002
2003         if (s->luma_elim_threshold && !s->mb_intra)
2004             for (i = 0; i < 4; i++)
2005                 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2006         if (s->chroma_elim_threshold && !s->mb_intra)
2007             for (i = 4; i < mb_block_count; i++)
2008                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2009
2010         if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2011             for (i = 0; i < mb_block_count; i++) {
2012                 if (s->block_last_index[i] == -1)
2013                     s->coded_score[i] = INT_MAX / 256;
2014             }
2015         }
2016     }
2017
2018     if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
2019         s->block_last_index[4] =
2020         s->block_last_index[5] = 0;
2021         s->block[4][0] =
2022         s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2023     }
2024
2025     // FIXME: the non-C quantize code returns an incorrect block_last_index
2026     if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2027         for (i = 0; i < mb_block_count; i++) {
2028             int j;
2029             if (s->block_last_index[i] > 0) {
2030                 for (j = 63; j > 0; j--) {
2031                     if (s->block[i][s->intra_scantable.permutated[j]])
2032                         break;
2033                 }
2034                 s->block_last_index[i] = j;
2035             }
2036         }
2037     }
2038
2039     /* huffman encode */
2040     switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2041     case CODEC_ID_MPEG1VIDEO:
2042     case CODEC_ID_MPEG2VIDEO:
2043         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2044             ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2045         break;
2046     case CODEC_ID_MPEG4:
2047         if (CONFIG_MPEG4_ENCODER)
2048             ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2049         break;
2050     case CODEC_ID_MSMPEG4V2:
2051     case CODEC_ID_MSMPEG4V3:
2052     case CODEC_ID_WMV1:
2053         if (CONFIG_MSMPEG4_ENCODER)
2054             ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2055         break;
2056     case CODEC_ID_WMV2:
2057         if (CONFIG_WMV2_ENCODER)
2058             ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2059         break;
2060     case CODEC_ID_H261:
2061         if (CONFIG_H261_ENCODER)
2062             ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2063         break;
2064     case CODEC_ID_H263:
2065     case CODEC_ID_H263P:
2066     case CODEC_ID_FLV1:
2067     case CODEC_ID_RV10:
2068     case CODEC_ID_RV20:
2069         if (CONFIG_H263_ENCODER)
2070             ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2071         break;
2072     case CODEC_ID_MJPEG:
2073         if (CONFIG_MJPEG_ENCODER)
2074             ff_mjpeg_encode_mb(s, s->block);
2075         break;
2076     default:
2077         assert(0);
2078     }
2079 }
2080
2081 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2082 {
2083     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y,  8, 6);
2084     else                                encode_mb_internal(s, motion_x, motion_y, 16, 8);
2085 }
2086
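     /* Save/restore helpers for the trial encodes in encode_mb_hq(): copy the
      * parts of the encoder state that encoding a macroblock may modify. */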
2087 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2088     int i;
2089
2090     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2091
2092     /* mpeg1 */
2093     d->mb_skip_run= s->mb_skip_run;
2094     for(i=0; i<3; i++)
2095         d->last_dc[i] = s->last_dc[i];
2096
2097     /* statistics */
2098     d->mv_bits= s->mv_bits;
2099     d->i_tex_bits= s->i_tex_bits;
2100     d->p_tex_bits= s->p_tex_bits;
2101     d->i_count= s->i_count;
2102     d->f_count= s->f_count;
2103     d->b_count= s->b_count;
2104     d->skip_count= s->skip_count;
2105     d->misc_bits= s->misc_bits;
2106     d->last_bits= 0;
2107
2108     d->mb_skipped= 0;
2109     d->qscale= s->qscale;
2110     d->dquant= s->dquant;
2111
2112     d->esc3_level_length= s->esc3_level_length;
2113 }
2114
2115 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2116     int i;
2117
2118     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2119     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2120
2121     /* mpeg1 */
2122     d->mb_skip_run= s->mb_skip_run;
2123     for(i=0; i<3; i++)
2124         d->last_dc[i] = s->last_dc[i];
2125
2126     /* statistics */
2127     d->mv_bits= s->mv_bits;
2128     d->i_tex_bits= s->i_tex_bits;
2129     d->p_tex_bits= s->p_tex_bits;
2130     d->i_count= s->i_count;
2131     d->f_count= s->f_count;
2132     d->b_count= s->b_count;
2133     d->skip_count= s->skip_count;
2134     d->misc_bits= s->misc_bits;
2135
2136     d->mb_intra= s->mb_intra;
2137     d->mb_skipped= s->mb_skipped;
2138     d->mv_type= s->mv_type;
2139     d->mv_dir= s->mv_dir;
2140     d->pb= s->pb;
2141     if(s->data_partitioning){
2142         d->pb2= s->pb2;
2143         d->tex_pb= s->tex_pb;
2144     }
2145     d->block= s->block;
2146     for(i=0; i<8; i++)
2147         d->block_last_index[i]= s->block_last_index[i];
2148     d->interlaced_dct= s->interlaced_dct;
2149     d->qscale= s->qscale;
2150
2151     d->esc3_level_length= s->esc3_level_length;
2152 }
2153
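     /* Trial-encode the macroblock as candidate 'type' into one of two scratch
      * bitstreams; with rate-distortion MB decision the block is also decoded
      * back and the score becomes lambda-weighted bits plus SSE distortion.
      * If the resulting score beats *dmin, the state is kept via
      * copy_context_after_encode() and *next_block is flipped. */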
2154 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2155                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2156                            int *dmin, int *next_block, int motion_x, int motion_y)
2157 {
2158     int score;
2159     uint8_t *dest_backup[3];
2160
2161     copy_context_before_encode(s, backup, type);
2162
2163     s->block= s->blocks[*next_block];
2164     s->pb= pb[*next_block];
2165     if(s->data_partitioning){
2166         s->pb2   = pb2   [*next_block];
2167         s->tex_pb= tex_pb[*next_block];
2168     }
2169
2170     if(*next_block){
2171         memcpy(dest_backup, s->dest, sizeof(s->dest));
2172         s->dest[0] = s->rd_scratchpad;
2173         s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2174         s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2175         assert(s->linesize >= 32); //FIXME
2176     }
2177
2178     encode_mb(s, motion_x, motion_y);
2179
2180     score= put_bits_count(&s->pb);
2181     if(s->data_partitioning){
2182         score+= put_bits_count(&s->pb2);
2183         score+= put_bits_count(&s->tex_pb);
2184     }
2185
2186     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2187         ff_MPV_decode_mb(s, s->block);
2188
2189         score *= s->lambda2;
2190         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2191     }
2192
2193     if(*next_block){
2194         memcpy(s->dest, dest_backup, sizeof(s->dest));
2195     }
2196
2197     if(score<*dmin){
2198         *dmin= score;
2199         *next_block^=1;
2200
2201         copy_context_after_encode(best, s, type);
2202     }
2203 }
2204
2205 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2206     uint32_t *sq = ff_squareTbl + 256;
2207     int acc=0;
2208     int x,y;
2209
2210     if(w==16 && h==16)
2211         return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2212     else if(w==8 && h==8)
2213         return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2214
2215     for(y=0; y<h; y++){
2216         for(x=0; x<w; x++){
2217             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2218         }
2219     }
2220
2221     assert(acc>=0);
2222
2223     return acc;
2224 }
2225
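     /* Distortion of the current macroblock: SSE (or NSSE if selected as the
      * MB comparison function) between source and reconstruction, handling
      * partial macroblocks at the right/bottom border. */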
2226 static int sse_mb(MpegEncContext *s){
2227     int w= 16;
2228     int h= 16;
2229
2230     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2231     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2232
2233     if(w==16 && h==16)
2234       if(s->avctx->mb_cmp == FF_CMP_NSSE){
2235         return  s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2236                +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2237                +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2238       }else{
2239         return  s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2240                +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2241                +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2242       }
2243     else
2244         return  sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2245                +sse(s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2246                +sse(s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2247 }
2248
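     /* Bottom-up motion estimation pre-pass over the slice, using the
      * pre_dia_size search diameter. */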
2249 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2250     MpegEncContext *s= *(void**)arg;
2251
2252
2253     s->me.pre_pass=1;
2254     s->me.dia_size= s->avctx->pre_dia_size;
2255     s->first_slice_line=1;
2256     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2257         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2258             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2259         }
2260         s->first_slice_line=0;
2261     }
2262
2263     s->me.pre_pass=0;
2264
2265     return 0;
2266 }
2267
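     /* Per-slice motion estimation: compute motion vectors and candidate
      * macroblock types for every MB (P- or B-frame variant). */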
2268 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2269     MpegEncContext *s= *(void**)arg;
2270
2271     ff_check_alignment();
2272
2273     s->me.dia_size= s->avctx->dia_size;
2274     s->first_slice_line=1;
2275     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2276         s->mb_x=0; //for block init below
2277         ff_init_block_index(s);
2278         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2279             s->block_index[0]+=2;
2280             s->block_index[1]+=2;
2281             s->block_index[2]+=2;
2282             s->block_index[3]+=2;
2283
2284             /* compute motion vector & mb_type and store in context */
2285             if(s->pict_type==AV_PICTURE_TYPE_B)
2286                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2287             else
2288                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2289         }
2290         s->first_slice_line=0;
2291     }
2292     return 0;
2293 }
2294
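     /* Per-slice pass over the luma plane computing each macroblock's variance
      * and mean; the variance sum is used later for rate control and adaptive
      * quantization. */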
2295 static int mb_var_thread(AVCodecContext *c, void *arg){
2296     MpegEncContext *s= *(void**)arg;
2297     int mb_x, mb_y;
2298
2299     ff_check_alignment();
2300
2301     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2302         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2303             int xx = mb_x * 16;
2304             int yy = mb_y * 16;
2305             uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2306             int varc;
2307             int sum = s->dsp.pix_sum(pix, s->linesize);
2308
2309             varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2310
2311             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2312             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2313             s->me.mb_var_sum_temp    += varc;
2314         }
2315     }
2316     return 0;
2317 }
2318
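     /* Terminate the current slice: merge MPEG-4 data partitions, write
      * codec-specific stuffing and byte-align the bitstream. */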
2319 static void write_slice_end(MpegEncContext *s){
2320     if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4){
2321         if(s->partitioned_frame){
2322             ff_mpeg4_merge_partitions(s);
2323         }
2324
2325         ff_mpeg4_stuffing(&s->pb);
2326     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2327         ff_mjpeg_encode_stuffing(&s->pb);
2328     }
2329
2330     avpriv_align_put_bits(&s->pb);
2331     flush_put_bits(&s->pb);
2332
2333     if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2334         s->misc_bits+= get_bits_diff(s);
2335 }
2336
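     /* Main per-slice macroblock loop: emits GOB / slice / video packet
      * headers once the RTP payload size is reached, and for each macroblock
      * either encodes the single chosen mode directly or trial-encodes all
      * candidate modes (and, with QP_RD, quantizer offsets), keeping the
      * cheapest by rate or rate-distortion score. */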
2337 static int encode_thread(AVCodecContext *c, void *arg){
2338     MpegEncContext *s= *(void**)arg;
2339     int mb_x, mb_y, pdif = 0;
2340     int chr_h= 16>>s->chroma_y_shift;
2341     int i, j;
2342     MpegEncContext best_s, backup_s;
2343     uint8_t bit_buf[2][MAX_MB_BYTES];
2344     uint8_t bit_buf2[2][MAX_MB_BYTES];
2345     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2346     PutBitContext pb[2], pb2[2], tex_pb[2];
2347 //printf("%d->%d\n", s->resync_mb_y, s->end_mb_y);
2348
2349     ff_check_alignment();
2350
2351     for(i=0; i<2; i++){
2352         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2353         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2354         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2355     }
2356
2357     s->last_bits= put_bits_count(&s->pb);
2358     s->mv_bits=0;
2359     s->misc_bits=0;
2360     s->i_tex_bits=0;
2361     s->p_tex_bits=0;
2362     s->i_count=0;
2363     s->f_count=0;
2364     s->b_count=0;
2365     s->skip_count=0;
2366
2367     for(i=0; i<3; i++){
2368         /* init last dc values */
2369         /* note: quant matrix value (8) is implied here */
2370         s->last_dc[i] = 128 << s->intra_dc_precision;
2371
2372         s->current_picture.f.error[i] = 0;
2373     }
2374     s->mb_skip_run = 0;
2375     memset(s->last_mv, 0, sizeof(s->last_mv));
2376
2377     s->last_mv_dir = 0;
2378
2379     switch(s->codec_id){
2380     case CODEC_ID_H263:
2381     case CODEC_ID_H263P:
2382     case CODEC_ID_FLV1:
2383         if (CONFIG_H263_ENCODER)
2384             s->gob_index = ff_h263_get_gob_height(s);
2385         break;
2386     case CODEC_ID_MPEG4:
2387         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2388             ff_mpeg4_init_partitions(s);
2389         break;
2390     }
2391
2392     s->resync_mb_x=0;
2393     s->resync_mb_y=0;
2394     s->first_slice_line = 1;
2395     s->ptr_lastgob = s->pb.buf;
2396     for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2397 //    printf("row %d at %X\n", s->mb_y, (int)s);
2398         s->mb_x=0;
2399         s->mb_y= mb_y;
2400
2401         ff_set_qscale(s, s->qscale);
2402         ff_init_block_index(s);
2403
2404         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2405             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2406             int mb_type= s->mb_type[xy];
2407 //            int d;
2408             int dmin= INT_MAX;
2409             int dir;
2410
2411             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2412                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2413                 return -1;
2414             }
2415             if(s->data_partitioning){
2416                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
2417                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2418                     av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2419                     return -1;
2420                 }
2421             }
2422
2423             s->mb_x = mb_x;
2424             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
2425             ff_update_block_index(s);
2426
2427             if(CONFIG_H261_ENCODER && s->codec_id == CODEC_ID_H261){
2428                 ff_h261_reorder_mb_index(s);
2429                 xy= s->mb_y*s->mb_stride + s->mb_x;
2430                 mb_type= s->mb_type[xy];
2431             }
2432
2433             /* write gob / video packet header  */
2434             if(s->rtp_mode){
2435                 int current_packet_size, is_gob_start;
2436
2437                 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2438
2439                 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2440
2441                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2442
2443                 switch(s->codec_id){
2444                 case CODEC_ID_H263:
2445                 case CODEC_ID_H263P:
2446                     if(!s->h263_slice_structured)
2447                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2448                     break;
2449                 case CODEC_ID_MPEG2VIDEO:
2450                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1; /* fall through */
2451                 case CODEC_ID_MPEG1VIDEO:
2452                     if(s->mb_skip_run) is_gob_start=0;
2453                     break;
2454                 }
2455
2456                 if(is_gob_start){
2457                     if(s->start_mb_y != mb_y || mb_x!=0){
2458                         write_slice_end(s);
2459
2460                         if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame){
2461                             ff_mpeg4_init_partitions(s);
2462                         }
2463                     }
2464
2465                     assert((put_bits_count(&s->pb)&7) == 0);
2466                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2467
2468                     if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2469                         int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2470                         int d= 100 / s->avctx->error_rate;
2471                         if(r % d == 0){
2472                             current_packet_size=0;
2473                             s->pb.buf_ptr= s->ptr_lastgob;
2474                             assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2475                         }
2476                     }
2477
2478                     if (s->avctx->rtp_callback){
2479                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2480                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2481                     }
2482
2483                     switch(s->codec_id){
2484                     case CODEC_ID_MPEG4:
2485                         if (CONFIG_MPEG4_ENCODER) {
2486                             ff_mpeg4_encode_video_packet_header(s);
2487                             ff_mpeg4_clean_buffers(s);
2488                         }
2489                     break;
2490                     case CODEC_ID_MPEG1VIDEO:
2491                     case CODEC_ID_MPEG2VIDEO:
2492                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2493                             ff_mpeg1_encode_slice_header(s);
2494                             ff_mpeg1_clean_buffers(s);
2495                         }
2496                     break;
2497                     case CODEC_ID_H263:
2498                     case CODEC_ID_H263P:
2499                         if (CONFIG_H263_ENCODER)
2500                             ff_h263_encode_gob_header(s, mb_y);
2501                     break;
2502                     }
2503
2504                     if(s->flags&CODEC_FLAG_PASS1){
2505                         int bits= put_bits_count(&s->pb);
2506                         s->misc_bits+= bits - s->last_bits;
2507                         s->last_bits= bits;
2508                     }
2509
2510                     s->ptr_lastgob += current_packet_size;
2511                     s->first_slice_line=1;
2512                     s->resync_mb_x=mb_x;
2513                     s->resync_mb_y=mb_y;
2514                 }
2515             }
2516
2517             if(  (s->resync_mb_x   == s->mb_x)
2518                && s->resync_mb_y+1 == s->mb_y){
2519                 s->first_slice_line=0;
2520             }
2521
2522             s->mb_skipped=0;
2523             s->dquant=0; //only for QP_RD
2524
2525             if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2526                 int next_block=0;
2527                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2528
2529                 copy_context_before_encode(&backup_s, s, -1);
2530                 backup_s.pb= s->pb;
2531                 best_s.data_partitioning= s->data_partitioning;
2532                 best_s.partitioned_frame= s->partitioned_frame;
2533                 if(s->data_partitioning){
2534                     backup_s.pb2= s->pb2;
2535                     backup_s.tex_pb= s->tex_pb;
2536                 }
2537
2538                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2539                     s->mv_dir = MV_DIR_FORWARD;
2540                     s->mv_type = MV_TYPE_16X16;
2541                     s->mb_intra= 0;
2542                     s->mv[0][0][0] = s->p_mv_table[xy][0];
2543                     s->mv[0][0][1] = s->p_mv_table[xy][1];
2544                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2545                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2546                 }
2547                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2548                     s->mv_dir = MV_DIR_FORWARD;
2549                     s->mv_type = MV_TYPE_FIELD;
2550                     s->mb_intra= 0;
2551                     for(i=0; i<2; i++){
2552                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2553                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2554                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2555                     }
2556                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2557                                  &dmin, &next_block, 0, 0);
2558                 }
2559                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2560                     s->mv_dir = MV_DIR_FORWARD;
2561                     s->mv_type = MV_TYPE_16X16;
2562                     s->mb_intra= 0;
2563                     s->mv[0][0][0] = 0;
2564                     s->mv[0][0][1] = 0;
2565                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2566                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2567                 }
2568                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2569                     s->mv_dir = MV_DIR_FORWARD;
2570                     s->mv_type = MV_TYPE_8X8;
2571                     s->mb_intra= 0;
2572                     for(i=0; i<4; i++){
2573                         s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2574                         s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2575                     }
2576                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2577                                  &dmin, &next_block, 0, 0);
2578                 }
2579                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2580                     s->mv_dir = MV_DIR_FORWARD;
2581                     s->mv_type = MV_TYPE_16X16;
2582                     s->mb_intra= 0;
2583                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2584                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2585                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2586                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2587                 }
2588                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2589                     s->mv_dir = MV_DIR_BACKWARD;
2590                     s->mv_type = MV_TYPE_16X16;
2591                     s->mb_intra= 0;
2592                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2593                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2594                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2595                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2596                 }
2597                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2598                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2599                     s->mv_type = MV_TYPE_16X16;
2600                     s->mb_intra= 0;
2601                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2602                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2603                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2604                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2605                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2606                                  &dmin, &next_block, 0, 0);
2607                 }
2608                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2609                     s->mv_dir = MV_DIR_FORWARD;
2610                     s->mv_type = MV_TYPE_FIELD;
2611                     s->mb_intra= 0;
2612                     for(i=0; i<2; i++){
2613                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2614                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2615                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2616                     }
2617                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2618                                  &dmin, &next_block, 0, 0);
2619                 }
2620                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2621                     s->mv_dir = MV_DIR_BACKWARD;
2622                     s->mv_type = MV_TYPE_FIELD;
2623                     s->mb_intra= 0;
2624                     for(i=0; i<2; i++){
2625                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2626                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2627                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2628                     }
2629                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2630                                  &dmin, &next_block, 0, 0);
2631                 }
2632                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2633                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2634                     s->mv_type = MV_TYPE_FIELD;
2635                     s->mb_intra= 0;
2636                     for(dir=0; dir<2; dir++){
2637                         for(i=0; i<2; i++){
2638                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2639                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2640                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2641                         }
2642                     }
2643                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2644                                  &dmin, &next_block, 0, 0);
2645                 }
2646                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2647                     s->mv_dir = 0;
2648                     s->mv_type = MV_TYPE_16X16;
2649                     s->mb_intra= 1;
2650                     s->mv[0][0][0] = 0;
2651                     s->mv[0][0][1] = 0;
2652                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2653                                  &dmin, &next_block, 0, 0);
2654                     if(s->h263_pred || s->h263_aic){
2655                         if(best_s.mb_intra)
2656                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2657                         else
2658                             ff_clean_intra_table_entries(s); //old mode?
2659                     }
2660                 }
2661
2662                 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2663                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2664                         const int last_qp= backup_s.qscale;
2665                         int qpi, qp, dc[6];
2666                         DCTELEM ac[6][16];
2667                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2668                         static const int dquant_tab[4]={-1,1,-2,2};
2669
2670                         assert(backup_s.dquant == 0);
2671
2672                         //FIXME intra
2673                         s->mv_dir= best_s.mv_dir;
2674                         s->mv_type = MV_TYPE_16X16;
2675                         s->mb_intra= best_s.mb_intra;
2676                         s->mv[0][0][0] = best_s.mv[0][0][0];
2677                         s->mv[0][0][1] = best_s.mv[0][0][1];
2678                         s->mv[1][0][0] = best_s.mv[1][0][0];
2679                         s->mv[1][0][1] = best_s.mv[1][0][1];
2680
2681                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2682                         for(; qpi<4; qpi++){
2683                             int dquant= dquant_tab[qpi];
2684                             qp= last_qp + dquant;
2685                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2686                                 continue;
2687                             backup_s.dquant= dquant;
2688                             if(s->mb_intra && s->dc_val[0]){
2689                                 for(i=0; i<6; i++){
2690                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
2691                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16);
2692                                 }
2693                             }
2694
2695                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2696                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2697                             if(best_s.qscale != qp){
2698                                 if(s->mb_intra && s->dc_val[0]){
2699                                     for(i=0; i<6; i++){
2700                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
2701                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
2702                                     }
2703                                 }
2704                             }
2705                         }
2706                     }
2707                 }
2708                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2709                     int mx= s->b_direct_mv_table[xy][0];
2710                     int my= s->b_direct_mv_table[xy][1];
2711
2712                     backup_s.dquant = 0;
2713                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2714                     s->mb_intra= 0;
2715                     ff_mpeg4_set_direct_mv(s, mx, my);
2716                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2717                                  &dmin, &next_block, mx, my);
2718                 }
2719                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2720                     backup_s.dquant = 0;
2721                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2722                     s->mb_intra= 0;
2723                     ff_mpeg4_set_direct_mv(s, 0, 0);
2724                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2725                                  &dmin, &next_block, 0, 0);
2726                 }
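                     /* With FF_MPV_FLAG_SKIP_RD, additionally try coding the best inter
                      * candidate without residual data (skipdct=1) so the RD comparison
                      * can decide whether dropping the texture bits is cheaper. */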
2727                 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2728                     int coded=0;
2729                     for(i=0; i<6; i++)
2730                         coded |= s->block_last_index[i];
2731                     if(coded){
2732                         int mx,my;
2733                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
2734                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2735                             mx=my=0; //FIXME find the one we actually used
2736                             ff_mpeg4_set_direct_mv(s, mx, my);
2737                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2738                             mx= s->mv[1][0][0];
2739                             my= s->mv[1][0][1];
2740                         }else{
2741                             mx= s->mv[0][0][0];
2742                             my= s->mv[0][0][1];
2743                         }
2744
2745                         s->mv_dir= best_s.mv_dir;
2746                         s->mv_type = best_s.mv_type;
2747                         s->mb_intra= 0;
2748 /*                        s->mv[0][0][0] = best_s.mv[0][0][0];
2749                         s->mv[0][0][1] = best_s.mv[0][0][1];
2750                         s->mv[1][0][0] = best_s.mv[1][0][0];
2751                         s->mv[1][0][1] = best_s.mv[1][0][1];*/
2752                         backup_s.dquant= 0;
2753                         s->skipdct=1;
2754                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2755                                         &dmin, &next_block, mx, my);
2756                         s->skipdct=0;
2757                     }
2758                 }
2759
2760                 s->current_picture.f.qscale_table[xy] = best_s.qscale;
2761
2762                 copy_context_after_encode(s, &best_s, -1);
2763
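                     /* The winning candidate's bits sit in the scratch buffer
                      * bit_buf[next_block^1]; append them to the real bitstream kept in
                      * backup_s and restore the PutBitContexts. */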
2764                 pb_bits_count= put_bits_count(&s->pb);
2765                 flush_put_bits(&s->pb);
2766                 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2767                 s->pb= backup_s.pb;
2768
2769                 if(s->data_partitioning){
2770                     pb2_bits_count= put_bits_count(&s->pb2);
2771                     flush_put_bits(&s->pb2);
2772                     avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2773                     s->pb2= backup_s.pb2;
2774
2775                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
2776                     flush_put_bits(&s->tex_pb);
2777                     avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2778                     s->tex_pb= backup_s.tex_pb;
2779                 }
2780                 s->last_bits= put_bits_count(&s->pb);
2781
2782                 if (CONFIG_H263_ENCODER &&
2783                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2784                     ff_h263_update_motion_val(s);
2785
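                     /* next_block==0 means the best candidate was reconstructed into
                      * rd_scratchpad rather than into the frame; copy it back so the
                      * locally decoded picture matches the coded data. */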
2786                 if(next_block==0){ //FIXME 16 vs linesize16
2787                     s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad                     , s->linesize  ,16);
2788                     s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
2789                     s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2790                 }
2791
2792                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2793                     ff_MPV_decode_mb(s, s->block);
2794             } else {
2795                 int motion_x = 0, motion_y = 0;
2796                 s->mv_type=MV_TYPE_16X16;
2797                 // only one MB type is possible here
2798
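                     /* Load the motion vectors chosen during motion estimation from the
                      * corresponding candidate table into s->mv and set mv_dir, mv_type
                      * and mb_intra before coding the macroblock. */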
2799                 switch(mb_type){
2800                 case CANDIDATE_MB_TYPE_INTRA:
2801                     s->mv_dir = 0;
2802                     s->mb_intra= 1;
2803                     motion_x= s->mv[0][0][0] = 0;
2804                     motion_y= s->mv[0][0][1] = 0;
2805                     break;
2806                 case CANDIDATE_MB_TYPE_INTER:
2807                     s->mv_dir = MV_DIR_FORWARD;
2808                     s->mb_intra= 0;
2809                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2810                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2811                     break;
2812                 case CANDIDATE_MB_TYPE_INTER_I:
2813                     s->mv_dir = MV_DIR_FORWARD;
2814                     s->mv_type = MV_TYPE_FIELD;
2815                     s->mb_intra= 0;
2816                     for(i=0; i<2; i++){
2817                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2818                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2819                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2820                     }
2821                     break;
2822                 case CANDIDATE_MB_TYPE_INTER4V:
2823                     s->mv_dir = MV_DIR_FORWARD;
2824                     s->mv_type = MV_TYPE_8X8;
2825                     s->mb_intra= 0;
2826                     for(i=0; i<4; i++){
2827                         s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2828                         s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2829                     }
2830                     break;
2831                 case CANDIDATE_MB_TYPE_DIRECT:
2832                     if (CONFIG_MPEG4_ENCODER) {
2833                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2834                         s->mb_intra= 0;
2835                         motion_x=s->b_direct_mv_table[xy][0];
2836                         motion_y=s->b_direct_mv_table[xy][1];
2837                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2838                     }
2839                     break;
2840                 case CANDIDATE_MB_TYPE_DIRECT0:
2841                     if (CONFIG_MPEG4_ENCODER) {
2842                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2843                         s->mb_intra= 0;
2844                         ff_mpeg4_set_direct_mv(s, 0, 0);
2845                     }
2846                     break;
2847                 case CANDIDATE_MB_TYPE_BIDIR:
2848                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2849                     s->mb_intra= 0;
2850                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2851                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2852                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2853                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2854                     break;
2855                 case CANDIDATE_MB_TYPE_BACKWARD:
2856                     s->mv_dir = MV_DIR_BACKWARD;
2857                     s->mb_intra= 0;
2858                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2859                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2860                     break;
2861                 case CANDIDATE_MB_TYPE_FORWARD:
2862                     s->mv_dir = MV_DIR_FORWARD;
2863                     s->mb_intra= 0;
2864                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2865                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2866 //                    printf(" %d %d ", motion_x, motion_y);
2867                     break;
2868                 case CANDIDATE_MB_TYPE_FORWARD_I:
2869                     s->mv_dir = MV_DIR_FORWARD;
2870                     s->mv_type = MV_TYPE_FIELD;
2871                     s->mb_intra= 0;
2872                     for(i=0; i<2; i++){
2873                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2874                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2875                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2876                     }
2877                     break;
2878                 case CANDIDATE_MB_TYPE_BACKWARD_I:
2879                     s->mv_dir = MV_DIR_BACKWARD;
2880                     s->mv_type = MV_TYPE_FIELD;
2881                     s->mb_intra= 0;
2882                     for(i=0; i<2; i++){
2883                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2884                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2885                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2886                     }
2887                     break;
2888                 case CANDIDATE_MB_TYPE_BIDIR_I:
2889                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2890                     s->mv_type = MV_TYPE_FIELD;
2891                     s->mb_intra= 0;
2892                     for(dir=0; dir<2; dir++){
2893                         for(i=0; i<2; i++){
2894                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2895                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2896                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2897                         }
2898                     }
2899                     break;
2900                 default:
2901                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2902                 }
2903
2904                 encode_mb(s, motion_x, motion_y);
2905
2906                 // RAL: remember the MV direction of the last macroblock
2907                 s->last_mv_dir = s->mv_dir;
2908
2909                 if (CONFIG_H263_ENCODER &&
2910                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2911                     ff_h263_update_motion_val(s);
2912
2913                 ff_MPV_decode_mb(s, s->block);
2914             }
2915
2916             /* Zero the MV table entries of intra MBs in I/P/S frames so that direct mode in B-frames does not use stale vectors */
2917             if(s->mb_intra /* && I,P,S_TYPE */){
2918                 s->p_mv_table[xy][0]=0;
2919                 s->p_mv_table[xy][1]=0;
2920             }
2921
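                 /* Accumulate per-plane SSE between the source and the reconstruction
                  * so the final PSNR can be reported; w/h are clipped at the right and
                  * bottom picture borders. */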
2922             if(s->flags&CODEC_FLAG_PSNR){
2923                 int w= 16;
2924                 int h= 16;
2925
2926                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2927                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2928
2929                 s->current_picture.f.error[0] += sse(
2930                     s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2931                     s->dest[0], w, h, s->linesize);
2932                 s->current_picture.f.error[1] += sse(
2933                     s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
2934                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2935                 s->current_picture.f.error[2] += sse(
2936                     s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
2937                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2938             }
2939             if(s->loop_filter){
2940                 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2941                     ff_h263_loop_filter(s);
2942             }
2943 //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, put_bits_count(&s->pb));
2944         }
2945     }
2946
2947     // not pretty, but the extension header must be written before the slice is flushed, so it has to be here
2948     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
2949         ff_msmpeg4_encode_ext_header(s);
2950
2951     write_slice_end(s);
2952
2953     /* Send the last GOB if RTP */
2954     if (s->avctx->rtp_callback) {
2955         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
2956         pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
2957         /* Call the RTP callback to send the last GOB */
2958         emms_c();
2959         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
2960     }
2961
2962     return 0;
2963 }
2964
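     /* Helpers for slice threading: statistics gathered in each slice context
      * are summed into the main context and cleared in the source. */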
2965 #define MERGE(field) dst->field += src->field; src->field=0
2966 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
2967     MERGE(me.scene_change_score);
2968     MERGE(me.mc_mb_var_sum_temp);
2969     MERGE(me.mb_var_sum_temp);
2970 }
2971
2972 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
2973     int i;
2974
2975     MERGE(dct_count[0]); //note: the other DCT variables are not part of the context
2976     MERGE(dct_count[1]);
2977     MERGE(mv_bits);
2978     MERGE(i_tex_bits);
2979     MERGE(p_tex_bits);
2980     MERGE(i_count);
2981     MERGE(f_count);
2982     MERGE(b_count);
2983     MERGE(skip_count);
2984     MERGE(misc_bits);
2985     MERGE(error_count);
2986     MERGE(padding_bug_score);
2987     MERGE(current_picture.f.error[0]);
2988     MERGE(current_picture.f.error[1]);
2989     MERGE(current_picture.f.error[2]);
2990
2991     if(dst->avctx->noise_reduction){
2992         for(i=0; i<64; i++){
2993             MERGE(dct_error_sum[0][i]);
2994             MERGE(dct_error_sum[1][i]);
2995         }
2996     }
2997
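         /* slice bitstreams are byte-aligned; append this slice's bits to the
          * main context's bitstream */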
2998     assert(put_bits_count(&src->pb) % 8 ==0);
2999     assert(put_bits_count(&dst->pb) % 8 ==0);
3000     avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3001     flush_put_bits(&dst->pb);
3002 }
3003
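     /* Pick the quality for the upcoming picture: a previously scheduled lambda
      * (next_lambda), the rate-control estimate, or the fixed qscale. With
      * adaptive quantization the per-MB qscale table is additionally cleaned up
      * for codecs that only allow small per-MB qscale changes. */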
3004 static int estimate_qp(MpegEncContext *s, int dry_run){
3005     if (s->next_lambda){
3006         s->current_picture_ptr->f.quality =
3007         s->current_picture.f.quality = s->next_lambda;
3008         if(!dry_run) s->next_lambda= 0;
3009     } else if (!s->fixed_qscale) {
3010         s->current_picture_ptr->f.quality =
3011         s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
3012         if (s->current_picture.f.quality < 0)
3013             return -1;
3014     }
3015
3016     if(s->adaptive_quant){
3017         switch(s->codec_id){
3018         case CODEC_ID_MPEG4:
3019             if (CONFIG_MPEG4_ENCODER)
3020                 ff_clean_mpeg4_qscales(s);
3021             break;
3022         case CODEC_ID_H263:
3023         case CODEC_ID_H263P:
3024         case CODEC_ID_FLV1:
3025             if (CONFIG_H263_ENCODER)
3026                 ff_clean_h263_qscales(s);
3027             break;
3028         default:
3029             ff_init_qscale_tab(s);
3030         }
3031
3032         s->lambda= s->lambda_table[0];
3033         //FIXME broken
3034     }else
3035         s->lambda = s->current_picture.f.quality;
3036 //printf("%d %d\n", s->avctx->global_quality, s->current_picture.quality);
3037     update_qscale(s);
3038     return 0;
3039 }
3040
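     /* pp_time is the distance between the two reference frames surrounding the
      * current B-frames, pb_time the distance from the previous reference frame
      * to the current B-frame; both are in time_base units and are used e.g.
      * for direct-mode MV scaling. */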
3041 /* must be called before writing the header */
3042 static void set_frame_distances(MpegEncContext * s){
3043     assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3044     s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3045
3046     if(s->pict_type==AV_PICTURE_TYPE_B){
3047         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3048         assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3049     }else{
3050         s->pp_time= s->time - s->last_non_b_time;
3051         s->last_non_b_time= s->time;
3052         assert(s->picture_number==0 || s->pp_time > 0);
3053     }
3054 }
3055
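     /* Top-level per-picture encoding: initializes timing and quality state,
      * runs motion estimation and drives the macroblock coding loop over all
      * slice contexts. */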
3056 static int encode_picture(MpegEncContext *s, int picture_number)
3057 {
3058     int i;
3059     int bits;
3060     int context_count = s->slice_context_count;
3061
3062     s->picture_number = picture_number;
3063
3064     /* Reset the average MB variance */
3065     s->me.mb_var_sum_temp    =
3066     s->me.mc_mb_var_sum_temp = 0;
3067
3068     /* We need to initialize some time variables before we can encode B-frames. */
3069     // RAL: Condition added for MPEG1VIDEO
3070     if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3071         set_frame_distances(s);
3072     if(CONFIG_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4)
3073         ff_set_mpeg4_time(s);
3074
3075     s->me.scene_change_score=0;
3076
3077 //    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion