[ffmpeg.git] / libavcodec / mpegvideo_enc.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29
30 #include "libavutil/intmath.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/opt.h"
33 #include "avcodec.h"
34 #include "dsputil.h"
35 #include "mpegvideo.h"
36 #include "h263.h"
37 #include "mjpegenc.h"
38 #include "msmpeg4.h"
39 #include "faandct.h"
40 #include "thread.h"
41 #include "aandcttab.h"
42 #include "flv.h"
43 #include "mpeg4video.h"
44 #include "internal.h"
45 #include "bytestream.h"
46 #include <limits.h>
47
48 //#undef NDEBUG
49 //#include <assert.h>
50
51 static int encode_picture(MpegEncContext *s, int picture_number);
52 static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
53 static int sse_mb(MpegEncContext *s);
54 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
55 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
56
57 /* enable all paranoid tests for rounding, overflows, etc... */
58 //#define PARANOID
59
60 //#define DEBUG
61
62 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
63 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
64
65 const AVOption ff_mpv_generic_options[] = {
66     FF_MPV_COMMON_OPTS
67     { NULL },
68 };
69
70 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
71                        uint16_t (*qmat16)[2][64],
72                        const uint16_t *quant_matrix,
73                        int bias, int qmin, int qmax, int intra)
74 {
75     int qscale;
76     int shift = 0;
77
78     for (qscale = qmin; qscale <= qmax; qscale++) {
79         int i;
80         if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
81             dsp->fdct == ff_jpeg_fdct_islow_10 ||
82             dsp->fdct == ff_faandct) {
83             for (i = 0; i < 64; i++) {
84                 const int j = dsp->idct_permutation[i];
85                 /* 16 <= qscale * quant_matrix[i] <= 7905
86                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
87                  *             19952 <=              x  <= 249205026
88                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
89                  *           3444240 >= (1 << 36) / (x) >= 275 */
90
91                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
92                                         (qscale * quant_matrix[j]));
93             }
94         } else if (dsp->fdct == ff_fdct_ifast) {
95             for (i = 0; i < 64; i++) {
96                 const int j = dsp->idct_permutation[i];
97                 /* 16 <= qscale * quant_matrix[i] <= 7905
98                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
99                  *             19952 <=              x  <= 249205026
100                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
101                  *           3444240 >= (1 << 36) / (x) >= 275 */
102
103                 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
104                                         (ff_aanscales[i] * qscale *
105                                          quant_matrix[j]));
106             }
107         } else {
108             for (i = 0; i < 64; i++) {
109                 const int j = dsp->idct_permutation[i];
110                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
111                  * Assume x = qscale * quant_matrix[i]
112                  * So             16 <=              x  <= 7905
113                  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
114                  * so          32768 >= (1 << 19) / (x) >= 67 */
115                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
116                                         (qscale * quant_matrix[j]));
117                 //qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) /
118                 //                    (qscale * quant_matrix[i]);
119                 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
120                                        (qscale * quant_matrix[j]);
121
122                 if (qmat16[qscale][0][i] == 0 ||
123                     qmat16[qscale][0][i] == 128 * 256)
124                     qmat16[qscale][0][i] = 128 * 256 - 1;
125                 qmat16[qscale][1][i] =
126                     ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
127                                 qmat16[qscale][0][i]);
128             }
129         }
130
131         for (i = intra; i < 64; i++) {
132             int64_t max = 8191;
133             if (dsp->fdct == ff_fdct_ifast) {
134                 max = (8191LL * ff_aanscales[i]) >> 14;
135             }
136             while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
137                 shift++;
138             }
139         }
140     }
141     if (shift) {
142         av_log(NULL, AV_LOG_INFO,
143                "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
144                QMAT_SHIFT - shift);
145     }
146 }
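/* Editor's note: the tables built above let the quantizer replace a
 * per-coefficient division by a multiply and a shift; schematically
 *     qmat[qscale][i] = (1 << QMAT_SHIFT) / (qscale * quant_matrix[j]);
 *     level           = (coeff * qmat[qscale][i]) >> QMAT_SHIFT;
 * which approximates coeff / (qscale * quant_matrix[j]).  The loop that
 * increments 'shift' only checks whether the largest possible product still
 * fits in an int and triggers the overflow warning if it does not. */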
147
148 static inline void update_qscale(MpegEncContext *s)
149 {
150     s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
151                 (FF_LAMBDA_SHIFT + 7);
152     s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
153
154     s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
155                  FF_LAMBDA_SHIFT;
156 }
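/* Editor's note: with FF_LAMBDA_SHIFT == 7 and FF_LAMBDA_SCALE == 128, the
 * factor 139 / (1 << 14) is roughly 1 / 118 == 1 / FF_QP2LAMBDA, so
 * update_qscale() maps a lambda value back to a quantizer scale.  For example
 * lambda == 236 gives (236 * 139 + 8192) >> 14 == 2 before clipping. */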
157
158 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
159 {
160     int i;
161
162     if (matrix) {
163         put_bits(pb, 1, 1);
164         for (i = 0; i < 64; i++) {
165             put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
166         }
167     } else
168         put_bits(pb, 1, 0);
169 }
170
171 /**
172  * init s->current_picture.qscale_table from s->lambda_table
173  */
174 void ff_init_qscale_tab(MpegEncContext *s)
175 {
176     int8_t * const qscale_table = s->current_picture.f.qscale_table;
177     int i;
178
179     for (i = 0; i < s->mb_num; i++) {
180         unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
181         int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
182         qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
183                                                   s->avctx->qmax);
184     }
185 }
186
187 static void copy_picture_attributes(MpegEncContext *s,
188                                     AVFrame *dst,
189                                     AVFrame *src)
190 {
191     int i;
192
193     dst->pict_type              = src->pict_type;
194     dst->quality                = src->quality;
195     dst->coded_picture_number   = src->coded_picture_number;
196     dst->display_picture_number = src->display_picture_number;
197     //dst->reference              = src->reference;
198     dst->pts                    = src->pts;
199     dst->interlaced_frame       = src->interlaced_frame;
200     dst->top_field_first        = src->top_field_first;
201
202     if (s->avctx->me_threshold) {
203         if (!src->motion_val[0])
204             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
205         if (!src->mb_type)
206             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
207         if (!src->ref_index[0])
208             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
209         if (src->motion_subsample_log2 != dst->motion_subsample_log2)
210             av_log(s->avctx, AV_LOG_ERROR,
211                    "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
212                    src->motion_subsample_log2, dst->motion_subsample_log2);
213
214         memcpy(dst->mb_type, src->mb_type,
215                s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
216
217         for (i = 0; i < 2; i++) {
218             int stride = ((16 * s->mb_width ) >>
219                           src->motion_subsample_log2) + 1;
220             int height = ((16 * s->mb_height) >> src->motion_subsample_log2);
221
222             if (src->motion_val[i] &&
223                 src->motion_val[i] != dst->motion_val[i]) {
224                 memcpy(dst->motion_val[i], src->motion_val[i],
225                        2 * stride * height * sizeof(int16_t));
226             }
227             if (src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]) {
228                 memcpy(dst->ref_index[i], src->ref_index[i],
229                        s->mb_stride * 4 * s->mb_height * sizeof(int8_t));
230             }
231         }
232     }
233 }
234
235 static void update_duplicate_context_after_me(MpegEncContext *dst,
236                                               MpegEncContext *src)
237 {
238 #define COPY(a) dst->a= src->a
239     COPY(pict_type);
240     COPY(current_picture);
241     COPY(f_code);
242     COPY(b_code);
243     COPY(qscale);
244     COPY(lambda);
245     COPY(lambda2);
246     COPY(picture_in_gop_number);
247     COPY(gop_picture_number);
248     COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
249     COPY(progressive_frame);    // FIXME don't set in encode_header
250     COPY(partitioned_frame);    // FIXME don't set in encode_header
251 #undef COPY
252 }
253
254 /**
255  * Set the given MpegEncContext to defaults for encoding.
256  * The changed fields will not depend upon the prior state of the MpegEncContext.
257  */
258 static void MPV_encode_defaults(MpegEncContext *s)
259 {
260     int i;
261     ff_MPV_common_defaults(s);
262
263     for (i = -16; i < 16; i++) {
264         default_fcode_tab[i + MAX_MV] = 1;
265     }
266     s->me.mv_penalty = default_mv_penalty;
267     s->fcode_tab     = default_fcode_tab;
268 }
269
270 /* init video encoder */
271 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
272 {
273     MpegEncContext *s = avctx->priv_data;
274     int i;
275     int chroma_h_shift, chroma_v_shift;
276
277     MPV_encode_defaults(s);
278
279     switch (avctx->codec_id) {
280     case AV_CODEC_ID_MPEG2VIDEO:
281         if (avctx->pix_fmt != PIX_FMT_YUV420P &&
282             avctx->pix_fmt != PIX_FMT_YUV422P) {
283             av_log(avctx, AV_LOG_ERROR,
284                    "only YUV420 and YUV422 are supported\n");
285             return -1;
286         }
287         break;
288     case AV_CODEC_ID_LJPEG:
289         if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
290             avctx->pix_fmt != PIX_FMT_YUVJ422P &&
291             avctx->pix_fmt != PIX_FMT_YUVJ444P &&
292             avctx->pix_fmt != PIX_FMT_BGRA     &&
293             ((avctx->pix_fmt != PIX_FMT_YUV420P &&
294               avctx->pix_fmt != PIX_FMT_YUV422P &&
295               avctx->pix_fmt != PIX_FMT_YUV444P) ||
296              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
297             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
298             return -1;
299         }
300         break;
301     case AV_CODEC_ID_MJPEG:
302         if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
303             avctx->pix_fmt != PIX_FMT_YUVJ422P &&
304             ((avctx->pix_fmt != PIX_FMT_YUV420P &&
305               avctx->pix_fmt != PIX_FMT_YUV422P) ||
306              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
307             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
308             return -1;
309         }
310         break;
311     default:
312         if (avctx->pix_fmt != PIX_FMT_YUV420P) {
313             av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
314             return -1;
315         }
316     }
317
318     switch (avctx->pix_fmt) {
319     case PIX_FMT_YUVJ422P:
320     case PIX_FMT_YUV422P:
321         s->chroma_format = CHROMA_422;
322         break;
323     case PIX_FMT_YUVJ420P:
324     case PIX_FMT_YUV420P:
325     default:
326         s->chroma_format = CHROMA_420;
327         break;
328     }
329
330     s->bit_rate = avctx->bit_rate;
331     s->width    = avctx->width;
332     s->height   = avctx->height;
333     if (avctx->gop_size > 600 &&
334         avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
335         av_log(avctx, AV_LOG_ERROR,
336                "Warning keyframe interval too large! reducing it ...\n");
337         avctx->gop_size = 600;
338     }
339     s->gop_size     = avctx->gop_size;
340     s->avctx        = avctx;
341     s->flags        = avctx->flags;
342     s->flags2       = avctx->flags2;
343     s->max_b_frames = avctx->max_b_frames;
344     s->codec_id     = avctx->codec->id;
345 #if FF_API_MPV_GLOBAL_OPTS
346     if (avctx->luma_elim_threshold)
347         s->luma_elim_threshold   = avctx->luma_elim_threshold;
348     if (avctx->chroma_elim_threshold)
349         s->chroma_elim_threshold = avctx->chroma_elim_threshold;
350 #endif
351     s->strict_std_compliance = avctx->strict_std_compliance;
352     s->quarter_sample     = (avctx->flags & CODEC_FLAG_QPEL) != 0;
353     s->mpeg_quant         = avctx->mpeg_quant;
354     s->rtp_mode           = !!avctx->rtp_payload_size;
355     s->intra_dc_precision = avctx->intra_dc_precision;
356     s->user_specified_pts = AV_NOPTS_VALUE;
357
358     if (s->gop_size <= 1) {
359         s->intra_only = 1;
360         s->gop_size   = 12;
361     } else {
362         s->intra_only = 0;
363     }
364
365     s->me_method = avctx->me_method;
366
367     /* Fixed QSCALE */
368     s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
369
370 #if FF_API_MPV_GLOBAL_OPTS
371     if (s->flags & CODEC_FLAG_QP_RD)
372         s->mpv_flags |= FF_MPV_FLAG_QP_RD;
373 #endif
374
375     s->adaptive_quant = (s->avctx->lumi_masking ||
376                          s->avctx->dark_masking ||
377                          s->avctx->temporal_cplx_masking ||
378                          s->avctx->spatial_cplx_masking  ||
379                          s->avctx->p_masking      ||
380                          s->avctx->border_masking ||
381                          (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
382                         !s->fixed_qscale;
383
384     s->loop_filter      = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
385
386     if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
387         av_log(avctx, AV_LOG_ERROR,
388                "a vbv buffer size is needed, "
389                "for encoding with a maximum bitrate\n");
390         return -1;
391     }
392
393     if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
394         av_log(avctx, AV_LOG_INFO,
395                "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
396     }
397
398     if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
399         av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
400         return -1;
401     }
402
403     if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
404         av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
405         return -1;
406     }
407
408     if (avctx->rc_max_rate &&
409         avctx->rc_max_rate == avctx->bit_rate &&
410         avctx->rc_max_rate != avctx->rc_min_rate) {
411         av_log(avctx, AV_LOG_INFO,
412                "impossible bitrate constraints, this will fail\n");
413     }
414
415     if (avctx->rc_buffer_size &&
416         avctx->bit_rate * (int64_t)avctx->time_base.num >
417             avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
418         av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
419         return -1;
420     }
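    /* Editor's note: the check above compares the average bits in one frame,
     * bit_rate * time_base, against the whole VBV buffer.  For example at
     * 1 Mbit/s and 25 fps a frame averages 40000 bits, so an rc_buffer_size
     * smaller than that could never hold even a single average frame. */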
421
422     if (!s->fixed_qscale &&
423         avctx->bit_rate * av_q2d(avctx->time_base) >
424             avctx->bit_rate_tolerance) {
425         av_log(avctx, AV_LOG_ERROR,
426                "bitrate tolerance too small for bitrate\n");
427         return -1;
428     }
429
430     if (s->avctx->rc_max_rate &&
431         s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
432         (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
433          s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
434         90000LL * (avctx->rc_buffer_size - 1) >
435             s->avctx->rc_max_rate * 0xFFFFLL) {
436         av_log(avctx, AV_LOG_INFO,
437                "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
438                "specified vbv buffer is too large for the given bitrate!\n");
439     }
440
441     if ((s->flags & CODEC_FLAG_4MV)  && s->codec_id != AV_CODEC_ID_MPEG4 &&
442         s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
443         s->codec_id != AV_CODEC_ID_FLV1) {
444         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
445         return -1;
446     }
447
448     if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
449         av_log(avctx, AV_LOG_ERROR,
450                "OBMC is only supported with simple mb decision\n");
451         return -1;
452     }
453
454     if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
455         av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
456         return -1;
457     }
458
459     if (s->max_b_frames                    &&
460         s->codec_id != AV_CODEC_ID_MPEG4      &&
461         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
462         s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
463         av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
464         return -1;
465     }
466
467     if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
468          s->codec_id == AV_CODEC_ID_H263  ||
469          s->codec_id == AV_CODEC_ID_H263P) &&
470         (avctx->sample_aspect_ratio.num > 255 ||
471          avctx->sample_aspect_ratio.den > 255)) {
472         av_log(avctx, AV_LOG_ERROR,
473                "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
474                avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
475         return -1;
476     }
477
478     if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
479         s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
480         av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
481         return -1;
482     }
483
484     // FIXME mpeg2 uses that too
485     if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
486         av_log(avctx, AV_LOG_ERROR,
487                "mpeg2 style quantization not supported by codec\n");
488         return -1;
489     }
490
491 #if FF_API_MPV_GLOBAL_OPTS
492     if (s->flags & CODEC_FLAG_CBP_RD)
493         s->mpv_flags |= FF_MPV_FLAG_CBP_RD;
494 #endif
495
496     if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
497         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
498         return -1;
499     }
500
501     if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
502         s->avctx->mb_decision != FF_MB_DECISION_RD) {
503         av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
504         return -1;
505     }
506
507     if (s->avctx->scenechange_threshold < 1000000000 &&
508         (s->flags & CODEC_FLAG_CLOSED_GOP)) {
509         av_log(avctx, AV_LOG_ERROR,
510                "closed gop with scene change detection are not supported yet, "
511                "set threshold to 1000000000\n");
512         return -1;
513     }
514
515     if (s->flags & CODEC_FLAG_LOW_DELAY) {
516         if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
517             av_log(avctx, AV_LOG_ERROR,
518                   "low delay forcing is only available for mpeg2\n");
519             return -1;
520         }
521         if (s->max_b_frames != 0) {
522             av_log(avctx, AV_LOG_ERROR,
523                    "b frames cannot be used with low delay\n");
524             return -1;
525         }
526     }
527
528     if (s->q_scale_type == 1) {
529         if (avctx->qmax > 12) {
530             av_log(avctx, AV_LOG_ERROR,
531                    "non linear quant only supports qmax <= 12 currently\n");
532             return -1;
533         }
534     }
535
536     if (s->avctx->thread_count > 1         &&
537         s->codec_id != AV_CODEC_ID_MPEG4      &&
538         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
539         s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
540         (s->codec_id != AV_CODEC_ID_H263P)) {
541         av_log(avctx, AV_LOG_ERROR,
542                "multi threaded encoding not supported by codec\n");
543         return -1;
544     }
545
546     if (s->avctx->thread_count < 1) {
547         av_log(avctx, AV_LOG_ERROR,
548                "automatic thread number detection not supported by codec,"
549                "patch welcome\n");
550         return -1;
551     }
552
553     if (s->avctx->thread_count > 1)
554         s->rtp_mode = 1;
555
556     if (!avctx->time_base.den || !avctx->time_base.num) {
557         av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
558         return -1;
559     }
560
561     i = (INT_MAX / 2 + 128) >> 8;
562     if (avctx->me_threshold >= i) {
563         av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n",
564                i - 1);
565         return -1;
566     }
567     if (avctx->mb_threshold >= i) {
568         av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
569                i - 1);
570         return -1;
571     }
572
573     if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
574         av_log(avctx, AV_LOG_INFO,
575                "notice: b_frame_strategy only affects the first pass\n");
576         avctx->b_frame_strategy = 0;
577     }
578
579     i = av_gcd(avctx->time_base.den, avctx->time_base.num);
580     if (i > 1) {
581         av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
582         avctx->time_base.den /= i;
583         avctx->time_base.num /= i;
584         //return -1;
585     }
586
587     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
588         s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
589         // (a + x * 3 / 8) / x
590         s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
591         s->inter_quant_bias = 0;
592     } else {
593         s->intra_quant_bias = 0;
594         // (a - x / 4) / x
595         s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
596     }
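    /* Editor's note: relative to 1 << QUANT_BIAS_SHIFT, the intra bias above
     * is 3/8 and the inter bias is -1/4, matching the rounding described by
     * the "(a + x * 3 / 8) / x" and "(a - x / 4) / x" comments: intra
     * coefficients are rounded up more aggressively than inter ones. */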
597
598     if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
599         s->intra_quant_bias = avctx->intra_quant_bias;
600     if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
601         s->inter_quant_bias = avctx->inter_quant_bias;
602
603     avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
604                                   &chroma_v_shift);
605
606     if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
607         s->avctx->time_base.den > (1 << 16) - 1) {
608         av_log(avctx, AV_LOG_ERROR,
609                "timebase %d/%d not supported by MPEG 4 standard, "
610                "the maximum admitted value for the timebase denominator "
611                "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
612                (1 << 16) - 1);
613         return -1;
614     }
615     s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
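    /* Editor's note: this is the number of bits needed to represent the values
     * 0 .. time_base.den - 1, e.g. a denominator of 30 gives
     * av_log2(29) + 1 == 5 bits; MPEG-4 uses it to size vop_time_increment. */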
616
617 #if FF_API_MPV_GLOBAL_OPTS
618     if (avctx->flags2 & CODEC_FLAG2_SKIP_RD)
619         s->mpv_flags |= FF_MPV_FLAG_SKIP_RD;
620     if (avctx->flags2 & CODEC_FLAG2_STRICT_GOP)
621         s->mpv_flags |= FF_MPV_FLAG_STRICT_GOP;
622     if (avctx->quantizer_noise_shaping)
623         s->quantizer_noise_shaping = avctx->quantizer_noise_shaping;
624 #endif
625
626     switch (avctx->codec->id) {
627     case AV_CODEC_ID_MPEG1VIDEO:
628         s->out_format = FMT_MPEG1;
629         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
630         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
631         break;
632     case AV_CODEC_ID_MPEG2VIDEO:
633         s->out_format = FMT_MPEG1;
634         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
635         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
636         s->rtp_mode   = 1;
637         break;
638     case AV_CODEC_ID_LJPEG:
639     case AV_CODEC_ID_MJPEG:
640         s->out_format = FMT_MJPEG;
641         s->intra_only = 1; /* force intra only for jpeg */
642         if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
643             avctx->pix_fmt   == PIX_FMT_BGRA) {
644             s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
645             s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
646             s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
647         } else {
648             s->mjpeg_vsample[0] = 2;
649             s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
650             s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
651             s->mjpeg_hsample[0] = 2;
652             s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
653             s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
654         }
655         if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
656             ff_mjpeg_encode_init(s) < 0)
657             return -1;
658         avctx->delay = 0;
659         s->low_delay = 1;
660         break;
661     case AV_CODEC_ID_H261:
662         if (!CONFIG_H261_ENCODER)
663             return -1;
664         if (ff_h261_get_picture_format(s->width, s->height) < 0) {
665             av_log(avctx, AV_LOG_ERROR,
666                    "The specified picture size of %dx%d is not valid for the "
667                    "H.261 codec.\nValid sizes are 176x144, 352x288\n",
668                     s->width, s->height);
669             return -1;
670         }
671         s->out_format = FMT_H261;
672         avctx->delay  = 0;
673         s->low_delay  = 1;
674         break;
675     case AV_CODEC_ID_H263:
676         if (!CONFIG_H263_ENCODER)
677             return -1;
678         if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
679                              s->width, s->height) == 8) {
680             av_log(avctx, AV_LOG_INFO,
681                    "The specified picture size of %dx%d is not valid for "
682                    "the H.263 codec.\nValid sizes are 128x96, 176x144, "
683                    "352x288, 704x576, and 1408x1152."
684                    "Try H.263+.\n", s->width, s->height);
685             return -1;
686         }
687         s->out_format = FMT_H263;
688         avctx->delay  = 0;
689         s->low_delay  = 1;
690         break;
691     case AV_CODEC_ID_H263P:
692         s->out_format = FMT_H263;
693         s->h263_plus  = 1;
694         /* Fx */
695         s->h263_aic        = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
696         s->modified_quant  = s->h263_aic;
697         s->loop_filter     = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
698         s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
699
700         /* /Fx */
701         /* These are just to be sure */
702         avctx->delay = 0;
703         s->low_delay = 1;
704         break;
705     case AV_CODEC_ID_FLV1:
706         s->out_format      = FMT_H263;
707         s->h263_flv        = 2; /* format = 1; 11-bit codes */
708         s->unrestricted_mv = 1;
709         s->rtp_mode  = 0; /* don't allow GOB */
710         avctx->delay = 0;
711         s->low_delay = 1;
712         break;
713     case AV_CODEC_ID_RV10:
714         s->out_format = FMT_H263;
715         avctx->delay  = 0;
716         s->low_delay  = 1;
717         break;
718     case AV_CODEC_ID_RV20:
719         s->out_format      = FMT_H263;
720         avctx->delay       = 0;
721         s->low_delay       = 1;
722         s->modified_quant  = 1;
723         s->h263_aic        = 1;
724         s->h263_plus       = 1;
725         s->loop_filter     = 1;
726         s->unrestricted_mv = 0;
727         break;
728     case AV_CODEC_ID_MPEG4:
729         s->out_format      = FMT_H263;
730         s->h263_pred       = 1;
731         s->unrestricted_mv = 1;
732         s->low_delay       = s->max_b_frames ? 0 : 1;
733         avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
734         break;
735     case AV_CODEC_ID_MSMPEG4V2:
736         s->out_format      = FMT_H263;
737         s->h263_pred       = 1;
738         s->unrestricted_mv = 1;
739         s->msmpeg4_version = 2;
740         avctx->delay       = 0;
741         s->low_delay       = 1;
742         break;
743     case AV_CODEC_ID_MSMPEG4V3:
744         s->out_format        = FMT_H263;
745         s->h263_pred         = 1;
746         s->unrestricted_mv   = 1;
747         s->msmpeg4_version   = 3;
748         s->flipflop_rounding = 1;
749         avctx->delay         = 0;
750         s->low_delay         = 1;
751         break;
752     case AV_CODEC_ID_WMV1:
753         s->out_format        = FMT_H263;
754         s->h263_pred         = 1;
755         s->unrestricted_mv   = 1;
756         s->msmpeg4_version   = 4;
757         s->flipflop_rounding = 1;
758         avctx->delay         = 0;
759         s->low_delay         = 1;
760         break;
761     case AV_CODEC_ID_WMV2:
762         s->out_format        = FMT_H263;
763         s->h263_pred         = 1;
764         s->unrestricted_mv   = 1;
765         s->msmpeg4_version   = 5;
766         s->flipflop_rounding = 1;
767         avctx->delay         = 0;
768         s->low_delay         = 1;
769         break;
770     default:
771         return -1;
772     }
773
774     avctx->has_b_frames = !s->low_delay;
775
776     s->encoding = 1;
777
778     s->progressive_frame    =
779     s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
780                                                 CODEC_FLAG_INTERLACED_ME) ||
781                                 s->alternate_scan);
782
783     /* init */
784     if (ff_MPV_common_init(s) < 0)
785         return -1;
786
787     if (ARCH_X86)
788         ff_MPV_encode_init_x86(s);
789
790     if (!s->dct_quantize)
791         s->dct_quantize = ff_dct_quantize_c;
792     if (!s->denoise_dct)
793         s->denoise_dct  = denoise_dct_c;
794     s->fast_dct_quantize = s->dct_quantize;
795     if (avctx->trellis)
796         s->dct_quantize  = dct_quantize_trellis_c;
797
798     if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
799         s->chroma_qscale_table = ff_h263_chroma_qscale_table;
800
801     s->quant_precision = 5;
802
803     ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
804     ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
805
806     if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
807         ff_h261_encode_init(s);
808     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
809         ff_h263_encode_init(s);
810     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
811         ff_msmpeg4_encode_init(s);
812     if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
813         && s->out_format == FMT_MPEG1)
814         ff_mpeg1_encode_init(s);
815
816     /* init q matrix */
817     for (i = 0; i < 64; i++) {
818         int j = s->dsp.idct_permutation[i];
819         if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
820             s->mpeg_quant) {
821             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
822             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
823         } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
824             s->intra_matrix[j] =
825             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
826         } else {
827             /* mpeg1/2 */
828             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
829             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
830         }
831         if (s->avctx->intra_matrix)
832             s->intra_matrix[j] = s->avctx->intra_matrix[i];
833         if (s->avctx->inter_matrix)
834             s->inter_matrix[j] = s->avctx->inter_matrix[i];
835     }
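    /* Editor's note: the matrices are stored pre-permuted through
     * dsp.idct_permutation, i.e. in the coefficient order expected by the
     * selected (I)DCT implementation, so no extra reordering is needed when
     * they are used during quantization. */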
836
837     /* precompute matrix */
838     /* for mjpeg, we do include qscale in the matrix */
839     if (s->out_format != FMT_MJPEG) {
840         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
841                           s->intra_matrix, s->intra_quant_bias, avctx->qmin,
842                           31, 1);
843         ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
844                           s->inter_matrix, s->inter_quant_bias, avctx->qmin,
845                           31, 0);
846     }
847
848     if (ff_rate_control_init(s) < 0)
849         return -1;
850
851     return 0;
852 }
853
854 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
855 {
856     MpegEncContext *s = avctx->priv_data;
857
858     ff_rate_control_uninit(s);
859
860     ff_MPV_common_end(s);
861     if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
862         s->out_format == FMT_MJPEG)
863         ff_mjpeg_encode_close(s);
864
865     av_freep(&avctx->extradata);
866
867     return 0;
868 }
869
870 static int get_sae(uint8_t *src, int ref, int stride)
871 {
872     int x,y;
873     int acc = 0;
874
875     for (y = 0; y < 16; y++) {
876         for (x = 0; x < 16; x++) {
877             acc += FFABS(src[x + y * stride] - ref);
878         }
879     }
880
881     return acc;
882 }
883
884 static int get_intra_count(MpegEncContext *s, uint8_t *src,
885                            uint8_t *ref, int stride)
886 {
887     int x, y, w, h;
888     int acc = 0;
889
890     w = s->width  & ~15;
891     h = s->height & ~15;
892
893     for (y = 0; y < h; y += 16) {
894         for (x = 0; x < w; x += 16) {
895             int offset = x + y * stride;
896             int sad  = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
897                                      16);
898             int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
899             int sae  = get_sae(src + offset, mean, stride);
900
901             acc += sae + 500 < sad;
902         }
903     }
904     return acc;
905 }
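/* Editor's note: get_sae() measures how far a 16x16 block deviates from its
 * own mean (a cheap flatness metric) and get_intra_count() counts macroblocks
 * whose deviation from their mean is clearly smaller than their SAD against
 * the reference, i.e. blocks that would likely be cheaper to code as intra.
 * b_frame_strategy == 1 uses this count to pick the number of B-frames. */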
906
907
908 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
909 {
910     AVFrame *pic = NULL;
911     int64_t pts;
912     int i;
913     const int encoding_delay = s->max_b_frames ? s->max_b_frames :
914                                                  (s->low_delay ? 0 : 1);
915     int direct = 1;
916
917     if (pic_arg) {
918         pts = pic_arg->pts;
919         pic_arg->display_picture_number = s->input_picture_number++;
920
921         if (pts != AV_NOPTS_VALUE) {
922             if (s->user_specified_pts != AV_NOPTS_VALUE) {
923                 int64_t time = pts;
924                 int64_t last = s->user_specified_pts;
925
926                 if (time <= last) {
927                     av_log(s->avctx, AV_LOG_ERROR,
928                            "Error, Invalid timestamp=%"PRId64", "
929                            "last=%"PRId64"\n", pts, s->user_specified_pts);
930                     return -1;
931                 }
932
933                 if (!s->low_delay && pic_arg->display_picture_number == 1)
934                     s->dts_delta = time - last;
935             }
936             s->user_specified_pts = pts;
937         } else {
938             if (s->user_specified_pts != AV_NOPTS_VALUE) {
939                 s->user_specified_pts =
940                 pts = s->user_specified_pts + 1;
941                 av_log(s->avctx, AV_LOG_INFO,
942                        "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
943                        pts);
944             } else {
945                 pts = pic_arg->display_picture_number;
946             }
947         }
948     }
949
950   if (pic_arg) {
951     if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
952         direct = 0;
953     if (pic_arg->linesize[0] != s->linesize)
954         direct = 0;
955     if (pic_arg->linesize[1] != s->uvlinesize)
956         direct = 0;
957     if (pic_arg->linesize[2] != s->uvlinesize)
958         direct = 0;
959
960     //av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0],
961     //       pic_arg->linesize[1], s->linesize, s->uvlinesize);
962
963     if (direct) {
964         i = ff_find_unused_picture(s, 1);
965         if (i < 0)
966             return i;
967
968         pic = &s->picture[i].f;
969         pic->reference = 3;
970
971         for (i = 0; i < 4; i++) {
972             pic->data[i]     = pic_arg->data[i];
973             pic->linesize[i] = pic_arg->linesize[i];
974         }
975         if (ff_alloc_picture(s, (Picture *) pic, 1) < 0) {
976             return -1;
977         }
978     } else {
979         i = ff_find_unused_picture(s, 0);
980         if (i < 0)
981             return i;
982
983         pic = &s->picture[i].f;
984         pic->reference = 3;
985
986         if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
987             return -1;
988         }
989
990         if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
991             pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
992             pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
993             // empty
994         } else {
995             int h_chroma_shift, v_chroma_shift;
996             avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift,
997                                           &v_chroma_shift);
998
999             for (i = 0; i < 3; i++) {
1000                 int src_stride = pic_arg->linesize[i];
1001                 int dst_stride = i ? s->uvlinesize : s->linesize;
1002                 int h_shift = i ? h_chroma_shift : 0;
1003                 int v_shift = i ? v_chroma_shift : 0;
1004                 int w = s->width  >> h_shift;
1005                 int h = s->height >> v_shift;
1006                 uint8_t *src = pic_arg->data[i];
1007                 uint8_t *dst = pic->data[i];
1008
1009                 if (!s->avctx->rc_buffer_size)
1010                     dst += INPLACE_OFFSET;
1011
1012                 if (src_stride == dst_stride)
1013                     memcpy(dst, src, src_stride * h);
1014                 else {
1015                     while (h--) {
1016                         memcpy(dst, src, w);
1017                         dst += dst_stride;
1018                         src += src_stride;
1019                     }
1020                 }
1021             }
1022         }
1023     }
1024     copy_picture_attributes(s, pic, pic_arg);
1025     pic->pts = pts; // we set this here to avoid modifying pic_arg
1026   }
1027
1028     /* shift buffer entries */
1029     for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1030         s->input_picture[i - 1] = s->input_picture[i];
1031
1032     s->input_picture[encoding_delay] = (Picture*) pic;
1033
1034     return 0;
1035 }
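/* Editor's note: load_input_picture() either wraps the caller's buffers
 * directly (when the strides match and the input may be preserved) or copies
 * the frame into an internal Picture; in both cases the frame is stored at
 * input_picture[encoding_delay], giving the reordering code enough
 * look-ahead for B-frame decisions. */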
1036
1037 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1038 {
1039     int x, y, plane;
1040     int score = 0;
1041     int64_t score64 = 0;
1042
1043     for (plane = 0; plane < 3; plane++) {
1044         const int stride = p->f.linesize[plane];
1045         const int bw = plane ? 1 : 2;
1046         for (y = 0; y < s->mb_height * bw; y++) {
1047             for (x = 0; x < s->mb_width * bw; x++) {
1048                 int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
1049                 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
1050                 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
1051                 int v   = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1052
1053                 switch (s->avctx->frame_skip_exp) {
1054                 case 0: score    =  FFMAX(score, v);          break;
1055                 case 1: score   += FFABS(v);                  break;
1056                 case 2: score   += v * v;                     break;
1057                 case 3: score64 += FFABS(v * v * (int64_t)v); break;
1058                 case 4: score64 += v * v * (int64_t)(v * v);  break;
1059                 }
1060             }
1061         }
1062     }
1063
1064     if (score)
1065         score64 = score;
1066
1067     if (score64 < s->avctx->frame_skip_threshold)
1068         return 1;
1069     if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
1070         return 1;
1071     return 0;
1072 }
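/* Editor's note: skip_check() compares the candidate frame with the last
 * non-B reference in 8x8 blocks using frame_skip_cmp and accumulates the
 * result according to frame_skip_exp (max, sum of |v|, sum of v*v, ...).
 * The frame is skipped when the score stays below frame_skip_threshold or
 * below frame_skip_factor * lambda / 256. */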
1073
1074 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1075 {
1076     AVPacket pkt = { 0 };
1077     int ret, got_output;
1078
1079     av_init_packet(&pkt);
1080     ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1081     if (ret < 0)
1082         return ret;
1083
1084     ret = pkt.size;
1085     av_free_packet(&pkt);
1086     return ret;
1087 }
1088
1089 static int estimate_best_b_count(MpegEncContext *s)
1090 {
1091     AVCodec *codec    = avcodec_find_encoder(s->avctx->codec_id);
1092     AVCodecContext *c = avcodec_alloc_context3(NULL);
1093     AVFrame input[FF_MAX_B_FRAMES + 2];
1094     const int scale = s->avctx->brd_scale;
1095     int i, j, out_size, p_lambda, b_lambda, lambda2;
1096     int64_t best_rd  = INT64_MAX;
1097     int best_b_count = -1;
1098
1099     assert(scale >= 0 && scale <= 3);
1100
1101     //emms_c();
1102     //s->next_picture_ptr->quality;
1103     p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1104     //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1105     b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1106     if (!b_lambda) // FIXME we should do this somewhere else
1107         b_lambda = p_lambda;
1108     lambda2  = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1109                FF_LAMBDA_SHIFT;
1110
1111     c->width        = s->width  >> scale;
1112     c->height       = s->height >> scale;
1113     c->flags        = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1114                       CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1115     c->flags       |= s->avctx->flags & CODEC_FLAG_QPEL;
1116     c->mb_decision  = s->avctx->mb_decision;
1117     c->me_cmp       = s->avctx->me_cmp;
1118     c->mb_cmp       = s->avctx->mb_cmp;
1119     c->me_sub_cmp   = s->avctx->me_sub_cmp;
1120     c->pix_fmt      = PIX_FMT_YUV420P;
1121     c->time_base    = s->avctx->time_base;
1122     c->max_b_frames = s->max_b_frames;
1123
1124     if (avcodec_open2(c, codec, NULL) < 0)
1125         return -1;
1126
1127     for (i = 0; i < s->max_b_frames + 2; i++) {
1128         int ysize = c->width * c->height;
1129         int csize = (c->width / 2) * (c->height / 2);
1130         Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1131                                                 s->next_picture_ptr;
1132
1133         avcodec_get_frame_defaults(&input[i]);
1134         input[i].data[0]     = av_malloc(ysize + 2 * csize);
1135         input[i].data[1]     = input[i].data[0] + ysize;
1136         input[i].data[2]     = input[i].data[1] + csize;
1137         input[i].linesize[0] = c->width;
1138         input[i].linesize[1] =
1139         input[i].linesize[2] = c->width / 2;
1140
1141         if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1142             pre_input = *pre_input_ptr;
1143
1144             if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
1145                 pre_input.f.data[0] += INPLACE_OFFSET;
1146                 pre_input.f.data[1] += INPLACE_OFFSET;
1147                 pre_input.f.data[2] += INPLACE_OFFSET;
1148             }
1149
1150             s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
1151                                  pre_input.f.data[0], pre_input.f.linesize[0],
1152                                  c->width,      c->height);
1153             s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
1154                                  pre_input.f.data[1], pre_input.f.linesize[1],
1155                                  c->width >> 1, c->height >> 1);
1156             s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
1157                                  pre_input.f.data[2], pre_input.f.linesize[2],
1158                                  c->width >> 1, c->height >> 1);
1159         }
1160     }
1161
1162     for (j = 0; j < s->max_b_frames + 1; j++) {
1163         int64_t rd = 0;
1164
1165         if (!s->input_picture[j])
1166             break;
1167
1168         c->error[0] = c->error[1] = c->error[2] = 0;
1169
1170         input[0].pict_type = AV_PICTURE_TYPE_I;
1171         input[0].quality   = 1 * FF_QP2LAMBDA;
1172
1173         out_size = encode_frame(c, &input[0]);
1174
1175         //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1176
1177         for (i = 0; i < s->max_b_frames + 1; i++) {
1178             int is_p = i % (j + 1) == j || i == s->max_b_frames;
1179
1180             input[i + 1].pict_type = is_p ?
1181                                      AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1182             input[i + 1].quality   = is_p ? p_lambda : b_lambda;
1183
1184             out_size = encode_frame(c, &input[i + 1]);
1185
1186             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1187         }
1188
1189         /* get the delayed frames */
1190         while (out_size) {
1191             out_size = encode_frame(c, NULL);
1192             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1193         }
1194
1195         rd += c->error[0] + c->error[1] + c->error[2];
1196
1197         if (rd < best_rd) {
1198             best_rd = rd;
1199             best_b_count = j;
1200         }
1201     }
1202
1203     avcodec_close(c);
1204     av_freep(&c);
1205
1206     for (i = 0; i < s->max_b_frames + 2; i++) {
1207         av_freep(&input[i].data[0]);
1208     }
1209
1210     return best_b_count;
1211 }
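/* Editor's note: estimate_best_b_count() implements b_frame_strategy == 2 as
 * a brute-force search: the buffered input frames are encoded, downscaled by
 * brd_scale, once for every candidate number of consecutive B-frames, and the
 * count with the smallest bits * lambda2 + SSE cost is returned. */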
1212
1213 static int select_input_picture(MpegEncContext *s)
1214 {
1215     int i;
1216
1217     for (i = 1; i < MAX_PICTURE_COUNT; i++)
1218         s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1219     s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1220
1221     /* set next picture type & ordering */
1222     if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
1223         if (/*s->picture_in_gop_number >= s->gop_size ||*/
1224             s->next_picture_ptr == NULL || s->intra_only) {
1225             s->reordered_input_picture[0] = s->input_picture[0];
1226             s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1227             s->reordered_input_picture[0]->f.coded_picture_number =
1228                 s->coded_picture_number++;
1229         } else {
1230             int b_frames;
1231
1232             if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1233                 if (s->picture_in_gop_number < s->gop_size &&
1234                     skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1235                     // FIXME check that the gop check above is +-1 correct
1236                     if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
1237                         for (i = 0; i < 4; i++)
1238                             s->input_picture[0]->f.data[i] = NULL;
1239                         s->input_picture[0]->f.type = 0;
1240                     } else {
1241                         assert(s->input_picture[0]->f.type == FF_BUFFER_TYPE_USER ||
1242                                s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);
1243
1244                         s->avctx->release_buffer(s->avctx,
1245                                                  &s->input_picture[0]->f);
1246                     }
1247
1248                     emms_c();
1249                     ff_vbv_update(s, 0);
1250
1251                     goto no_output_pic;
1252                 }
1253             }
1254
1255             if (s->flags & CODEC_FLAG_PASS2) {
1256                 for (i = 0; i < s->max_b_frames + 1; i++) {
1257                     int pict_num = s->input_picture[0]->f.display_picture_number + i;
1258
1259                     if (pict_num >= s->rc_context.num_entries)
1260                         break;
1261                     if (!s->input_picture[i]) {
1262                         s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1263                         break;
1264                     }
1265
1266                     s->input_picture[i]->f.pict_type =
1267                         s->rc_context.entry[pict_num].new_pict_type;
1268                 }
1269             }
1270
1271             if (s->avctx->b_frame_strategy == 0) {
1272                 b_frames = s->max_b_frames;
1273                 while (b_frames && !s->input_picture[b_frames])
1274                     b_frames--;
1275             } else if (s->avctx->b_frame_strategy == 1) {
1276                 for (i = 1; i < s->max_b_frames + 1; i++) {
1277                     if (s->input_picture[i] &&
1278                         s->input_picture[i]->b_frame_score == 0) {
1279                         s->input_picture[i]->b_frame_score =
1280                             get_intra_count(s,
1281                                             s->input_picture[i    ]->f.data[0],
1282                                             s->input_picture[i - 1]->f.data[0],
1283                                             s->linesize) + 1;
1284                     }
1285                 }
1286                 for (i = 0; i < s->max_b_frames + 1; i++) {
1287                     if (s->input_picture[i] == NULL ||
1288                         s->input_picture[i]->b_frame_score - 1 >
1289                             s->mb_num / s->avctx->b_sensitivity)
1290                         break;
1291                 }
1292
1293                 b_frames = FFMAX(0, i - 1);
1294
1295                 /* reset scores */
1296                 for (i = 0; i < b_frames + 1; i++) {
1297                     s->input_picture[i]->b_frame_score = 0;
1298                 }
1299             } else if (s->avctx->b_frame_strategy == 2) {
1300                 b_frames = estimate_best_b_count(s);
1301             } else {
1302                 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1303                 b_frames = 0;
1304             }
1305
1306             emms_c();
1307
1308             for (i = b_frames - 1; i >= 0; i--) {
1309                 int type = s->input_picture[i]->f.pict_type;
1310                 if (type && type != AV_PICTURE_TYPE_B)
1311                     b_frames = i;
1312             }
1313             if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1314                 b_frames == s->max_b_frames) {
1315                 av_log(s->avctx, AV_LOG_ERROR,
1316                        "warning, too many b frames in a row\n");
1317             }
1318
1319             if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1320                 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1321                     s->gop_size > s->picture_in_gop_number) {
1322                     b_frames = s->gop_size - s->picture_in_gop_number - 1;
1323                 } else {
1324                     if (s->flags & CODEC_FLAG_CLOSED_GOP)
1325                         b_frames = 0;
1326                     s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1327                 }
1328             }
1329
1330             if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1331                 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
1332                 b_frames--;
1333
1334             s->reordered_input_picture[0] = s->input_picture[b_frames];
1335             if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1336                 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1337             s->reordered_input_picture[0]->f.coded_picture_number =
1338                 s->coded_picture_number++;
1339             for (i = 0; i < b_frames; i++) {
1340                 s->reordered_input_picture[i + 1] = s->input_picture[i];
1341                 s->reordered_input_picture[i + 1]->f.pict_type =
1342                     AV_PICTURE_TYPE_B;
1343                 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1344                     s->coded_picture_number++;
1345             }
1346         }
1347     }
1348 no_output_pic:
1349     if (s->reordered_input_picture[0]) {
1350         s->reordered_input_picture[0]->f.reference =
1351            s->reordered_input_picture[0]->f.pict_type !=
1352                AV_PICTURE_TYPE_B ? 3 : 0;
1353
1354         ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
1355
1356         if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED ||
1357             s->avctx->rc_buffer_size) {
1358             // input is a shared pix, so we can't modify it -> alloc a new
1359             // one & ensure that the shared one is reusable
1360
1361             Picture *pic;
1362             int i = ff_find_unused_picture(s, 0);
1363             if (i < 0)
1364                 return i;
1365             pic = &s->picture[i];
1366
1367             pic->f.reference = s->reordered_input_picture[0]->f.reference;
1368             if (ff_alloc_picture(s, pic, 0) < 0) {
1369                 return -1;
1370             }
1371
1372             /* mark us unused / free shared pic */
1373             if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
1374                 s->avctx->release_buffer(s->avctx,
1375                                          &s->reordered_input_picture[0]->f);
1376             for (i = 0; i < 4; i++)
1377                 s->reordered_input_picture[0]->f.data[i] = NULL;
1378             s->reordered_input_picture[0]->f.type = 0;
1379
1380             copy_picture_attributes(s, &pic->f,
1381                                     &s->reordered_input_picture[0]->f);
1382
1383             s->current_picture_ptr = pic;
1384         } else {
1385             // input is not a shared pix -> reuse buffer for current_pix
1386
1387             assert(s->reordered_input_picture[0]->f.type ==
1388                        FF_BUFFER_TYPE_USER ||
1389                    s->reordered_input_picture[0]->f.type ==
1390                        FF_BUFFER_TYPE_INTERNAL);
1391
1392             s->current_picture_ptr = s->reordered_input_picture[0];
1393             for (i = 0; i < 4; i++) {
1394                 s->new_picture.f.data[i] += INPLACE_OFFSET;
1395             }
1396         }
1397         ff_copy_picture(&s->current_picture, s->current_picture_ptr);
1398
1399         s->picture_number = s->new_picture.f.display_picture_number;
1400         //printf("dpn:%d\n", s->picture_number);
1401     } else {
1402         memset(&s->new_picture, 0, sizeof(Picture));
1403     }
1404     return 0;
1405 }
1406
1407 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1408                           const AVFrame *pic_arg, int *got_packet)
1409 {
1410     MpegEncContext *s = avctx->priv_data;
1411     int i, stuffing_count, ret;
1412     int context_count = s->slice_context_count;
1413
1414     s->picture_in_gop_number++;
1415
1416     if (load_input_picture(s, pic_arg) < 0)
1417         return -1;
1418
1419     if (select_input_picture(s) < 0) {
1420         return -1;
1421     }
1422
1423     /* output? */
1424     if (s->new_picture.f.data[0]) {
1425         if (!pkt->data &&
1426             (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
1427             return ret;
1428         if (s->mb_info) {
1429             s->mb_info_ptr = av_packet_new_side_data(pkt,
1430                                  AV_PKT_DATA_H263_MB_INFO,
1431                                  s->mb_width*s->mb_height*12);
1432             s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1433         }
1434
1435         for (i = 0; i < context_count; i++) {
1436             int start_y = s->thread_context[i]->start_mb_y;
1437             int   end_y = s->thread_context[i]->  end_mb_y;
1438             int h       = s->mb_height;
1439             uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1440             uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) *   end_y / h);
1441
1442             init_put_bits(&s->thread_context[i]->pb, start, end - start);
1443         }
1444
1445         s->pict_type = s->new_picture.f.pict_type;
1446         //emms_c();
1447         //printf("qs:%f %f %d\n", s->new_picture.quality,
1448         //       s->current_picture.quality, s->qscale);
1449         ff_MPV_frame_start(s, avctx);
1450 vbv_retry:
1451         if (encode_picture(s, s->picture_number) < 0)
1452             return -1;
1453
1454         avctx->header_bits = s->header_bits;
1455         avctx->mv_bits     = s->mv_bits;
1456         avctx->misc_bits   = s->misc_bits;
1457         avctx->i_tex_bits  = s->i_tex_bits;
1458         avctx->p_tex_bits  = s->p_tex_bits;
1459         avctx->i_count     = s->i_count;
1460         // FIXME f/b_count in avctx
1461         avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
1462         avctx->skip_count  = s->skip_count;
1463
1464         ff_MPV_frame_end(s);
1465
1466         if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1467             ff_mjpeg_encode_picture_trailer(s);
1468
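        /* VBV check: if the encoded frame does not fit into the space left in
         * the rate-control buffer and lambda may still grow, scale lambda up
         * (and the per-MB lambda table when adaptive quantization is on), undo
         * the per-frame state changed by encode_picture(), rewind the slice
         * bit writers and jump back to vbv_retry to re-encode the frame. */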
1469         if (avctx->rc_buffer_size) {
1470             RateControlContext *rcc = &s->rc_context;
1471             int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1472
1473             if (put_bits_count(&s->pb) > max_size &&
1474                 s->lambda < s->avctx->lmax) {
1475                 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1476                                        (s->qscale + 1) / s->qscale);
1477                 if (s->adaptive_quant) {
1478                     int i;
1479                     for (i = 0; i < s->mb_height * s->mb_stride; i++)
1480                         s->lambda_table[i] =
1481                             FFMAX(s->lambda_table[i] + 1,
1482                                   s->lambda_table[i] * (s->qscale + 1) /
1483                                   s->qscale);
1484                 }
1485                 s->mb_skipped = 0;        // done in MPV_frame_start()
1486                 // the changes below were made in encode_picture(), so undo them before re-encoding
1487                 if (s->pict_type == AV_PICTURE_TYPE_P) {
1488                     if (s->flipflop_rounding          ||
1489                         s->codec_id == AV_CODEC_ID_H263P ||
1490                         s->codec_id == AV_CODEC_ID_MPEG4)
1491                         s->no_rounding ^= 1;
1492                 }
1493                 if (s->pict_type != AV_PICTURE_TYPE_B) {
1494                     s->time_base       = s->last_time_base;
1495                     s->last_non_b_time = s->time - s->pp_time;
1496                 }
1497                 for (i = 0; i < context_count; i++) {
1498                     PutBitContext *pb = &s->thread_context[i]->pb;
1499                     init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1500                 }
1501                 goto vbv_retry;
1502             }
1503
1504             assert(s->avctx->rc_max_rate);
1505         }
1506
1507         if (s->flags & CODEC_FLAG_PASS1)
1508             ff_write_pass1_stats(s);
1509
1510         for (i = 0; i < 4; i++) {
1511             s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1512             avctx->error[i] += s->current_picture_ptr->f.error[i];
1513         }
1514
1515         if (s->flags & CODEC_FLAG_PASS1)
1516             assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1517                    avctx->i_tex_bits + avctx->p_tex_bits ==
1518                        put_bits_count(&s->pb));
1519         flush_put_bits(&s->pb);
1520         s->frame_bits  = put_bits_count(&s->pb);
1521
1522         stuffing_count = ff_vbv_update(s, s->frame_bits);
1523         if (stuffing_count) {
1524             if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1525                     stuffing_count + 50) {
1526                 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1527                 return -1;
1528             }
1529
1530             switch (s->codec_id) {
1531             case AV_CODEC_ID_MPEG1VIDEO:
1532             case AV_CODEC_ID_MPEG2VIDEO:
1533                 while (stuffing_count--) {
1534                     put_bits(&s->pb, 8, 0);
1535                 }
1536             break;
1537             case AV_CODEC_ID_MPEG4:
1538                 put_bits(&s->pb, 16, 0);
1539                 put_bits(&s->pb, 16, 0x1C3);
1540                 stuffing_count -= 4;
1541                 while (stuffing_count--) {
1542                     put_bits(&s->pb, 8, 0xFF);
1543                 }
1544             break;
1545             default:
1546                 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1547             }
1548             flush_put_bits(&s->pb);
1549             s->frame_bits  = put_bits_count(&s->pb);
1550         }
1551
1552         /* update mpeg1/2 vbv_delay for CBR */
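        /* The 16-bit vbv_delay field of the picture header (reached through
         * vbv_delay_ptr and split 3+8+5 bits across three bytes) is rewritten
         * with the decoder buffer occupancy in 90 kHz ticks; avctx->vbv_delay
         * carries the same value in 27 MHz units. */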
1553         if (s->avctx->rc_max_rate                          &&
1554             s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1555             s->out_format == FMT_MPEG1                     &&
1556             90000LL * (avctx->rc_buffer_size - 1) <=
1557                 s->avctx->rc_max_rate * 0xFFFFLL) {
1558             int vbv_delay, min_delay;
1559             double inbits  = s->avctx->rc_max_rate *
1560                              av_q2d(s->avctx->time_base);
1561             int    minbits = s->frame_bits - 8 *
1562                              (s->vbv_delay_ptr - s->pb.buf - 1);
1563             double bits    = s->rc_context.buffer_index + minbits - inbits;
1564
1565             if (bits < 0)
1566                 av_log(s->avctx, AV_LOG_ERROR,
1567                        "Internal error, negative bits\n");
1568
1569             assert(s->repeat_first_field == 0);
1570
1571             vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1572             min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1573                         s->avctx->rc_max_rate;
1574
1575             vbv_delay = FFMAX(vbv_delay, min_delay);
1576
1577             assert(vbv_delay < 0xFFFF);
1578
1579             s->vbv_delay_ptr[0] &= 0xF8;
1580             s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1581             s->vbv_delay_ptr[1]  = vbv_delay >> 5;
1582             s->vbv_delay_ptr[2] &= 0x07;
1583             s->vbv_delay_ptr[2] |= vbv_delay << 3;
1584             avctx->vbv_delay     = vbv_delay * 300;
1585         }
1586         s->total_bits     += s->frame_bits;
1587         avctx->frame_bits  = s->frame_bits;
1588
1589         pkt->pts = s->current_picture.f.pts;
1590         if (!s->low_delay) {
1591             if (!s->current_picture.f.coded_picture_number)
1592                 pkt->dts = pkt->pts - s->dts_delta;
1593             else
1594                 pkt->dts = s->reordered_pts;
1595             s->reordered_pts = s->input_picture[0]->f.pts;
1596         } else
1597             pkt->dts = pkt->pts;
1598         if (s->current_picture.f.key_frame)
1599             pkt->flags |= AV_PKT_FLAG_KEY;
1600         if (s->mb_info)
1601             av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1602     } else {
1603         s->frame_bits = 0;
1604     }
1605     assert((s->frame_bits & 7) == 0);
1606
1607     pkt->size = s->frame_bits / 8;
1608     *got_packet = !!pkt->size;
1609     return 0;
1610 }
1611
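/*
 * Single coefficient elimination: if every nonzero coefficient in the block
 * has magnitude 1 and the run-length weights from tab[] (larger for short
 * zero runs) sum to less than the threshold, the whole block is zeroed so it
 * costs no bits.  A negative threshold makes the DC coefficient eligible for
 * elimination as well.
 */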
1612 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1613                                                 int n, int threshold)
1614 {
1615     static const char tab[64] = {
1616         3, 2, 2, 1, 1, 1, 1, 1,
1617         1, 1, 1, 1, 1, 1, 1, 1,
1618         1, 1, 1, 1, 1, 1, 1, 1,
1619         0, 0, 0, 0, 0, 0, 0, 0,
1620         0, 0, 0, 0, 0, 0, 0, 0,
1621         0, 0, 0, 0, 0, 0, 0, 0,
1622         0, 0, 0, 0, 0, 0, 0, 0,
1623         0, 0, 0, 0, 0, 0, 0, 0
1624     };
1625     int score = 0;
1626     int run = 0;
1627     int i;
1628     DCTELEM *block = s->block[n];
1629     const int last_index = s->block_last_index[n];
1630     int skip_dc;
1631
1632     if (threshold < 0) {
1633         skip_dc = 0;
1634         threshold = -threshold;
1635     } else
1636         skip_dc = 1;
1637
1638     /* Is everything we could set to zero already zero? */
1639     if (last_index <= skip_dc - 1)
1640         return;
1641
1642     for (i = 0; i <= last_index; i++) {
1643         const int j = s->intra_scantable.permutated[i];
1644         const int level = FFABS(block[j]);
1645         if (level == 1) {
1646             if (skip_dc && i == 0)
1647                 continue;
1648             score += tab[run];
1649             run = 0;
1650         } else if (level > 1) {
1651             return;
1652         } else {
1653             run++;
1654         }
1655     }
1656     if (score >= threshold)
1657         return;
1658     for (i = skip_dc; i <= last_index; i++) {
1659         const int j = s->intra_scantable.permutated[i];
1660         block[j] = 0;
1661     }
1662     if (block[0])
1663         s->block_last_index[n] = 0;
1664     else
1665         s->block_last_index[n] = -1;
1666 }
1667
1668 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block,
1669                                int last_index)
1670 {
1671     int i;
1672     const int maxlevel = s->max_qcoeff;
1673     const int minlevel = s->min_qcoeff;
1674     int overflow = 0;
1675
1676     if (s->mb_intra) {
1677         i = 1; // skip clipping of intra dc
1678     } else
1679         i = 0;
1680
1681     for (; i <= last_index; i++) {
1682         const int j = s->intra_scantable.permutated[i];
1683         int level = block[j];
1684
1685         if (level > maxlevel) {
1686             level = maxlevel;
1687             overflow++;
1688         } else if (level < minlevel) {
1689             level = minlevel;
1690             overflow++;
1691         }
1692
1693         block[j] = level;
1694     }
1695
1696     if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1697         av_log(s->avctx, AV_LOG_INFO,
1698                "warning, clipping %d dct coefficients to %d..%d\n",
1699                overflow, minlevel, maxlevel);
1700 }
1701
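/*
 * Per-pixel activity weights for quantizer noise shaping: each weight equals
 * 36 * sqrt(count * sqr - sum * sum) / count over the clipped 3x3
 * neighbourhood, i.e. it is proportional to the local standard deviation,
 * and is later passed to dct_quantize_refine().
 */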
1702 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1703 {
1704     int x, y;
1705     // FIXME optimize
1706     for (y = 0; y < 8; y++) {
1707         for (x = 0; x < 8; x++) {
1708             int x2, y2;
1709             int sum = 0;
1710             int sqr = 0;
1711             int count = 0;
1712
1713             for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1714                 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1715                     int v = ptr[x2 + y2 * stride];
1716                     sum += v;
1717                     sqr += v * v;
1718                     count++;
1719                 }
1720             }
1721             weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
1722         }
1723     }
1724 }
1725
1726 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1727                                                 int motion_x, int motion_y,
1728                                                 int mb_block_height,
1729                                                 int mb_block_count)
1730 {
1731     int16_t weight[8][64];
1732     DCTELEM orig[8][64];
1733     const int mb_x = s->mb_x;
1734     const int mb_y = s->mb_y;
1735     int i;
1736     int skip_dct[8];
1737     int dct_offset = s->linesize * 8; // default for progressive frames
1738     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1739     int wrap_y, wrap_c;
1740
1741     for (i = 0; i < mb_block_count; i++)
1742         skip_dct[i] = s->skipdct;
1743
1744     if (s->adaptive_quant) {
1745         const int last_qp = s->qscale;
1746         const int mb_xy = mb_x + mb_y * s->mb_stride;
1747
1748         s->lambda = s->lambda_table[mb_xy];
1749         update_qscale(s);
1750
1751         if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1752             s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
1753             s->dquant = s->qscale - last_qp;
1754
1755             if (s->out_format == FMT_H263) {
1756                 s->dquant = av_clip(s->dquant, -2, 2);
1757
1758                 if (s->codec_id == AV_CODEC_ID_MPEG4) {
1759                     if (!s->mb_intra) {
1760                         if (s->pict_type == AV_PICTURE_TYPE_B) {
1761                             if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1762                                 s->dquant = 0;
1763                         }
1764                         if (s->mv_type == MV_TYPE_8X8)
1765                             s->dquant = 0;
1766                     }
1767                 }
1768             }
1769         }
1770         ff_set_qscale(s, last_qp + s->dquant);
1771     } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1772         ff_set_qscale(s, s->qscale + s->dquant);
1773
1774     wrap_y = s->linesize;
1775     wrap_c = s->uvlinesize;
1776     ptr_y  = s->new_picture.f.data[0] +
1777              (mb_y * 16 * wrap_y)              + mb_x * 16;
1778     ptr_cb = s->new_picture.f.data[1] +
1779              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1780     ptr_cr = s->new_picture.f.data[2] +
1781              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1782
1783     if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1784         uint8_t *ebuf = s->edge_emu_buffer + 32;
1785         s->dsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
1786                                 mb_y * 16, s->width, s->height);
1787         ptr_y = ebuf;
1788         s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
1789                                 mb_block_height, mb_x * 8, mb_y * 8,
1790                                 s->width >> 1, s->height >> 1);
1791         ptr_cb = ebuf + 18 * wrap_y;
1792         s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
1793                                 mb_block_height, mb_x * 8, mb_y * 8,
1794                                 s->width >> 1, s->height >> 1);
1795         ptr_cr = ebuf + 18 * wrap_y + 8;
1796     }
1797
1798     if (s->mb_intra) {
1799         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1800             int progressive_score, interlaced_score;
1801
1802             s->interlaced_dct = 0;
1803             progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1804                                                     NULL, wrap_y, 8) +
1805                                 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1806                                                     NULL, wrap_y, 8) - 400;
1807
1808             if (progressive_score > 0) {
1809                 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1810                                                        NULL, wrap_y * 2, 8) +
1811                                    s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1812                                                        NULL, wrap_y * 2, 8);
1813                 if (progressive_score > interlaced_score) {
1814                     s->interlaced_dct = 1;
1815
1816                     dct_offset = wrap_y;
1817                     wrap_y <<= 1;
1818                     if (s->chroma_format == CHROMA_422)
1819                         wrap_c <<= 1;
1820                 }
1821             }
1822         }
1823
1824         s->dsp.get_pixels(s->block[0], ptr_y                  , wrap_y);
1825         s->dsp.get_pixels(s->block[1], ptr_y              + 8 , wrap_y);
1826         s->dsp.get_pixels(s->block[2], ptr_y + dct_offset     , wrap_y);
1827         s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1828
1829         if (s->flags & CODEC_FLAG_GRAY) {
1830             skip_dct[4] = 1;
1831             skip_dct[5] = 1;
1832         } else {
1833             s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1834             s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1835             if (!s->chroma_y_shift) { /* 422 */
1836                 s->dsp.get_pixels(s->block[6],
1837                                   ptr_cb + (dct_offset >> 1), wrap_c);
1838                 s->dsp.get_pixels(s->block[7],
1839                                   ptr_cr + (dct_offset >> 1), wrap_c);
1840             }
1841         }
1842     } else {
1843         op_pixels_func (*op_pix)[4];
1844         qpel_mc_func (*op_qpix)[16];
1845         uint8_t *dest_y, *dest_cb, *dest_cr;
1846
1847         dest_y  = s->dest[0];
1848         dest_cb = s->dest[1];
1849         dest_cr = s->dest[2];
1850
1851         if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1852             op_pix  = s->dsp.put_pixels_tab;
1853             op_qpix = s->dsp.put_qpel_pixels_tab;
1854         } else {
1855             op_pix  = s->dsp.put_no_rnd_pixels_tab;
1856             op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1857         }
1858
1859         if (s->mv_dir & MV_DIR_FORWARD) {
1860             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1861                           s->last_picture.f.data,
1862                           op_pix, op_qpix);
1863             op_pix  = s->dsp.avg_pixels_tab;
1864             op_qpix = s->dsp.avg_qpel_pixels_tab;
1865         }
1866         if (s->mv_dir & MV_DIR_BACKWARD) {
1867             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1868                           s->next_picture.f.data,
1869                           op_pix, op_qpix);
1870         }
1871
1872         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1873             int progressive_score, interlaced_score;
1874
1875             s->interlaced_dct = 0;
1876             progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1877                                                     ptr_y,              wrap_y,
1878                                                     8) +
1879                                 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1880                                                     ptr_y + wrap_y * 8, wrap_y,
1881                                                     8) - 400;
1882
1883             if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1884                 progressive_score -= 400;
1885
1886             if (progressive_score > 0) {
1887                 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1888                                                        ptr_y,
1889                                                        wrap_y * 2, 8) +
1890                                    s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1891                                                        ptr_y + wrap_y,
1892                                                        wrap_y * 2, 8);
1893
1894                 if (progressive_score > interlaced_score) {
1895                     s->interlaced_dct = 1;
1896
1897                     dct_offset = wrap_y;
1898                     wrap_y <<= 1;
1899                     if (s->chroma_format == CHROMA_422)
1900                         wrap_c <<= 1;
1901                 }
1902             }
1903         }
1904
1905         s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1906         s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1907         s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1908                            dest_y + dct_offset, wrap_y);
1909         s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1910                            dest_y + dct_offset + 8, wrap_y);
1911
1912         if (s->flags & CODEC_FLAG_GRAY) {
1913             skip_dct[4] = 1;
1914             skip_dct[5] = 1;
1915         } else {
1916             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1917             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1918             if (!s->chroma_y_shift) { /* 422 */
1919                 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1920                                    dest_cb + (dct_offset >> 1), wrap_c);
1921                 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1922                                    dest_cr + (dct_offset >> 1), wrap_c);
1923             }
1924         }
1925         /* pre quantization */
1926         if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1927                 2 * s->qscale * s->qscale) {
1928             // FIXME optimize
1929             if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1930                               wrap_y, 8) < 20 * s->qscale)
1931                 skip_dct[0] = 1;
1932             if (s->dsp.sad[1](NULL, ptr_y + 8,
1933                               dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1934                 skip_dct[1] = 1;
1935             if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1936                               dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1937                 skip_dct[2] = 1;
1938             if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1939                               dest_y + dct_offset + 8,
1940                               wrap_y, 8) < 20 * s->qscale)
1941                 skip_dct[3] = 1;
1942             if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1943                               wrap_c, 8) < 20 * s->qscale)
1944                 skip_dct[4] = 1;
1945             if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1946                               wrap_c, 8) < 20 * s->qscale)
1947                 skip_dct[5] = 1;
1948             if (!s->chroma_y_shift) { /* 422 */
1949                 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1950                                   dest_cb + (dct_offset >> 1),
1951                                   wrap_c, 8) < 20 * s->qscale)
1952                     skip_dct[6] = 1;
1953                 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1954                                   dest_cr + (dct_offset >> 1),
1955                                   wrap_c, 8) < 20 * s->qscale)
1956                     skip_dct[7] = 1;
1957             }
1958         }
1959     }
1960
1961     if (s->quantizer_noise_shaping) {
1962         if (!skip_dct[0])
1963             get_visual_weight(weight[0], ptr_y                 , wrap_y);
1964         if (!skip_dct[1])
1965             get_visual_weight(weight[1], ptr_y              + 8, wrap_y);
1966         if (!skip_dct[2])
1967             get_visual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
1968         if (!skip_dct[3])
1969             get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1970         if (!skip_dct[4])
1971             get_visual_weight(weight[4], ptr_cb                , wrap_c);
1972         if (!skip_dct[5])
1973             get_visual_weight(weight[5], ptr_cr                , wrap_c);
1974         if (!s->chroma_y_shift) { /* 422 */
1975             if (!skip_dct[6])
1976                 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1977                                   wrap_c);
1978             if (!skip_dct[7])
1979                 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
1980                                   wrap_c);
1981         }
1982         memcpy(orig[0], s->block[0], sizeof(DCTELEM) * 64 * mb_block_count);
1983     }
1984
1985     /* DCT & quantize */
1986     assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1987     {
1988         for (i = 0; i < mb_block_count; i++) {
1989             if (!skip_dct[i]) {
1990                 int overflow;
1991                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1992                 // FIXME we could decide to change the quantizer instead of
1993                 // clipping
1994                 // JS: I don't think that would be a good idea, it could lower
1995                 //     quality instead of improving it. Only INTRADC clipping
1996                 //     deserves changes in the quantizer.
1997                 if (overflow)
1998                     clip_coeffs(s, s->block[i], s->block_last_index[i]);
1999             } else
2000                 s->block_last_index[i] = -1;
2001         }
2002         if (s->quantizer_noise_shaping) {
2003             for (i = 0; i < mb_block_count; i++) {
2004                 if (!skip_dct[i]) {
2005                     s->block_last_index[i] =
2006                         dct_quantize_refine(s, s->block[i], weight[i],
2007                                             orig[i], i, s->qscale);
2008                 }
2009             }
2010         }
2011
2012         if (s->luma_elim_threshold && !s->mb_intra)
2013             for (i = 0; i < 4; i++)
2014                 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2015         if (s->chroma_elim_threshold && !s->mb_intra)
2016             for (i = 4; i < mb_block_count; i++)
2017                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2018
2019         if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2020             for (i = 0; i < mb_block_count; i++) {
2021                 if (s->block_last_index[i] == -1)
2022                     s->coded_score[i] = INT_MAX / 256;
2023             }
2024         }
2025     }
2026
2027     if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
2028         s->block_last_index[4] =
2029         s->block_last_index[5] = 0;
2030         s->block[4][0] =
2031         s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2032     }
2033
2034     // FIXME: the non-C quantize code returns an incorrect block_last_index
2035     if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2036         for (i = 0; i < mb_block_count; i++) {
2037             int j;
2038             if (s->block_last_index[i] > 0) {
2039                 for (j = 63; j > 0; j--) {
2040                     if (s->block[i][s->intra_scantable.permutated[j]])
2041                         break;
2042                 }
2043                 s->block_last_index[i] = j;
2044             }
2045         }
2046     }
2047
2048     /* huffman encode */
2049     switch(s->codec_id){ //FIXME a function pointer could be slightly faster
2050     case AV_CODEC_ID_MPEG1VIDEO:
2051     case AV_CODEC_ID_MPEG2VIDEO:
2052         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2053             ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2054         break;
2055     case AV_CODEC_ID_MPEG4:
2056         if (CONFIG_MPEG4_ENCODER)
2057             ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2058         break;
2059     case AV_CODEC_ID_MSMPEG4V2:
2060     case AV_CODEC_ID_MSMPEG4V3:
2061     case AV_CODEC_ID_WMV1:
2062         if (CONFIG_MSMPEG4_ENCODER)
2063             ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2064         break;
2065     case AV_CODEC_ID_WMV2:
2066         if (CONFIG_WMV2_ENCODER)
2067             ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2068         break;
2069     case AV_CODEC_ID_H261:
2070         if (CONFIG_H261_ENCODER)
2071             ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2072         break;
2073     case AV_CODEC_ID_H263:
2074     case AV_CODEC_ID_H263P:
2075     case AV_CODEC_ID_FLV1:
2076     case AV_CODEC_ID_RV10:
2077     case AV_CODEC_ID_RV20:
2078         if (CONFIG_H263_ENCODER)
2079             ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2080         break;
2081     case AV_CODEC_ID_MJPEG:
2082         if (CONFIG_MJPEG_ENCODER)
2083             ff_mjpeg_encode_mb(s, s->block);
2084         break;
2085     default:
2086         assert(0);
2087     }
2088 }
2089
2090 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2091 {
2092     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y,  8, 6);
2093     else                                encode_mb_internal(s, motion_x, motion_y, 16, 8);
2094 }
2095
2096 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2097     int i;
2098
2099     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2100
2101     /* mpeg1 */
2102     d->mb_skip_run= s->mb_skip_run;
2103     for(i=0; i<3; i++)
2104         d->last_dc[i] = s->last_dc[i];
2105
2106     /* statistics */
2107     d->mv_bits= s->mv_bits;
2108     d->i_tex_bits= s->i_tex_bits;
2109     d->p_tex_bits= s->p_tex_bits;
2110     d->i_count= s->i_count;
2111     d->f_count= s->f_count;
2112     d->b_count= s->b_count;
2113     d->skip_count= s->skip_count;
2114     d->misc_bits= s->misc_bits;
2115     d->last_bits= 0;
2116
2117     d->mb_skipped= 0;
2118     d->qscale= s->qscale;
2119     d->dquant= s->dquant;
2120
2121     d->esc3_level_length= s->esc3_level_length;
2122 }
2123
2124 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2125     int i;
2126
2127     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2128     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2129
2130     /* mpeg1 */
2131     d->mb_skip_run= s->mb_skip_run;
2132     for(i=0; i<3; i++)
2133         d->last_dc[i] = s->last_dc[i];
2134
2135     /* statistics */
2136     d->mv_bits= s->mv_bits;
2137     d->i_tex_bits= s->i_tex_bits;
2138     d->p_tex_bits= s->p_tex_bits;
2139     d->i_count= s->i_count;
2140     d->f_count= s->f_count;
2141     d->b_count= s->b_count;
2142     d->skip_count= s->skip_count;
2143     d->misc_bits= s->misc_bits;
2144
2145     d->mb_intra= s->mb_intra;
2146     d->mb_skipped= s->mb_skipped;
2147     d->mv_type= s->mv_type;
2148     d->mv_dir= s->mv_dir;
2149     d->pb= s->pb;
2150     if(s->data_partitioning){
2151         d->pb2= s->pb2;
2152         d->tex_pb= s->tex_pb;
2153     }
2154     d->block= s->block;
2155     for(i=0; i<8; i++)
2156         d->block_last_index[i]= s->block_last_index[i];
2157     d->interlaced_dct= s->interlaced_dct;
2158     d->qscale= s->qscale;
2159
2160     d->esc3_level_length= s->esc3_level_length;
2161 }
2162
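/*
 * Encode one candidate macroblock mode for the mode decision: the context
 * saved in *backup is restored, block data and bit writers are pointed at
 * the scratch set selected by *next_block, the MB is encoded and scored by
 * its bit count, or by bits * lambda2 + (SSE << FF_LAMBDA_SHIFT) when
 * mb_decision is FF_MB_DECISION_RD.  If the score beats *dmin the state is
 * copied into *best and *next_block flips so the winner's buffers survive.
 */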
2163 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2164                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2165                            int *dmin, int *next_block, int motion_x, int motion_y)
2166 {
2167     int score;
2168     uint8_t *dest_backup[3];
2169
2170     copy_context_before_encode(s, backup, type);
2171
2172     s->block= s->blocks[*next_block];
2173     s->pb= pb[*next_block];
2174     if(s->data_partitioning){
2175         s->pb2   = pb2   [*next_block];
2176         s->tex_pb= tex_pb[*next_block];
2177     }
2178
2179     if(*next_block){
2180         memcpy(dest_backup, s->dest, sizeof(s->dest));
2181         s->dest[0] = s->rd_scratchpad;
2182         s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2183         s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2184         assert(s->linesize >= 32); //FIXME
2185     }
2186
2187     encode_mb(s, motion_x, motion_y);
2188
2189     score= put_bits_count(&s->pb);
2190     if(s->data_partitioning){
2191         score+= put_bits_count(&s->pb2);
2192         score+= put_bits_count(&s->tex_pb);
2193     }
2194
2195     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2196         ff_MPV_decode_mb(s, s->block);
2197
2198         score *= s->lambda2;
2199         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2200     }
2201
2202     if(*next_block){
2203         memcpy(s->dest, dest_backup, sizeof(s->dest));
2204     }
2205
2206     if(score<*dmin){
2207         *dmin= score;
2208         *next_block^=1;
2209
2210         copy_context_after_encode(best, s, type);
2211     }
2212 }
2213
2214 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2215     uint32_t *sq = ff_squareTbl + 256;
2216     int acc=0;
2217     int x,y;
2218
2219     if(w==16 && h==16)
2220         return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2221     else if(w==8 && h==8)
2222         return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2223
2224     for(y=0; y<h; y++){
2225         for(x=0; x<w; x++){
2226             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2227         }
2228     }
2229
2230     assert(acc>=0);
2231
2232     return acc;
2233 }
2234
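/*
 * Sum of squared errors between the source macroblock and its reconstruction
 * over luma and both chroma planes; uses the NSSE comparison when mb_cmp is
 * FF_CMP_NSSE and falls back to the generic sse() helper for partial
 * macroblocks at the right/bottom picture edge.
 */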
2235 static int sse_mb(MpegEncContext *s){
2236     int w= 16;
2237     int h= 16;
2238
2239     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2240     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2241
2242     if(w==16 && h==16)
2243       if(s->avctx->mb_cmp == FF_CMP_NSSE){
2244         return  s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2245                +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2246                +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2247       }else{
2248         return  s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2249                +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2250                +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2251       }
2252     else
2253         return  sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2254                +sse(s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2255                +sse(s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2256 }
2257
2258 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2259     MpegEncContext *s= *(void**)arg;
2260
2261
2262     s->me.pre_pass=1;
2263     s->me.dia_size= s->avctx->pre_dia_size;
2264     s->first_slice_line=1;
2265     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2266         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2267             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2268         }
2269         s->first_slice_line=0;
2270     }
2271
2272     s->me.pre_pass=0;
2273
2274     return 0;
2275 }
2276
2277 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2278     MpegEncContext *s= *(void**)arg;
2279
2280     ff_check_alignment();
2281
2282     s->me.dia_size= s->avctx->dia_size;
2283     s->first_slice_line=1;
2284     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2285         s->mb_x=0; //for block init below
2286         ff_init_block_index(s);
2287         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2288             s->block_index[0]+=2;
2289             s->block_index[1]+=2;
2290             s->block_index[2]+=2;
2291             s->block_index[3]+=2;
2292
2293             /* compute motion vector & mb_type and store in context */
2294             if(s->pict_type==AV_PICTURE_TYPE_B)
2295                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2296             else
2297                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2298         }
2299         s->first_slice_line=0;
2300     }
2301     return 0;
2302 }
2303
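/*
 * Per-slice texture statistics: for each macroblock the luma mean and an
 * approximate variance (sum of squares minus squared sum / 256, scaled down
 * by 256 with a small bias) are stored in mb_mean / mb_var and accumulated
 * into s->me.mb_var_sum_temp.
 */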
2304 static int mb_var_thread(AVCodecContext *c, void *arg){
2305     MpegEncContext *s= *(void**)arg;
2306     int mb_x, mb_y;
2307
2308     ff_check_alignment();
2309
2310     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2311         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2312             int xx = mb_x * 16;
2313             int yy = mb_y * 16;
2314             uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2315             int varc;
2316             int sum = s->dsp.pix_sum(pix, s->linesize);
2317
2318             varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2319
2320             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2321             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2322             s->me.mb_var_sum_temp    += varc;
2323         }
2324     }
2325     return 0;
2326 }
2327
2328 static void write_slice_end(MpegEncContext *s){
2329     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2330         if(s->partitioned_frame){
2331             ff_mpeg4_merge_partitions(s);
2332         }
2333
2334         ff_mpeg4_stuffing(&s->pb);
2335     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2336         ff_mjpeg_encode_stuffing(&s->pb);
2337     }
2338
2339     avpriv_align_put_bits(&s->pb);
2340     flush_put_bits(&s->pb);
2341
2342     if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2343         s->misc_bits+= get_bits_diff(s);
2344 }
2345
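/*
 * Write one 12-byte AV_PKT_DATA_H263_MB_INFO entry: bit offset of the
 * macroblock within the packet (32-bit LE), quantizer, GOB number,
 * macroblock address within the GOB (16-bit LE), the predicted motion
 * vector pair hmv1/vmv1, and hmv2/vmv2, which stay zero since 4MV is not
 * implemented.
 */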
2346 static void write_mb_info(MpegEncContext *s)
2347 {
2348     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2349     int offset = put_bits_count(&s->pb);
2350     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2351     int gobn = s->mb_y / s->gob_index;
2352     int pred_x, pred_y;
2353     if (CONFIG_H263_ENCODER)
2354         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2355     bytestream_put_le32(&ptr, offset);
2356     bytestream_put_byte(&ptr, s->qscale);
2357     bytestream_put_byte(&ptr, gobn);
2358     bytestream_put_le16(&ptr, mba);
2359     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2360     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2361     /* 4MV not implemented */
2362     bytestream_put_byte(&ptr, 0); /* hmv2 */
2363     bytestream_put_byte(&ptr, 0); /* vmv2 */
2364 }
2365
2366 static void update_mb_info(MpegEncContext *s, int startcode)
2367 {
2368     if (!s->mb_info)
2369         return;
2370     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2371         s->mb_info_size += 12;
2372         s->prev_mb_info = s->last_mb_info;
2373     }
2374     if (startcode) {
2375         s->prev_mb_info = put_bits_count(&s->pb)/8;
2376         /* This might have incremented mb_info_size above, and we return without
2377          * actually writing any info into that slot yet. But in that case,
2378          * this will be called again right after the start code has been written,
2379          * and the mb info will actually be written then. */
2380         return;
2381     }
2382
2383     s->last_mb_info = put_bits_count(&s->pb)/8;
2384     if (!s->mb_info_size)
2385         s->mb_info_size += 12;
2386     write_mb_info(s);
2387 }
2388
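/*
 * Slice encoding worker: walks the macroblock rows assigned to this context,
 * emits GOB / video-packet / slice headers at resync points when rtp_mode is
 * set, and whenever more than one macroblock type is possible (or
 * FF_MPV_FLAG_QP_RD is active) tries every candidate through encode_mb_hq()
 * with two alternating bit buffers and keeps the cheapest one.
 */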
2389 static int encode_thread(AVCodecContext *c, void *arg){
2390     MpegEncContext *s= *(void**)arg;
2391     int mb_x, mb_y, pdif = 0;
2392     int chr_h= 16>>s->chroma_y_shift;
2393     int i, j;
2394     MpegEncContext best_s, backup_s;
2395     uint8_t bit_buf[2][MAX_MB_BYTES];
2396     uint8_t bit_buf2[2][MAX_MB_BYTES];
2397     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2398     PutBitContext pb[2], pb2[2], tex_pb[2];
2399 //printf("%d->%d\n", s->resync_mb_y, s->end_mb_y);
2400
2401     ff_check_alignment();
2402
2403     for(i=0; i<2; i++){
2404         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2405         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2406         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2407     }
2408
2409     s->last_bits= put_bits_count(&s->pb);
2410     s->mv_bits=0;
2411     s->misc_bits=0;
2412     s->i_tex_bits=0;
2413     s->p_tex_bits=0;
2414     s->i_count=0;
2415     s->f_count=0;
2416     s->b_count=0;
2417     s->skip_count=0;
2418
2419     for(i=0; i<3; i++){
2420         /* init last dc values */
2421         /* note: quant matrix value (8) is implied here */
2422         s->last_dc[i] = 128 << s->intra_dc_precision;
2423
2424         s->current_picture.f.error[i] = 0;
2425     }
2426     s->mb_skip_run = 0;
2427     memset(s->last_mv, 0, sizeof(s->last_mv));
2428
2429     s->last_mv_dir = 0;
2430
2431     switch(s->codec_id){
2432     case AV_CODEC_ID_H263:
2433     case AV_CODEC_ID_H263P:
2434     case AV_CODEC_ID_FLV1:
2435         if (CONFIG_H263_ENCODER)
2436             s->gob_index = ff_h263_get_gob_height(s);
2437         break;
2438     case AV_CODEC_ID_MPEG4:
2439         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2440             ff_mpeg4_init_partitions(s);
2441         break;
2442     }
2443
2444     s->resync_mb_x=0;
2445     s->resync_mb_y=0;
2446     s->first_slice_line = 1;
2447     s->ptr_lastgob = s->pb.buf;
2448     for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2449 //    printf("row %d at %X\n", s->mb_y, (int)s);
2450         s->mb_x=0;
2451         s->mb_y= mb_y;
2452
2453         ff_set_qscale(s, s->qscale);
2454         ff_init_block_index(s);
2455
2456         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2457             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2458             int mb_type= s->mb_type[xy];
2459 //            int d;
2460             int dmin= INT_MAX;
2461             int dir;
2462
2463             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2464                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2465                 return -1;
2466             }
2467             if(s->data_partitioning){
2468                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
2469                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2470                     av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2471                     return -1;
2472                 }
2473             }
2474
2475             s->mb_x = mb_x;
2476             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
2477             ff_update_block_index(s);
2478
2479             if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2480                 ff_h261_reorder_mb_index(s);
2481                 xy= s->mb_y*s->mb_stride + s->mb_x;
2482                 mb_type= s->mb_type[xy];
2483             }
2484
2485             /* write gob / video packet header  */
2486             if(s->rtp_mode){
2487                 int current_packet_size, is_gob_start;
2488
2489                 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2490
2491                 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2492
2493                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2494
2495                 switch(s->codec_id){
2496                 case AV_CODEC_ID_H263:
2497                 case AV_CODEC_ID_H263P:
2498                     if(!s->h263_slice_structured)
2499                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2500                     break;
2501                 case AV_CODEC_ID_MPEG2VIDEO:
2502                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2503                 case AV_CODEC_ID_MPEG1VIDEO:
2504                     if(s->mb_skip_run) is_gob_start=0;
2505                     break;
2506                 }
2507
2508                 if(is_gob_start){
2509                     if(s->start_mb_y != mb_y || mb_x!=0){
2510                         write_slice_end(s);
2511
2512                         if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2513                             ff_mpeg4_init_partitions(s);
2514                         }
2515                     }
2516
2517                     assert((put_bits_count(&s->pb)&7) == 0);
2518                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2519
2520                     if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2521                         int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2522                         int d= 100 / s->avctx->error_rate;
2523                         if(r % d == 0){
2524                             current_packet_size=0;
2525                             s->pb.buf_ptr= s->ptr_lastgob;
2526                             assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2527                         }
2528                     }
2529
2530                     if (s->avctx->rtp_callback){
2531                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2532                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2533                     }
2534                     update_mb_info(s, 1);
2535
2536                     switch(s->codec_id){
2537                     case AV_CODEC_ID_MPEG4:
2538                         if (CONFIG_MPEG4_ENCODER) {
2539                             ff_mpeg4_encode_video_packet_header(s);
2540                             ff_mpeg4_clean_buffers(s);
2541                         }
2542                     break;
2543                     case AV_CODEC_ID_MPEG1VIDEO:
2544                     case AV_CODEC_ID_MPEG2VIDEO:
2545                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2546                             ff_mpeg1_encode_slice_header(s);
2547                             ff_mpeg1_clean_buffers(s);
2548                         }
2549                     break;
2550                     case AV_CODEC_ID_H263:
2551                     case AV_CODEC_ID_H263P:
2552                         if (CONFIG_H263_ENCODER)
2553                             ff_h263_encode_gob_header(s, mb_y);
2554                     break;
2555                     }
2556
2557                     if(s->flags&CODEC_FLAG_PASS1){
2558                         int bits= put_bits_count(&s->pb);
2559                         s->misc_bits+= bits - s->last_bits;
2560                         s->last_bits= bits;
2561                     }
2562
2563                     s->ptr_lastgob += current_packet_size;
2564                     s->first_slice_line=1;
2565                     s->resync_mb_x=mb_x;
2566                     s->resync_mb_y=mb_y;
2567                 }
2568             }
2569
2570             if(  (s->resync_mb_x   == s->mb_x)
2571                && s->resync_mb_y+1 == s->mb_y){
2572                 s->first_slice_line=0;
2573             }
2574
2575             s->mb_skipped=0;
2576             s->dquant=0; //only for QP_RD
2577
2578             update_mb_info(s, 0);
2579
2580             if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2581                 int next_block=0;
2582                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2583
2584                 copy_context_before_encode(&backup_s, s, -1);
2585                 backup_s.pb= s->pb;
2586                 best_s.data_partitioning= s->data_partitioning;
2587                 best_s.partitioned_frame= s->partitioned_frame;
2588                 if(s->data_partitioning){
2589                     backup_s.pb2= s->pb2;
2590                     backup_s.tex_pb= s->tex_pb;
2591                 }
2592
2593                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2594                     s->mv_dir = MV_DIR_FORWARD;
2595                     s->mv_type = MV_TYPE_16X16;
2596                     s->mb_intra= 0;
2597                     s->mv[0][0][0] = s->p_mv_table[xy][0];
2598                     s->mv[0][0][1] = s->p_mv_table[xy][1];
2599                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2600                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2601                 }
2602                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2603                     s->mv_dir = MV_DIR_FORWARD;
2604                     s->mv_type = MV_TYPE_FIELD;
2605                     s->mb_intra= 0;
2606                     for(i=0; i<2; i++){
2607                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2608                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2609                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2610                     }
2611                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2612                                  &dmin, &next_block, 0, 0);
2613                 }
2614                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2615                     s->mv_dir = MV_DIR_FORWARD;
2616                     s->mv_type = MV_TYPE_16X16;
2617                     s->mb_intra= 0;
2618                     s->mv[0][0][0] = 0;
2619                     s->mv[0][0][1] = 0;
2620                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2621                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2622                 }
2623                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2624                     s->mv_dir = MV_DIR_FORWARD;
2625                     s->mv_type = MV_TYPE_8X8;
2626                     s->mb_intra= 0;
2627                     for(i=0; i<4; i++){
2628                         s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2629                         s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2630                     }
2631                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2632                                  &dmin, &next_block, 0, 0);
2633                 }
2634                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2635                     s->mv_dir = MV_DIR_FORWARD;
2636                     s->mv_type = MV_TYPE_16X16;
2637                     s->mb_intra= 0;
2638                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2639                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2640                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2641                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2642                 }
2643                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2644                     s->mv_dir = MV_DIR_BACKWARD;
2645                     s->mv_type = MV_TYPE_16X16;
2646                     s->mb_intra= 0;
2647                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2648                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2649                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2650                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2651                 }
2652                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2653                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2654                     s->mv_type = MV_TYPE_16X16;
2655                     s->mb_intra= 0;
2656                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2657                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2658                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2659                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2660                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2661                                  &dmin, &next_block, 0, 0);
2662                 }
2663                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2664                     s->mv_dir = MV_DIR_FORWARD;
2665                     s->mv_type = MV_TYPE_FIELD;
2666                     s->mb_intra= 0;
2667                     for(i=0; i<2; i++){
2668                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2669                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2670                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2671                     }
2672                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2673                                  &dmin, &next_block, 0, 0);
2674                 }
2675                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2676                     s->mv_dir = MV_DIR_BACKWARD;
2677                     s->mv_type = MV_TYPE_FIELD;
2678                     s->mb_intra= 0;
2679                     for(i=0; i<2; i++){
2680                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2681                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2682                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2683                     }
2684                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2685                                  &dmin, &next_block, 0, 0);
2686                 }
2687                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2688                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2689                     s->mv_type = MV_TYPE_FIELD;
2690                     s->mb_intra= 0;
2691                     for(dir=0; dir<2; dir++){
2692                         for(i=0; i<2; i++){
2693                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2694                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2695                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2696                         }
2697                     }
2698                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2699                                  &dmin, &next_block, 0, 0);
2700                 }
2701                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2702                     s->mv_dir = 0;
2703                     s->mv_type = MV_TYPE_16X16;
2704                     s->mb_intra= 1;
2705                     s->mv[0][0][0] = 0;
2706                     s->mv[0][0][1] = 0;
2707                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2708                                  &dmin, &next_block, 0, 0);
2709                     if(s->h263_pred || s->h263_aic){
2710                         if(best_s.mb_intra)
2711                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2712                         else
2713                             ff_clean_intra_table_entries(s); //old mode?
2714                     }
2715                 }
2716
2717                 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2718                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2719                         const int last_qp= backup_s.qscale;
2720                         int qpi, qp, dc[6];
2721                         DCTELEM ac[6][16];
2722                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2723                         static const int dquant_tab[4]={-1,1,-2,2};
2724
2725                         assert(backup_s.dquant == 0);
2726
2727                         //FIXME intra
2728                         s->mv_dir= best_s.mv_dir;
2729                         s->mv_type = MV_TYPE_16X16;
2730                         s->mb_intra= best_s.mb_intra;
2731                         s->mv[0][0][0] = best_s.mv[0][0][0];
2732                         s->mv[0][0][1] = best_s.mv[0][0][1];
2733                         s->mv[1][0][0] = best_s.mv[1][0][0];
2734                         s->mv[1][0][1] = best_s.mv[1][0][1];
2735
2736                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2737                         for(; qpi<4; qpi++){
2738                             int dquant= dquant_tab[qpi];
2739                             qp= last_qp + dquant;
2740                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2741                                 continue;
2742                             backup_s.dquant= dquant;
2743                             if(s->mb_intra && s->dc_val[0]){
2744                                 for(i=0; i<6; i++){
2745                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
2746                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16);
2747                                 }
2748                             }
2749
2750                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2751                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2752                             if(best_s.qscale != qp){
2753                                 if(s->mb_intra && s->dc_val[0]){
2754                                     for(i=0; i<6; i++){
2755                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
2756                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
2757                                     }
2758                                 }
2759                             }
2760                         }
2761                     }
2762                 }
2763                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2764                     int mx= s->b_direct_mv_table[xy][0];
2765                     int my= s->b_direct_mv_table[xy][1];
2766
2767                     backup_s.dquant = 0;
2768                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2769                     s->mb_intra= 0;
2770                     ff_mpeg4_set_direct_mv(s, mx, my);
2771                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2772                                  &dmin, &next_block, mx, my);
2773                 }
2774                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2775                     backup_s.dquant = 0;
2776                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2777                     s->mb_intra= 0;
2778                     ff_mpeg4_set_direct_mv(s, 0, 0);
2779                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2780                                  &dmin, &next_block, 0, 0);
2781                 }
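                     /* SKIP_RD: if the best inter candidate coded any coefficients,
                      * also try the same motion with the DCT skipped (an all-zero
                      * residual) and keep the cheaper of the two. */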
2782                 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2783                     int coded=0;
2784                     for(i=0; i<6; i++)
2785                         coded |= s->block_last_index[i];
2786                     if(coded){
2787                         int mx,my;
2788                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
2789                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2790                             mx=my=0; //FIXME find the one we actually used
2791                             ff_mpeg4_set_direct_mv(s, mx, my);
2792                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2793                             mx= s->mv[1][0][0];
2794                             my= s->mv[1][0][1];
2795                         }else{
2796                             mx= s->mv[0][0][0];
2797                             my= s->mv[0][0][1];
2798                         }
2799
2800                         s->mv_dir= best_s.mv_dir;
2801                         s->mv_type = best_s.mv_type;
2802                         s->mb_intra= 0;
2803 /*                        s->mv[0][0][0] = best_s.mv[0][0][0];
2804                         s->mv[0][0][1] = best_s.mv[0][0][1];
2805                         s->mv[1][0][0] = best_s.mv[1][0][0];
2806                         s->mv[1][0][1] = best_s.mv[1][0][1];*/
2807                         backup_s.dquant= 0;
2808                         s->skipdct=1;
2809                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2810                                         &dmin, &next_block, mx, my);
2811                         s->skipdct=0;
2812                     }
2813                 }
2814
2815                 s->current_picture.f.qscale_table[xy] = best_s.qscale;
2816
2817                 copy_context_after_encode(s, &best_s, -1);
2818
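                     /* The best candidate's bits sit in one of the two scratch bit
                      * buffers (index next_block^1); append them to the real
                      * bitstream writer restored from backup_s. */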
2819                 pb_bits_count= put_bits_count(&s->pb);
2820                 flush_put_bits(&s->pb);
2821                 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2822                 s->pb= backup_s.pb;
2823
2824                 if(s->data_partitioning){
2825                     pb2_bits_count= put_bits_count(&s->pb2);
2826                     flush_put_bits(&s->pb2);
2827                     avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2828                     s->pb2= backup_s.pb2;
2829
2830                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
2831                     flush_put_bits(&s->tex_pb);
2832                     avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2833                     s->tex_pb= backup_s.tex_pb;
2834                 }
2835                 s->last_bits= put_bits_count(&s->pb);
2836
2837                 if (CONFIG_H263_ENCODER &&
2838                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2839                     ff_h263_update_motion_val(s);
2840
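                     /* next_block==0 means the best candidate was reconstructed into
                      * the RD scratchpad; copy its luma and chroma blocks back into
                      * the current frame buffer. */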
2841                 if(next_block==0){ //FIXME 16 vs linesize16
2842                     s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad                     , s->linesize  ,16);
2843                     s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
2844                     s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2845                 }
2846
2847                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2848                     ff_MPV_decode_mb(s, s->block);
2849             } else {
2850                 int motion_x = 0, motion_y = 0;
2851                 s->mv_type=MV_TYPE_16X16;
2852                 // only one MB-Type possible
2853
2854                 switch(mb_type){
2855                 case CANDIDATE_MB_TYPE_INTRA:
2856                     s->mv_dir = 0;
2857                     s->mb_intra= 1;
2858                     motion_x= s->mv[0][0][0] = 0;
2859                     motion_y= s->mv[0][0][1] = 0;
2860                     break;
2861                 case CANDIDATE_MB_TYPE_INTER:
2862                     s->mv_dir = MV_DIR_FORWARD;
2863                     s->mb_intra= 0;
2864                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2865                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2866                     break;
2867                 case CANDIDATE_MB_TYPE_INTER_I:
2868                     s->mv_dir = MV_DIR_FORWARD;
2869                     s->mv_type = MV_TYPE_FIELD;
2870                     s->mb_intra= 0;
2871                     for(i=0; i<2; i++){
2872                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2873                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2874                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2875                     }
2876                     break;
2877                 case CANDIDATE_MB_TYPE_INTER4V:
2878                     s->mv_dir = MV_DIR_FORWARD;
2879                     s->mv_type = MV_TYPE_8X8;
2880                     s->mb_intra= 0;
2881                     for(i=0; i<4; i++){
2882                         s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2883                         s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2884                     }
2885                     break;
2886                 case CANDIDATE_MB_TYPE_DIRECT:
2887                     if (CONFIG_MPEG4_ENCODER) {
2888                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2889                         s->mb_intra= 0;
2890                         motion_x=s->b_direct_mv_table[xy][0];
2891                         motion_y=s->b_direct_mv_table[xy][1];
2892                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2893                     }
2894                     break;
2895                 case CANDIDATE_MB_TYPE_DIRECT0:
2896                     if (CONFIG_MPEG4_ENCODER) {
2897                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2898                         s->mb_intra= 0;
2899                         ff_mpeg4_set_direct_mv(s, 0, 0);
2900                     }
2901                     break;
2902                 case CANDIDATE_MB_TYPE_BIDIR:
2903                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2904                     s->mb_intra= 0;
2905                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2906                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2907                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2908                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2909                     break;
2910                 case CANDIDATE_MB_TYPE_BACKWARD:
2911                     s->mv_dir = MV_DIR_BACKWARD;
2912                     s->mb_intra= 0;
2913                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2914                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2915                     break;
2916                 case CANDIDATE_MB_TYPE_FORWARD:
2917                     s->mv_dir = MV_DIR_FORWARD;
2918                     s->mb_intra= 0;
2919                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2920                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2921 //                    printf(" %d %d ", motion_x, motion_y);
2922                     break;
2923                 case CANDIDATE_MB_TYPE_FORWARD_I:
2924                     s->mv_dir = MV_DIR_FORWARD;
2925                     s->mv_type = MV_TYPE_FIELD;
2926                     s->mb_intra= 0;
2927                     for(i=0; i<2; i++){
2928                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2929                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2930                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2931                     }
2932                     break;
2933                 case CANDIDATE_MB_TYPE_BACKWARD_I:
2934                     s->mv_dir = MV_DIR_BACKWARD;
2935                     s->mv_type = MV_TYPE_FIELD;
2936                     s->mb_intra= 0;
2937                     for(i=0; i<2; i++){
2938                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2939                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2940                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2941                     }
2942                     break;
2943                 case CANDIDATE_MB_TYPE_BIDIR_I:
2944                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2945                     s->mv_type = MV_TYPE_FIELD;
2946                     s->mb_intra= 0;
2947                     for(dir=0; dir<2; dir++){
2948                         for(i=0; i<2; i++){
2949                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2950                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2951                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2952                         }
2953                     }
2954                     break;
2955                 default:
2956                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2957                 }
2958
2959                 encode_mb(s, motion_x, motion_y);
2960
2961                 // RAL: remember the MV direction of the last macroblock
2962                 s->last_mv_dir = s->mv_dir;
2963
2964                 if (CONFIG_H263_ENCODER &&
2965                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2966                     ff_h263_update_motion_val(s);
2967
2968                 ff_MPV_decode_mb(s, s->block);
2969             }
2970
2971             /* clean the MV table in I/P/S frames; direct mode in B-frames relies on it */
2972             if(s->mb_intra /* && I,P,S_TYPE */){
2973                 s->p_mv_table[xy][0]=0;
2974                 s->p_mv_table[xy][1]=0;
2975             }
2976
2977             if(s->flags&CODEC_FLAG_PSNR){
2978                 int w= 16;
2979                 int h= 16;
2980
2981                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2982                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2983
2984                 s->current_picture.f.error[0] += sse(
2985                     s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2986                     s->dest[0], w, h, s->linesize);
2987                 s->current_picture.f.error[1] += sse(
2988                     s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
2989                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2990                 s->current_picture.f.error[2] += sse(
2991                     s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
2992                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2993             }
2994             if(s->loop_filter){
2995                 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2996                     ff_h263_loop_filter(s);
2997             }
2998 //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, put_bits_count(&s->pb));
2999         }
3000     }
3001
3002     // Not beautiful, but the extension header must be written before the flush, so it has to be here
3003     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3004         ff_msmpeg4_encode_ext_header(s);
3005
3006     write_slice_end(s);
3007
3008     /* Send the last GOB if RTP */
3009     if (s->avctx->rtp_callback) {
3010         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3011         pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3012         /* Call the RTP callback to send the last GOB */
3013         emms_c();
3014         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3015     }
3016
3017     return 0;
3018 }
3019
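     /* MERGE adds a per-slice-thread statistic into the main context and zeroes
      * the source field so it cannot be counted twice. */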
3020 #define MERGE(field) dst->field += src->field; src->field=0
3021 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3022     MERGE(me.scene_change_score);
3023     MERGE(me.mc_mb_var_sum_temp);
3024     MERGE(me.mb_var_sum_temp);
3025 }
3026
3027 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3028     int i;
3029
3030     MERGE(dct_count[0]); // note: the other DCT variables are not part of the context
3031     MERGE(dct_count[1]);
3032     MERGE(mv_bits);
3033     MERGE(i_tex_bits);
3034     MERGE(p_tex_bits);
3035     MERGE(i_count);
3036     MERGE(f_count);
3037     MERGE(b_count);
3038     MERGE(skip_count);
3039     MERGE(misc_bits);
3040     MERGE(error_count);
3041     MERGE(padding_bug_score);
3042     MERGE(current_picture.f.error[0]);
3043     MERGE(current_picture.f.error[1]);
3044     MERGE(current_picture.f.error[2]);
3045
3046     if(dst->avctx->noise_reduction){
3047         for(i=0; i<64; i++){
3048             MERGE(dct_error_sum[0][i]);
3049             MERGE(dct_error_sum[1][i]);
3050         }
3051     }
3052
3053     assert(put_bits_count(&src->pb) % 8 ==0);
3054     assert(put_bits_count(&dst->pb) % 8 ==0);
3055     avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3056     flush_put_bits(&dst->pb);
3057 }
3058
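     /* Pick the frame-level quantizer: reuse a pending next_lambda if set,
      * otherwise ask the rate controller unless a fixed qscale is forced;
      * with adaptive quantization the per-MB qscale table is then cleaned up
      * in a codec-specific way. */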
3059 static int estimate_qp(MpegEncContext *s, int dry_run){
3060     if (s->next_lambda){
3061         s->current_picture_ptr->f.quality =
3062         s->current_picture.f.quality = s->next_lambda;
3063         if(!dry_run) s->next_lambda= 0;
3064     } else if (!s->fixed_qscale) {
3065         s->current_picture_ptr->f.quality =
3066         s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
3067         if (s->current_picture.f.quality < 0)
3068             return -1;
3069     }
3070
3071     if(s->adaptive_quant){
3072         switch(s->codec_id){
3073         case AV_CODEC_ID_MPEG4:
3074             if (CONFIG_MPEG4_ENCODER)
3075                 ff_clean_mpeg4_qscales(s);
3076             break;
3077         case AV_CODEC_ID_H263:
3078         case AV_CODEC_ID_H263P:
3079         case AV_CODEC_ID_FLV1:
3080             if (CONFIG_H263_ENCODER)
3081                 ff_clean_h263_qscales(s);
3082             break;
3083         default:
3084             ff_init_qscale_tab(s);
3085         }
3086
3087         s->lambda= s->lambda_table[0];
3088         //FIXME broken
3089     }else
3090         s->lambda = s->current_picture.f.quality;
3091 //printf("%d %d\n", s->avctx->global_quality, s->current_picture.quality);
3092     update_qscale(s);
3093     return 0;
3094 }
3095
3096 /* must be called before writing the header */
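     /* pp_time is the distance between the last two non-B frames; for a B-frame,
      * pb_time is its distance from the previous non-B frame. Both are derived
      * from the frame pts. */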
3097 static void set_frame_distances(MpegEncContext * s){
3098     assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3099     s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3100
3101     if(s->pict_type==AV_PICTURE_TYPE_B){
3102         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3103         assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3104     }else{
3105         s->pp_time= s->time - s->last_non_b_time;
3106         s->last_non_b_time= s->time;
3107         assert(s->picture_number==0 || s->pp_time > 0);
3108     }
3109 }
3110
3111 static int encode_picture(MpegEncContext *s, int picture_number)
3112 {
3113     int i;
3114     int bits;
3115     int context_count = s->slice_context_count;
3116
3117     s->picture_number = picture_number;
3118
3119     /* Reset the average MB variance */
3120     s->me.mb_var_sum_temp    =
3121     s->me.mc_mb_var_sum_temp = 0;
3122
3123     /* we need to initialize some time variables before we can encode B-frames */
3124     // RAL: Condition added for MPEG1VIDEO
3125     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3126         set_frame_distances(s);
3127     if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3128         ff_set_mpeg4_time(s);
3129
3130     s->me.scene_change_score=0;
3131
3132 //    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3133
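         /* no_rounding controls the halfpel interpolation rounding; codecs with
          * flip-flop rounding (MSMPEG4 >= 3, H.263+, MPEG-4) toggle it on every
          * non-B frame, presumably so rounding errors do not accumulate. */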
3134     if(s->pict_type==AV_PICTURE_TYPE_I){
3135         if(s->msmpeg4_version >= 3) s->no_rounding=1;
3136         else                        s->no_rounding=0;
3137     }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3138         if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3139             s->no_rounding ^= 1;
3140     }
3141
3142     if(s->flags & CODEC_FLAG_PASS2){
3143         if (estimate_qp(s,1) < 0)
3144             return -1;
3145         ff_get_2pass_fcode(s);
3146     }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3147         if(s->pict_type==AV_PICTURE_TYPE_B)
3148             s->lambda= s->last_lambda_for[s->pict_type];
3149         else
3150             s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3151         update_qscale(s);
3152     }
3153
3154     s->mb_intra=0; //for the rate distortion & bit compare functions
3155     for(i=1; i<context_count; i++){
3156         ff_update_duplicate_context(s->thread_context[i], s);
3157     }
3158
3159     if(ff_init_me(s)<0)
3160         return -1;
3161
3162     /* Estimate motion for every MB */
3163     if(s->pict_type != AV_PICTURE_TYPE_I){
3164         s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3165         s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3166         if(s->pict_type != AV_PICTURE_TYPE_B && s->avctx->me_threshold==0){
3167             if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3168                 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3169             }
3170         }
3171
3172         s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3173     }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3174         /* I-Frame */
3175         for(i=0; i<s->mb_stride*s->mb_height; i++)
3176             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3177
3178         if(!s->fixed_qscale){
3179             /* find the spatial complexity for I-frame rate control */
3180             s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3181         }
3182     }
3183     for(i=1; i<context_count; i++){
3184         merge_context_after_me(s, s->thread_context[i]);
3185     }
3186     s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3187     s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
3188     emms_c();
3189
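         /* If motion estimation reported a scene change on a P-frame, re-code
          * the whole frame as intra. */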
3190     if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3191         s->pict_type= AV_PICTURE_TYPE_I;
3192         for(i=0; i<s->mb_stride*s->mb_height; i++)
3193             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3194 //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3195     }
3196
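         /* Pick the smallest f_code/b_code able to represent the estimated motion
          * vectors, then fix up the vectors that still do not fit the chosen range. */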
3197     if(!s->umvplus){
3198         if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3199             s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3200
3201             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3202                 int a,b;
3203                 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3204                 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3205                 s->f_code= FFMAX3(s->f_code, a, b);
3206             }
3207
3208             ff_fix_long_p_mvs(s);
3209             ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3210             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3211                 int j;
3212                 for(i=0; i<2; i++){
3213                     for(j=0; j<2; j++)
3214                         ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3215                                         s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
3216                 }
3217             }
3218         }
3219
3220         if(s->pict_type==AV_PICTURE_TYPE_B){
3221             int a, b;
3222
3223             a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3224             b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3225             s->f_code = FFMAX(a, b);
3226
3227             a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3228             b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3229             s->b_code = FFMAX(a, b);
3230
3231             ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3232             ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3233             ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3234             ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3235             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3236                 int dir, j;
3237                 for(dir=0; dir<2; dir++){
3238                     for(i=0; i<2; i++){
3239                         for(j=0; j<2; j++){
3240                             int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3241                                           : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3242                             ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3243                                             s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3244                         }
3245                     }
3246                 }
3247             }
3248         }
3249     }
3250
3251     if (estimate_qp(s, 0) < 0)
3252         return -1;
3253
3254     if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3255         s->qscale= 3; //reduce clipping problems
3256
3257     if (s->out_format == FMT_MJPEG) {
3258         /* for MJPEG, the qscale is folded into the quantization matrix itself */
3259         for(i=1;i<64;i++){
3260             int j= s->dsp.idct_permutation[i];
3261
3262             s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3263         }
3264         s->y_dc_scale_table=
3265         s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3266         s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3267         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3268                        s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3269         s->qscale= 8;
3270     }
3271
3272     //FIXME var duplication
3273     s->current_picture_ptr->f.key_frame =
3274     s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3275     s->current_picture_ptr->f.pict_type =
3276     s->current_picture.f.pict_type = s->pict_type;
3277
3278     if (s->current_picture.f.key_frame)
3279         s->picture_in_gop_number=0;
3280
3281     s->last_bits= put_bits_count(&s->pb);
3282     switch(s->out_format) {
3283     case FMT_MJPEG:
3284         if (CONFIG_MJPEG_ENCODER)
3285             ff_mjpeg_encode_picture_header(s);
3286         break;
3287     case FMT_H261:
3288         if (CONFIG_H261_ENCODER)
3289             ff_h261_encode_picture_header(s, picture_number);
3290         break;
3291     case FMT_H263:
3292         if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3293             ff_wmv2_encode_picture_header(s, picture_number);
3294         else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3295             ff_msmpeg4_encode_picture_header(s, picture_number);
3296         else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3297             ff_mpeg4_encode_picture_header(s, picture_number);
3298         else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3299             ff_rv10_encode_picture_header(s, picture_number);
3300         else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3301             ff_rv20_encode_picture_header(s, picture_number);
3302         else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3303             ff_flv_encode_picture_header(s, picture_number);
3304         else if (CONFIG_H263_ENCODER)
3305             ff_h263_encode_picture_header(s, picture_number);
3306         break;
3307     case FMT_MPEG1:
3308         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3309             ff_mpeg1_encode_picture_header(s, picture_number);
3310         break;
3311     case FMT_H264:
3312         break;
3313     default:
3314         assert(0);
3315     }
3316     bits= put_bits_count(&s->pb);
3317     s->header_bits= bits - s->last_bits;
3318
3319     for(i=1; i<context_count; i++){
3320         update_duplicate_context_after_me(s->thread_context[i], s);
3321     }
3322     s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3323     for(i=1; i<context_count; i++){
3324         merge_context_after_encode(s, s->thread_context[i]);
3325     }
3326     emms_c();
3327     return 0;
3328 }
3329
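     /* DCT-domain noise reduction: accumulate the absolute level seen at each
      * coefficient position in dct_error_sum and shrink every coefficient towards
      * zero by the per-position dct_offset derived from those statistics. */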
3330 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){
3331     const int intra= s->mb_intra;
3332     int i;
3333
3334     s->dct_count[intra]++;
3335
3336     for(i=0; i<64; i++){
3337         int level= block[i];
3338
3339         if(level){
3340             if(level>0){
3341                 s->dct_error_sum[intra][i] += level;
3342                 level -= s->dct_offset[intra][i];
3343                 if(level<0) level=0;
3344             }else{
3345                 s->dct_error_sum[intra][i] -= level;
3346                 level += s->dct_offset[intra][i];
3347                 if(level>0) level=0;
3348             }
3349             block[i]= level;
3350         }
3351     }
3352 }
3353
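     /* Trellis quantization: after the forward DCT each coefficient gets up to two
      * candidate quantized levels, and a Viterbi-style search over (run, level)
      * decisions keeps a list of surviving paths, picking the sequence that
      * minimizes distortion + lambda * bits. */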
3354 static int dct_quantize_trellis_c(MpegEncContext *s,
3355                                   DCTELEM *block, int n,
3356                                   int qscale, int *overflow){
3357     const int *qmat;
3358     const uint8_t *scantable= s->intra_scantable.scantable;
3359     const uint8_t *perm_scantable= s->intra_scantable.permutated;
3360     int max=0;
3361     unsigned int threshold1, threshold2;
3362     int bias=0;
3363     int run_tab[65];
3364     int level_tab[65];
3365     int score_tab[65];
3366     int survivor[65];
3367     int survivor_count;
3368     int last_run=0;
3369     int last_level=0;
3370     int last_score= 0;
3371     int last_i;
3372     int coeff[2][64];
3373     int coeff_count[64];
3374     int qmul, qadd, start_i, last_non_zero, i, dc;
3375     const int esc_length= s->ac_esc_length;
3376     uint8_t * length;
3377     uint8_t * last_length;
3378     const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3379
3380     s->dsp.fdct (block);
3381
3382     if(s->dct_error_sum)
3383         s->denoise_dct(s, block);
3384     qmul= qscale*16;
3385     qadd= ((qscale-1)|1)*8;
3386
3387     if (s->mb_intra) {
3388         int q;
3389         if (!s->h263_aic) {
3390             if (n < 4)
3391                 q = s->y_dc_scale;
3392             else
3393                 q = s->c_dc_scale;
3394             q = q << 3;
3395         } else{
3396             /* For AIC we skip quant/dequant of INTRADC */
3397             q = 1 << 3;
3398             qadd=0;
3399         }
3400
3401         /* note: block[0] is assumed to be positive */
3402         block[0] = (block[0] + (q >> 1)) / q;
3403         start_i = 1;
3404         last_non_zero = 0;
3405         qmat = s->q_intra_matrix[qscale];
3406         if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3407             bias= 1<<(QMAT_SHIFT-1);
3408         length     = s->intra_ac_vlc_length;
3409         last_length= s->intra_ac_vlc_last_length;
3410     } else {
3411         start_i = 0;
3412         last_non_zero = -1;
3413         qmat = s->q_inter_matrix[qscale];
3414         length     = s->inter_ac_vlc_length;
3415         last_length= s->inter_ac_vlc_last_length;
3416     }
3417     last_i= start_i;
3418
3419     threshold1= (1<<QMAT_SHIFT) - bias - 1;
3420     threshold2= (threshold1<<1);
3421
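         /* scan back from the highest-frequency coefficient to find the last one
          * that survives quantization */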
3422     for(i=63; i>=start_i; i--) {
3423         const int j = scantable[i];
3424         int level = block[j] * qmat[j];
3425
3426         if(((unsigned)(level+threshold1))>threshold2){
3427             last_non_zero = i;
3428             break;
3429         }
3430     }
3431
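         /* store up to two candidate quantized levels (level and level-1) per
          * coefficient so the trellis can trade distortion against bits; values
          * below the threshold only get a +/-1 placeholder candidate */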
3432     for(i=start_i; i<=last_non_zero; i++) {
3433         const int j = scantable[i];
3434         int level = block[j] * qmat[j];
3435
3436 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
3437 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
3438         if(((unsigned)(level+threshold1))>threshold2){
3439             if(level>0){
3440                 level= (bias + level)>>QMAT_SHIFT;
3441                 coeff[0][i]= level;
3442                 coeff[1][i]= level-1;
3443 //                coeff[2][k]= level-2;
3444             }else{
3445                 level= (bias - level)>>QMAT_SHIFT;
3446                 coeff[0][i]= -level;
3447                 coeff[1][i]= -level+1;
3448 //                coeff[2][k]= -level+2;
3449             }
3450             coeff_count[i]= FFMIN(level, 2);
3451             assert(coeff_count[i]);
3452             max |=level;
3453         }else{
3454             coeff[0][i]= (level>>31)|1;
3455             coeff_count[i]= 1;
3456         }
3457     }
3458
3459     *overflow= s->max_qcoeff < max; //overflow might have happened
3460
3461     if(last_non_zero < start_i){
3462         memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
3463         return last_non_zero;
3464     }
3465
3466     score_tab[start_i]= 0;
3467     survivor[0]= start_i;
3468     survivor_count= 1;
3469
3470     for(i=start_i; i<=last_non_zero; i++){
3471         int level_index, j, zero_distortion;
3472         int dct_coeff= FFABS(block[ scantable[i] ]);
3473         int best_score=256*256*256*120;
3474
3475         if (s->dsp.fdct == ff_fdct_ifast)
3476             dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3477         zero_distortion= dct_coeff*dct_coeff;
3478
3479         for(level_index=0; level_index < coeff_count[i]; level_index++){
3480             int distortion;
3481             int level= coeff[level_index][i];
3482             const int alevel= FFABS(level);
3483             int unquant_coeff;
3484
3485             assert(level);
3486
3487             if(s->out_format == FMT_H263){
3488                 unquant_coeff= alevel*qmul + qadd;
3489             }else{ //MPEG1
3490                 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3491                 if(s->mb_intra){
3492                         unquant_coeff = (int)(  alevel  * qscale * s->intra_matrix[j]) >> 3;
3493                         unquant_coeff =   (unquant_coeff - 1) | 1;
3494                 }else{
3495                         unquant_coeff = (((  alevel  << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3496                         unquant_coeff =   (unquant_coeff - 1) | 1;
3497                 }
3498                 unquant_coeff<<= 3;
3499             }
3500
3501             distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
3502             level+=64;
3503             if((level&(~127)) == 0){
3504                 for(j=survivor_count-1; j>=0; j--){
3505                     int run= i - survivor[j];
3506                     int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3507                     score += score_tab[i-run];
3508
3509                     if(score < best_score){
3510                         best_score= score;
3511                         run_tab[i+1]= run;
3512                         level_tab[i+1]= level-64;
3513                     }
3514                 }
3515
3516                 if(s->out_format == FMT_H263){
3517                     for(j=survivor_count-1; j>=0; j--){
3518                         int run= i - survivor[j];
3519                         int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3520                         score += score_tab[i-run];
3521                         if(score < last_score){
3522                             last_score= score;
3523                             last_run= run;
3524                             last_level= level-64;
3525                             last_i= i+1;
3526                         }
3527                     }
3528                 }
3529             }else{
3530                 distortion += esc_length*lambda;
3531                 for(j=survivor_count-1; j>=0; j--){
3532                     int run= i - survivor[j];
3533                     int score= distortion + score_tab[i-run];
3534
3535                     if(score < best_score){
3536                         best_score= score;
3537                         run_tab[i+1]= run;
3538                         level_tab[i+1]= level-64;
3539                     }
3540                 }
3541
3542                 if(s->out_format == FMT_H263){
3543                   for(j=survivor_count-1; j>=0; j--){
3544                         int run= i - survivor[j];
3545                         int score= distortion + score_tab[i-run];
3546                         if(score < last_score){
3547                             last_score= score;
3548                             last_run= run;
3549                             last_level= level-64;
3550                             last_i= i+1;
3551                         }
3552                     }
3553                 }
3554             }
3555         }
3556
3557         score_tab[i+1]= best_score;
3558
3559         // Note: MPEG-4 has a VLC code that is 1 bit shorter than another one with a shorter run and the same level
3560         if(last_non_zero <= 27){