mpegvideo_enc: remove stray duplicate line from 7f9aaa4
[ffmpeg.git] / libavcodec / mpegvideo_enc.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29
30 #include "libavutil/intmath.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/opt.h"
33 #include "avcodec.h"
34 #include "dsputil.h"
35 #include "mpegvideo.h"
36 #include "h263.h"
37 #include "mjpegenc.h"
38 #include "msmpeg4.h"
39 #include "faandct.h"
40 #include "thread.h"
41 #include "aandcttab.h"
42 #include "flv.h"
43 #include "mpeg4video.h"
44 #include "internal.h"
45 #include "bytestream.h"
46 #include <limits.h>
47
48 //#undef NDEBUG
49 //#include <assert.h>
50
51 static int encode_picture(MpegEncContext *s, int picture_number);
52 static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
53 static int sse_mb(MpegEncContext *s);
54 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
55 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
56
57 /* enable all paranoid tests for rounding, overflows, etc... */
58 //#define PARANOID
59
60 //#define DEBUG
61
62 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
63 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
64
65 const AVOption ff_mpv_generic_options[] = {
66     FF_MPV_COMMON_OPTS
67     { NULL },
68 };
69
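/**
 * Build the per-qscale quantizer multiplier tables from a quant matrix:
 * qmat holds the 32-bit reciprocals used by the C/trellis quantizers and
 * qmat16 the 16-bit multiplier/bias pairs used by the MMX quantizer.
 * The values are adapted to the DCT in use (the ifast DCT needs the AAN
 * scale factors folded in).
 */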
70 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
71                        uint16_t (*qmat16)[2][64],
72                        const uint16_t *quant_matrix,
73                        int bias, int qmin, int qmax, int intra)
74 {
75     int qscale;
76     int shift = 0;
77
78     for (qscale = qmin; qscale <= qmax; qscale++) {
79         int i;
80         if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
81             dsp->fdct == ff_jpeg_fdct_islow_10 ||
82             dsp->fdct == ff_faandct) {
83             for (i = 0; i < 64; i++) {
84                 const int j = dsp->idct_permutation[i];
85                 /* 16 <= qscale * quant_matrix[i] <= 7905
86                  * Assume x = qscale * quant_matrix[i]
87                  * So             16 <=              x  <= 7905
88                  * so (1 << QMAT_SHIFT) / 16 >= (1 << QMAT_SHIFT) / (x)
89                  *                           >= (1 << QMAT_SHIFT) / 7905 */
90
91                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
92                                         (qscale * quant_matrix[j]));
93             }
94         } else if (dsp->fdct == ff_fdct_ifast) {
95             for (i = 0; i < 64; i++) {
96                 const int j = dsp->idct_permutation[i];
97                 /* 16 <= qscale * quant_matrix[i] <= 7905
98                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
99                  *             19952 <=              x  <= 249205026
100                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
101                  *           3444240 >= (1 << 36) / (x) >= 275 */
102
103                 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
104                                         (ff_aanscales[i] * qscale *
105                                          quant_matrix[j]));
106             }
107         } else {
108             for (i = 0; i < 64; i++) {
109                 const int j = dsp->idct_permutation[i];
110                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
111                  * Assume x = qscale * quant_matrix[i]
112                  * So             16 <=              x  <= 7905
113                  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
114                  * so          32768 >= (1 << 19) / (x) >= 67 */
115                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
116                                         (qscale * quant_matrix[j]));
117                 //qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) /
118                 //                    (qscale * quant_matrix[i]);
119                 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
120                                        (qscale * quant_matrix[j]);
121
122                 if (qmat16[qscale][0][i] == 0 ||
123                     qmat16[qscale][0][i] == 128 * 256)
124                     qmat16[qscale][0][i] = 128 * 256 - 1;
125                 qmat16[qscale][1][i] =
126                     ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
127                                 qmat16[qscale][0][i]);
128             }
129         }
130
131         for (i = intra; i < 64; i++) {
132             int64_t max = 8191;
133             if (dsp->fdct == ff_fdct_ifast) {
134                 max = (8191LL * ff_aanscales[i]) >> 14;
135             }
136             while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
137                 shift++;
138             }
139         }
140     }
141     if (shift) {
142         av_log(NULL, AV_LOG_INFO,
143                "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
144                QMAT_SHIFT - shift);
145     }
146 }
147
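/**
 * Derive the quantizer from the current lambda (roughly
 * lambda / FF_QP2LAMBDA with rounding), clamp it to the qmin/qmax range
 * and refresh lambda2.
 */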
148 static inline void update_qscale(MpegEncContext *s)
149 {
150     s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
151                 (FF_LAMBDA_SHIFT + 7);
152     s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
153
154     s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
155                  FF_LAMBDA_SHIFT;
156 }
157
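/**
 * Write a custom quant matrix to the bitstream: a '1' marker bit followed
 * by the 64 coefficients in zigzag order, or a single '0' bit when the
 * default matrix is used.
 */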
158 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
159 {
160     int i;
161
162     if (matrix) {
163         put_bits(pb, 1, 1);
164         for (i = 0; i < 64; i++) {
165             put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
166         }
167     } else
168         put_bits(pb, 1, 0);
169 }
170
171 /**
172  * init s->current_picture.qscale_table from s->lambda_table
173  */
174 void ff_init_qscale_tab(MpegEncContext *s)
175 {
176     int8_t * const qscale_table = s->current_picture.f.qscale_table;
177     int i;
178
179     for (i = 0; i < s->mb_num; i++) {
180         unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
181         int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
182         qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
183                                                   s->avctx->qmax);
184     }
185 }
186
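/**
 * Copy the frame properties (picture type, quality, timestamps, interlacing
 * flags) from the user supplied frame into the internal picture; motion
 * vectors, macroblock types and reference indices are copied as well when
 * me_threshold is set.
 */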
187 static void copy_picture_attributes(MpegEncContext *s,
188                                     AVFrame *dst,
189                                     AVFrame *src)
190 {
191     int i;
192
193     dst->pict_type              = src->pict_type;
194     dst->quality                = src->quality;
195     dst->coded_picture_number   = src->coded_picture_number;
196     dst->display_picture_number = src->display_picture_number;
197     //dst->reference              = src->reference;
198     dst->pts                    = src->pts;
199     dst->interlaced_frame       = src->interlaced_frame;
200     dst->top_field_first        = src->top_field_first;
201
202     if (s->avctx->me_threshold) {
203         if (!src->motion_val[0])
204             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
205         if (!src->mb_type)
206             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
207         if (!src->ref_index[0])
208             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
209         if (src->motion_subsample_log2 != dst->motion_subsample_log2)
210             av_log(s->avctx, AV_LOG_ERROR,
211                    "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
212                    src->motion_subsample_log2, dst->motion_subsample_log2);
213
214         memcpy(dst->mb_type, src->mb_type,
215                s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
216
217         for (i = 0; i < 2; i++) {
218             int stride = ((16 * s->mb_width ) >>
219                           src->motion_subsample_log2) + 1;
220             int height = ((16 * s->mb_height) >> src->motion_subsample_log2);
221
222             if (src->motion_val[i] &&
223                 src->motion_val[i] != dst->motion_val[i]) {
224                 memcpy(dst->motion_val[i], src->motion_val[i],
225                        2 * stride * height * sizeof(int16_t));
226             }
227             if (src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]) {
228                 memcpy(dst->ref_index[i], src->ref_index[i],
229                        s->mb_stride * 4 * s->mb_height * sizeof(int8_t));
230             }
231         }
232     }
233 }
234
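/**
 * Copy the picture wide fields decided during motion estimation and header
 * setup (picture type, f/b codes, qscale, lambda, GOP counters) from the
 * main context into a slice thread context.
 */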
235 static void update_duplicate_context_after_me(MpegEncContext *dst,
236                                               MpegEncContext *src)
237 {
238 #define COPY(a) dst->a= src->a
239     COPY(pict_type);
240     COPY(current_picture);
241     COPY(f_code);
242     COPY(b_code);
243     COPY(qscale);
244     COPY(lambda);
245     COPY(lambda2);
246     COPY(picture_in_gop_number);
247     COPY(gop_picture_number);
248     COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
249     COPY(progressive_frame);    // FIXME don't set in encode_header
250     COPY(partitioned_frame);    // FIXME don't set in encode_header
251 #undef COPY
252 }
253
254 /**
255  * Set the given MpegEncContext to defaults for encoding.
256  * The changed fields will not depend upon the prior state of the MpegEncContext.
257  */
258 static void MPV_encode_defaults(MpegEncContext *s)
259 {
260     int i;
261     ff_MPV_common_defaults(s);
262
263     for (i = -16; i < 16; i++) {
264         default_fcode_tab[i + MAX_MV] = 1;
265     }
266     s->me.mv_penalty = default_mv_penalty;
267     s->fcode_tab     = default_fcode_tab;
268 }
269
270 /* init video encoder */
271 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
272 {
273     MpegEncContext *s = avctx->priv_data;
274     int i;
275     int chroma_h_shift, chroma_v_shift;
276
277     MPV_encode_defaults(s);
278
279     switch (avctx->codec_id) {
280     case AV_CODEC_ID_MPEG2VIDEO:
281         if (avctx->pix_fmt != PIX_FMT_YUV420P &&
282             avctx->pix_fmt != PIX_FMT_YUV422P) {
283             av_log(avctx, AV_LOG_ERROR,
284                    "only YUV420 and YUV422 are supported\n");
285             return -1;
286         }
287         break;
288     case AV_CODEC_ID_LJPEG:
289         if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
290             avctx->pix_fmt != PIX_FMT_YUVJ422P &&
291             avctx->pix_fmt != PIX_FMT_YUVJ444P &&
292             avctx->pix_fmt != PIX_FMT_BGRA     &&
293             ((avctx->pix_fmt != PIX_FMT_YUV420P &&
294               avctx->pix_fmt != PIX_FMT_YUV422P &&
295               avctx->pix_fmt != PIX_FMT_YUV444P) ||
296              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
297             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
298             return -1;
299         }
300         break;
301     case AV_CODEC_ID_MJPEG:
302         if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
303             avctx->pix_fmt != PIX_FMT_YUVJ422P &&
304             ((avctx->pix_fmt != PIX_FMT_YUV420P &&
305               avctx->pix_fmt != PIX_FMT_YUV422P) ||
306              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
307             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
308             return -1;
309         }
310         break;
311     default:
312         if (avctx->pix_fmt != PIX_FMT_YUV420P) {
313             av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
314             return -1;
315         }
316     }
317
318     switch (avctx->pix_fmt) {
319     case PIX_FMT_YUVJ422P:
320     case PIX_FMT_YUV422P:
321         s->chroma_format = CHROMA_422;
322         break;
323     case PIX_FMT_YUVJ420P:
324     case PIX_FMT_YUV420P:
325     default:
326         s->chroma_format = CHROMA_420;
327         break;
328     }
329
330     s->bit_rate = avctx->bit_rate;
331     s->width    = avctx->width;
332     s->height   = avctx->height;
333     if (avctx->gop_size > 600 &&
334         avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
335         av_log(avctx, AV_LOG_ERROR,
336                "Warning: keyframe interval too large, reducing it ...\n");
337         avctx->gop_size = 600;
338     }
339     s->gop_size     = avctx->gop_size;
340     s->avctx        = avctx;
341     s->flags        = avctx->flags;
342     s->flags2       = avctx->flags2;
343     s->max_b_frames = avctx->max_b_frames;
344     s->codec_id     = avctx->codec->id;
345 #if FF_API_MPV_GLOBAL_OPTS
346     if (avctx->luma_elim_threshold)
347         s->luma_elim_threshold   = avctx->luma_elim_threshold;
348     if (avctx->chroma_elim_threshold)
349         s->chroma_elim_threshold = avctx->chroma_elim_threshold;
350 #endif
351     s->strict_std_compliance = avctx->strict_std_compliance;
352     s->quarter_sample     = (avctx->flags & CODEC_FLAG_QPEL) != 0;
353     s->mpeg_quant         = avctx->mpeg_quant;
354     s->rtp_mode           = !!avctx->rtp_payload_size;
355     s->intra_dc_precision = avctx->intra_dc_precision;
356     s->user_specified_pts = AV_NOPTS_VALUE;
357
358     if (s->gop_size <= 1) {
359         s->intra_only = 1;
360         s->gop_size   = 12;
361     } else {
362         s->intra_only = 0;
363     }
364
365     s->me_method = avctx->me_method;
366
367     /* Fixed QSCALE */
368     s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
369
370 #if FF_API_MPV_GLOBAL_OPTS
371     if (s->flags & CODEC_FLAG_QP_RD)
372         s->mpv_flags |= FF_MPV_FLAG_QP_RD;
373 #endif
374
375     s->adaptive_quant = (s->avctx->lumi_masking ||
376                          s->avctx->dark_masking ||
377                          s->avctx->temporal_cplx_masking ||
378                          s->avctx->spatial_cplx_masking  ||
379                          s->avctx->p_masking      ||
380                          s->avctx->border_masking ||
381                          (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
382                         !s->fixed_qscale;
383
384     s->loop_filter      = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
385
386     if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
387         av_log(avctx, AV_LOG_ERROR,
388                "a vbv buffer size is needed "
389                "for encoding with a maximum bitrate\n");
390         return -1;
391     }
392
393     if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
394         av_log(avctx, AV_LOG_INFO,
395                "Warning: min_rate > 0 but min_rate != max_rate is not recommended!\n");
396     }
397
398     if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
399         av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
400         return -1;
401     }
402
403     if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
404         av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
405         return -1;
406     }
407
408     if (avctx->rc_max_rate &&
409         avctx->rc_max_rate == avctx->bit_rate &&
410         avctx->rc_max_rate != avctx->rc_min_rate) {
411         av_log(avctx, AV_LOG_INFO,
412                "impossible bitrate constraints, this will fail\n");
413     }
414
415     if (avctx->rc_buffer_size &&
416         avctx->bit_rate * (int64_t)avctx->time_base.num >
417             avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
418         av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
419         return -1;
420     }
421
422     if (!s->fixed_qscale &&
423         avctx->bit_rate * av_q2d(avctx->time_base) >
424             avctx->bit_rate_tolerance) {
425         av_log(avctx, AV_LOG_ERROR,
426                "bitrate tolerance too small for bitrate\n");
427         return -1;
428     }
429
430     if (s->avctx->rc_max_rate &&
431         s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
432         (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
433          s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
434         90000LL * (avctx->rc_buffer_size - 1) >
435             s->avctx->rc_max_rate * 0xFFFFLL) {
436         av_log(avctx, AV_LOG_INFO,
437                "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
438                "specified vbv buffer is too large for the given bitrate!\n");
439     }
440
441     if ((s->flags & CODEC_FLAG_4MV)  && s->codec_id != AV_CODEC_ID_MPEG4 &&
442         s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
443         s->codec_id != AV_CODEC_ID_FLV1) {
444         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
445         return -1;
446     }
447
448     if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
449         av_log(avctx, AV_LOG_ERROR,
450                "OBMC is only supported with simple mb decision\n");
451         return -1;
452     }
453
454     if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
455         av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
456         return -1;
457     }
458
459     if (s->max_b_frames                    &&
460         s->codec_id != AV_CODEC_ID_MPEG4      &&
461         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
462         s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
463         av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
464         return -1;
465     }
466
467     if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
468          s->codec_id == AV_CODEC_ID_H263  ||
469          s->codec_id == AV_CODEC_ID_H263P) &&
470         (avctx->sample_aspect_ratio.num > 255 ||
471          avctx->sample_aspect_ratio.den > 255)) {
472         av_log(avctx, AV_LOG_ERROR,
473                "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
474                avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
475         return -1;
476     }
477
478     if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
479         s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
480         av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
481         return -1;
482     }
483
484     // FIXME mpeg2 uses that too
485     if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
486         av_log(avctx, AV_LOG_ERROR,
487                "mpeg2 style quantization not supported by codec\n");
488         return -1;
489     }
490
491 #if FF_API_MPV_GLOBAL_OPTS
492     if (s->flags & CODEC_FLAG_CBP_RD)
493         s->mpv_flags |= FF_MPV_FLAG_CBP_RD;
494 #endif
495
496     if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
497         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
498         return -1;
499     }
500
501     if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
502         s->avctx->mb_decision != FF_MB_DECISION_RD) {
503         av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
504         return -1;
505     }
506
507     if (s->avctx->scenechange_threshold < 1000000000 &&
508         (s->flags & CODEC_FLAG_CLOSED_GOP)) {
509         av_log(avctx, AV_LOG_ERROR,
510                "closed GOP with scene change detection is not supported yet, "
511                "set scenechange_threshold to 1000000000\n");
512         return -1;
513     }
514
515     if (s->flags & CODEC_FLAG_LOW_DELAY) {
516         if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
517             av_log(avctx, AV_LOG_ERROR,
518                   "low delay forcing is only available for mpeg2\n");
519             return -1;
520         }
521         if (s->max_b_frames != 0) {
522             av_log(avctx, AV_LOG_ERROR,
523                    "b frames cannot be used with low delay\n");
524             return -1;
525         }
526     }
527
528     if (s->q_scale_type == 1) {
529         if (avctx->qmax > 12) {
530             av_log(avctx, AV_LOG_ERROR,
531                    "non linear quant only supports qmax <= 12 currently\n");
532             return -1;
533         }
534     }
535
536     if (s->avctx->thread_count > 1         &&
537         s->codec_id != AV_CODEC_ID_MPEG4      &&
538         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
539         s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
540         (s->codec_id != AV_CODEC_ID_H263P)) {
541         av_log(avctx, AV_LOG_ERROR,
542                "multi threaded encoding not supported by codec\n");
543         return -1;
544     }
545
546     if (s->avctx->thread_count < 1) {
547         av_log(avctx, AV_LOG_ERROR,
548                "automatic thread number detection not supported by codec, "
549                "patch welcome\n");
550         return -1;
551     }
552
553     if (s->avctx->thread_count > 1)
554         s->rtp_mode = 1;
555
556     if (!avctx->time_base.den || !avctx->time_base.num) {
557         av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
558         return -1;
559     }
560
561     i = (INT_MAX / 2 + 128) >> 8;
562     if (avctx->me_threshold >= i) {
563         av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n",
564                i - 1);
565         return -1;
566     }
567     if (avctx->mb_threshold >= i) {
568         av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
569                i - 1);
570         return -1;
571     }
572
573     if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
574         av_log(avctx, AV_LOG_INFO,
575                "notice: b_frame_strategy only affects the first pass\n");
576         avctx->b_frame_strategy = 0;
577     }
578
579     i = av_gcd(avctx->time_base.den, avctx->time_base.num);
580     if (i > 1) {
581         av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
582         avctx->time_base.den /= i;
583         avctx->time_base.num /= i;
584         //return -1;
585     }
586
587     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
588         s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
589         // (a + x * 3 / 8) / x
590         s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
591         s->inter_quant_bias = 0;
592     } else {
593         s->intra_quant_bias = 0;
594         // (a - x / 4) / x
595         s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
596     }
597
598     if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
599         s->intra_quant_bias = avctx->intra_quant_bias;
600     if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
601         s->inter_quant_bias = avctx->inter_quant_bias;
602
603     avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
604                                   &chroma_v_shift);
605
606     if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
607         s->avctx->time_base.den > (1 << 16) - 1) {
608         av_log(avctx, AV_LOG_ERROR,
609                "timebase %d/%d not supported by the MPEG-4 standard, "
610                "the maximum allowed value for the timebase denominator "
611                "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
612                (1 << 16) - 1);
613         return -1;
614     }
615     s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
616
617 #if FF_API_MPV_GLOBAL_OPTS
618     if (avctx->flags2 & CODEC_FLAG2_SKIP_RD)
619         s->mpv_flags |= FF_MPV_FLAG_SKIP_RD;
620     if (avctx->flags2 & CODEC_FLAG2_STRICT_GOP)
621         s->mpv_flags |= FF_MPV_FLAG_STRICT_GOP;
622     if (avctx->quantizer_noise_shaping)
623         s->quantizer_noise_shaping = avctx->quantizer_noise_shaping;
624 #endif
625
626     switch (avctx->codec->id) {
627     case AV_CODEC_ID_MPEG1VIDEO:
628         s->out_format = FMT_MPEG1;
629         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
630         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
631         break;
632     case AV_CODEC_ID_MPEG2VIDEO:
633         s->out_format = FMT_MPEG1;
634         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
635         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
636         s->rtp_mode   = 1;
637         break;
638     case AV_CODEC_ID_LJPEG:
639     case AV_CODEC_ID_MJPEG:
640         s->out_format = FMT_MJPEG;
641         s->intra_only = 1; /* force intra only for jpeg */
642         if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
643             avctx->pix_fmt   == PIX_FMT_BGRA) {
644             s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
645             s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
646             s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
647         } else {
648             s->mjpeg_vsample[0] = 2;
649             s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
650             s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
651             s->mjpeg_hsample[0] = 2;
652             s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
653             s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
654         }
655         if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
656             ff_mjpeg_encode_init(s) < 0)
657             return -1;
658         avctx->delay = 0;
659         s->low_delay = 1;
660         break;
661     case AV_CODEC_ID_H261:
662         if (!CONFIG_H261_ENCODER)
663             return -1;
664         if (ff_h261_get_picture_format(s->width, s->height) < 0) {
665             av_log(avctx, AV_LOG_ERROR,
666                    "The specified picture size of %dx%d is not valid for the "
667                    "H.261 codec.\nValid sizes are 176x144, 352x288\n",
668                     s->width, s->height);
669             return -1;
670         }
671         s->out_format = FMT_H261;
672         avctx->delay  = 0;
673         s->low_delay  = 1;
674         break;
675     case AV_CODEC_ID_H263:
676         if (!CONFIG_H263_ENCODER)
677             return -1;
678         if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
679                              s->width, s->height) == 8) {
680             av_log(avctx, AV_LOG_INFO,
681                    "The specified picture size of %dx%d is not valid for "
682                    "the H.263 codec.\nValid sizes are 128x96, 176x144, "
683                    "352x288, 704x576, and 1408x1152. "
684                    "Try H.263+.\n", s->width, s->height);
685             return -1;
686         }
687         s->out_format = FMT_H263;
688         avctx->delay  = 0;
689         s->low_delay  = 1;
690         break;
691     case AV_CODEC_ID_H263P:
692         s->out_format = FMT_H263;
693         s->h263_plus  = 1;
694         /* Fx */
695         s->h263_aic        = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
696         s->modified_quant  = s->h263_aic;
697         s->loop_filter     = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
698         s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
699
700         /* /Fx */
701         /* These are just to be sure */
702         avctx->delay = 0;
703         s->low_delay = 1;
704         break;
705     case AV_CODEC_ID_FLV1:
706         s->out_format      = FMT_H263;
707         s->h263_flv        = 2; /* format = 1; 11-bit codes */
708         s->unrestricted_mv = 1;
709         s->rtp_mode  = 0; /* don't allow GOB */
710         avctx->delay = 0;
711         s->low_delay = 1;
712         break;
713     case AV_CODEC_ID_RV10:
714         s->out_format = FMT_H263;
715         avctx->delay  = 0;
716         s->low_delay  = 1;
717         break;
718     case AV_CODEC_ID_RV20:
719         s->out_format      = FMT_H263;
720         avctx->delay       = 0;
721         s->low_delay       = 1;
722         s->modified_quant  = 1;
723         s->h263_aic        = 1;
724         s->h263_plus       = 1;
725         s->loop_filter     = 1;
726         s->unrestricted_mv = 0;
727         break;
728     case AV_CODEC_ID_MPEG4:
729         s->out_format      = FMT_H263;
730         s->h263_pred       = 1;
731         s->unrestricted_mv = 1;
732         s->low_delay       = s->max_b_frames ? 0 : 1;
733         avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
734         break;
735     case AV_CODEC_ID_MSMPEG4V2:
736         s->out_format      = FMT_H263;
737         s->h263_pred       = 1;
738         s->unrestricted_mv = 1;
739         s->msmpeg4_version = 2;
740         avctx->delay       = 0;
741         s->low_delay       = 1;
742         break;
743     case AV_CODEC_ID_MSMPEG4V3:
744         s->out_format        = FMT_H263;
745         s->h263_pred         = 1;
746         s->unrestricted_mv   = 1;
747         s->msmpeg4_version   = 3;
748         s->flipflop_rounding = 1;
749         avctx->delay         = 0;
750         s->low_delay         = 1;
751         break;
752     case AV_CODEC_ID_WMV1:
753         s->out_format        = FMT_H263;
754         s->h263_pred         = 1;
755         s->unrestricted_mv   = 1;
756         s->msmpeg4_version   = 4;
757         s->flipflop_rounding = 1;
758         avctx->delay         = 0;
759         s->low_delay         = 1;
760         break;
761     case AV_CODEC_ID_WMV2:
762         s->out_format        = FMT_H263;
763         s->h263_pred         = 1;
764         s->unrestricted_mv   = 1;
765         s->msmpeg4_version   = 5;
766         s->flipflop_rounding = 1;
767         avctx->delay         = 0;
768         s->low_delay         = 1;
769         break;
770     default:
771         return -1;
772     }
773
774     avctx->has_b_frames = !s->low_delay;
775
776     s->encoding = 1;
777
778     s->progressive_frame    =
779     s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
780                                                 CODEC_FLAG_INTERLACED_ME) ||
781                                 s->alternate_scan);
782
783     /* init */
784     if (ff_MPV_common_init(s) < 0)
785         return -1;
786
787     if (!s->dct_quantize)
788         s->dct_quantize = ff_dct_quantize_c;
789     if (!s->denoise_dct)
790         s->denoise_dct  = denoise_dct_c;
791     s->fast_dct_quantize = s->dct_quantize;
792     if (avctx->trellis)
793         s->dct_quantize  = dct_quantize_trellis_c;
794
795     if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
796         s->chroma_qscale_table = ff_h263_chroma_qscale_table;
797
798     s->quant_precision = 5;
799
800     ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
801     ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
802
803     if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
804         ff_h261_encode_init(s);
805     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
806         ff_h263_encode_init(s);
807     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
808         ff_msmpeg4_encode_init(s);
809     if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
810         && s->out_format == FMT_MPEG1)
811         ff_mpeg1_encode_init(s);
812
813     /* init q matrix */
814     for (i = 0; i < 64; i++) {
815         int j = s->dsp.idct_permutation[i];
816         if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
817             s->mpeg_quant) {
818             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
819             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
820         } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
821             s->intra_matrix[j] =
822             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
823         } else {
824             /* mpeg1/2 */
825             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
826             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
827         }
828         if (s->avctx->intra_matrix)
829             s->intra_matrix[j] = s->avctx->intra_matrix[i];
830         if (s->avctx->inter_matrix)
831             s->inter_matrix[j] = s->avctx->inter_matrix[i];
832     }
833
834     /* precompute matrix */
835     /* for mjpeg, we do include qscale in the matrix */
836     if (s->out_format != FMT_MJPEG) {
837         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
838                           s->intra_matrix, s->intra_quant_bias, avctx->qmin,
839                           31, 1);
840         ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
841                           s->inter_matrix, s->inter_quant_bias, avctx->qmin,
842                           31, 0);
843     }
844
845     if (ff_rate_control_init(s) < 0)
846         return -1;
847
848     return 0;
849 }
850
851 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
852 {
853     MpegEncContext *s = avctx->priv_data;
854
855     ff_rate_control_uninit(s);
856
857     ff_MPV_common_end(s);
858     if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
859         s->out_format == FMT_MJPEG)
860         ff_mjpeg_encode_close(s);
861
862     av_freep(&avctx->extradata);
863
864     return 0;
865 }
866
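/**
 * Compute the sum of absolute differences between a 16x16 block and a
 * constant reference value (typically the block mean).
 */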
867 static int get_sae(uint8_t *src, int ref, int stride)
868 {
869     int x,y;
870     int acc = 0;
871
872     for (y = 0; y < 16; y++) {
873         for (x = 0; x < 16; x++) {
874             acc += FFABS(src[x + y * stride] - ref);
875         }
876     }
877
878     return acc;
879 }
880
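/**
 * Estimate how many 16x16 blocks of src would be cheaper to code as intra
 * by comparing the SAD against ref with the spread around the block mean.
 */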
881 static int get_intra_count(MpegEncContext *s, uint8_t *src,
882                            uint8_t *ref, int stride)
883 {
884     int x, y, w, h;
885     int acc = 0;
886
887     w = s->width  & ~15;
888     h = s->height & ~15;
889
890     for (y = 0; y < h; y += 16) {
891         for (x = 0; x < w; x += 16) {
892             int offset = x + y * stride;
893             int sad  = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
894                                      16);
895             int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
896             int sae  = get_sae(src + offset, mean, stride);
897
898             acc += sae + 500 < sad;
899         }
900     }
901     return acc;
902 }
903
904
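/**
 * Check the timestamps of the incoming frame, then either reference its
 * buffers directly or copy it into an internal Picture and append it to
 * the input picture FIFO.
 */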
905 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
906 {
907     AVFrame *pic = NULL;
908     int64_t pts;
909     int i;
910     const int encoding_delay = s->max_b_frames ? s->max_b_frames :
911                                                  (s->low_delay ? 0 : 1);
912     int direct = 1;
913
914     if (pic_arg) {
915         pts = pic_arg->pts;
916         pic_arg->display_picture_number = s->input_picture_number++;
917
918         if (pts != AV_NOPTS_VALUE) {
919             if (s->user_specified_pts != AV_NOPTS_VALUE) {
920                 int64_t time = pts;
921                 int64_t last = s->user_specified_pts;
922
923                 if (time <= last) {
924                     av_log(s->avctx, AV_LOG_ERROR,
925                            "Error, Invalid timestamp=%"PRId64", "
926                            "last=%"PRId64"\n", pts, s->user_specified_pts);
927                     return -1;
928                 }
929
930                 if (!s->low_delay && pic_arg->display_picture_number == 1)
931                     s->dts_delta = time - last;
932             }
933             s->user_specified_pts = pts;
934         } else {
935             if (s->user_specified_pts != AV_NOPTS_VALUE) {
936                 s->user_specified_pts =
937                 pts = s->user_specified_pts + 1;
938                 av_log(s->avctx, AV_LOG_INFO,
939                        "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
940                        pts);
941             } else {
942                 pts = pic_arg->display_picture_number;
943             }
944         }
945     }
946
947   if (pic_arg) {
948     if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
949         direct = 0;
950     if (pic_arg->linesize[0] != s->linesize)
951         direct = 0;
952     if (pic_arg->linesize[1] != s->uvlinesize)
953         direct = 0;
954     if (pic_arg->linesize[2] != s->uvlinesize)
955         direct = 0;
956
957     //av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0],
958     //       pic_arg->linesize[1], s->linesize, s->uvlinesize);
959
960     if (direct) {
961         i = ff_find_unused_picture(s, 1);
962         if (i < 0)
963             return i;
964
965         pic = &s->picture[i].f;
966         pic->reference = 3;
967
968         for (i = 0; i < 4; i++) {
969             pic->data[i]     = pic_arg->data[i];
970             pic->linesize[i] = pic_arg->linesize[i];
971         }
972         if (ff_alloc_picture(s, (Picture *) pic, 1) < 0) {
973             return -1;
974         }
975     } else {
976         i = ff_find_unused_picture(s, 0);
977         if (i < 0)
978             return i;
979
980         pic = &s->picture[i].f;
981         pic->reference = 3;
982
983         if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
984             return -1;
985         }
986
987         if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
988             pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
989             pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
990             // empty
991         } else {
992             int h_chroma_shift, v_chroma_shift;
993             avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift,
994                                           &v_chroma_shift);
995
996             for (i = 0; i < 3; i++) {
997                 int src_stride = pic_arg->linesize[i];
998                 int dst_stride = i ? s->uvlinesize : s->linesize;
999                 int h_shift = i ? h_chroma_shift : 0;
1000                 int v_shift = i ? v_chroma_shift : 0;
1001                 int w = s->width  >> h_shift;
1002                 int h = s->height >> v_shift;
1003                 uint8_t *src = pic_arg->data[i];
1004                 uint8_t *dst = pic->data[i];
1005
1006                 if (!s->avctx->rc_buffer_size)
1007                     dst += INPLACE_OFFSET;
1008
1009                 if (src_stride == dst_stride)
1010                     memcpy(dst, src, src_stride * h);
1011                 else {
1012                     while (h--) {
1013                         memcpy(dst, src, w);
1014                         dst += dst_stride;
1015                         src += src_stride;
1016                     }
1017                 }
1018             }
1019         }
1020     }
1021     copy_picture_attributes(s, pic, pic_arg);
1022                 pic->pts = pts; // we set this here to avoid modifying pic_arg
1023   }
1024
1025     /* shift buffer entries */
1026     for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1027         s->input_picture[i - 1] = s->input_picture[i];
1028
1029     s->input_picture[encoding_delay] = (Picture*) pic;
1030
1031     return 0;
1032 }
1033
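/**
 * Decide whether the candidate frame is close enough to the reference to
 * be skipped: per-block frame_skip_cmp scores are accumulated according
 * to frame_skip_exp and tested against the skip thresholds.
 */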
1034 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1035 {
1036     int x, y, plane;
1037     int score = 0;
1038     int64_t score64 = 0;
1039
1040     for (plane = 0; plane < 3; plane++) {
1041         const int stride = p->f.linesize[plane];
1042         const int bw = plane ? 1 : 2;
1043         for (y = 0; y < s->mb_height * bw; y++) {
1044             for (x = 0; x < s->mb_width * bw; x++) {
1045                 int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
1046                 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
1047                 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
1048                 int v   = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1049
1050                 switch (s->avctx->frame_skip_exp) {
1051                 case 0: score    =  FFMAX(score, v);          break;
1052                 case 1: score   += FFABS(v);                  break;
1053                 case 2: score   += v * v;                     break;
1054                 case 3: score64 += FFABS(v * v * (int64_t)v); break;
1055                 case 4: score64 += v * v * (int64_t)(v * v);  break;
1056                 }
1057             }
1058         }
1059     }
1060
1061     if (score)
1062         score64 = score;
1063
1064     if (score64 < s->avctx->frame_skip_threshold)
1065         return 1;
1066     if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
1067         return 1;
1068     return 0;
1069 }
1070
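/**
 * Encode one frame with the scratch encoder used by b_frame_strategy 2
 * and return the compressed size in bytes.
 */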
1071 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1072 {
1073     AVPacket pkt = { 0 };
1074     int ret, got_output;
1075
1076     av_init_packet(&pkt);
1077     ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1078     if (ret < 0)
1079         return ret;
1080
1081     ret = pkt.size;
1082     av_free_packet(&pkt);
1083     return ret;
1084 }
1085
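/**
 * Estimate how many consecutive B-frames to use by encoding the buffered
 * input pictures at reduced resolution with each possible B-frame count
 * and picking the one with the lowest rate-distortion cost.
 */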
1086 static int estimate_best_b_count(MpegEncContext *s)
1087 {
1088     AVCodec *codec    = avcodec_find_encoder(s->avctx->codec_id);
1089     AVCodecContext *c = avcodec_alloc_context3(NULL);
1090     AVFrame input[FF_MAX_B_FRAMES + 2];
1091     const int scale = s->avctx->brd_scale;
1092     int i, j, out_size, p_lambda, b_lambda, lambda2;
1093     int64_t best_rd  = INT64_MAX;
1094     int best_b_count = -1;
1095
1096     assert(scale >= 0 && scale <= 3);
1097
1098     //emms_c();
1099     //s->next_picture_ptr->quality;
1100     p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1101     //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1102     b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1103     if (!b_lambda) // FIXME we should do this somewhere else
1104         b_lambda = p_lambda;
1105     lambda2  = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1106                FF_LAMBDA_SHIFT;
1107
1108     c->width        = s->width  >> scale;
1109     c->height       = s->height >> scale;
1110     c->flags        = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1111                       CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1112     c->flags       |= s->avctx->flags & CODEC_FLAG_QPEL;
1113     c->mb_decision  = s->avctx->mb_decision;
1114     c->me_cmp       = s->avctx->me_cmp;
1115     c->mb_cmp       = s->avctx->mb_cmp;
1116     c->me_sub_cmp   = s->avctx->me_sub_cmp;
1117     c->pix_fmt      = PIX_FMT_YUV420P;
1118     c->time_base    = s->avctx->time_base;
1119     c->max_b_frames = s->max_b_frames;
1120
1121     if (avcodec_open2(c, codec, NULL) < 0)
1122         return -1;
1123
1124     for (i = 0; i < s->max_b_frames + 2; i++) {
1125         int ysize = c->width * c->height;
1126         int csize = (c->width / 2) * (c->height / 2);
1127         Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1128                                                 s->next_picture_ptr;
1129
1130         avcodec_get_frame_defaults(&input[i]);
1131         input[i].data[0]     = av_malloc(ysize + 2 * csize);
1132         input[i].data[1]     = input[i].data[0] + ysize;
1133         input[i].data[2]     = input[i].data[1] + csize;
1134         input[i].linesize[0] = c->width;
1135         input[i].linesize[1] =
1136         input[i].linesize[2] = c->width / 2;
1137
1138         if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1139             pre_input = *pre_input_ptr;
1140
1141             if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
1142                 pre_input.f.data[0] += INPLACE_OFFSET;
1143                 pre_input.f.data[1] += INPLACE_OFFSET;
1144                 pre_input.f.data[2] += INPLACE_OFFSET;
1145             }
1146
1147             s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
1148                                  pre_input.f.data[0], pre_input.f.linesize[0],
1149                                  c->width,      c->height);
1150             s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
1151                                  pre_input.f.data[1], pre_input.f.linesize[1],
1152                                  c->width >> 1, c->height >> 1);
1153             s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
1154                                  pre_input.f.data[2], pre_input.f.linesize[2],
1155                                  c->width >> 1, c->height >> 1);
1156         }
1157     }
1158
1159     for (j = 0; j < s->max_b_frames + 1; j++) {
1160         int64_t rd = 0;
1161
1162         if (!s->input_picture[j])
1163             break;
1164
1165         c->error[0] = c->error[1] = c->error[2] = 0;
1166
1167         input[0].pict_type = AV_PICTURE_TYPE_I;
1168         input[0].quality   = 1 * FF_QP2LAMBDA;
1169
1170         out_size = encode_frame(c, &input[0]);
1171
1172         //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1173
1174         for (i = 0; i < s->max_b_frames + 1; i++) {
1175             int is_p = i % (j + 1) == j || i == s->max_b_frames;
1176
1177             input[i + 1].pict_type = is_p ?
1178                                      AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1179             input[i + 1].quality   = is_p ? p_lambda : b_lambda;
1180
1181             out_size = encode_frame(c, &input[i + 1]);
1182
1183             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1184         }
1185
1186         /* get the delayed frames */
1187         while (out_size) {
1188             out_size = encode_frame(c, NULL);
1189             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1190         }
1191
1192         rd += c->error[0] + c->error[1] + c->error[2];
1193
1194         if (rd < best_rd) {
1195             best_rd = rd;
1196             best_b_count = j;
1197         }
1198     }
1199
1200     avcodec_close(c);
1201     av_freep(&c);
1202
1203     for (i = 0; i < s->max_b_frames + 2; i++) {
1204         av_freep(&input[i].data[0]);
1205     }
1206
1207     return best_b_count;
1208 }
1209
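/**
 * Pick the next picture to be coded: decide between I/P coding and the
 * number of B-frames to insert, reorder the input FIFO accordingly and
 * set up s->new_picture and s->current_picture_ptr.
 */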
1210 static int select_input_picture(MpegEncContext *s)
1211 {
1212     int i;
1213
1214     for (i = 1; i < MAX_PICTURE_COUNT; i++)
1215         s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1216     s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1217
1218     /* set next picture type & ordering */
1219     if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
1220         if (/*s->picture_in_gop_number >= s->gop_size ||*/
1221             s->next_picture_ptr == NULL || s->intra_only) {
1222             s->reordered_input_picture[0] = s->input_picture[0];
1223             s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1224             s->reordered_input_picture[0]->f.coded_picture_number =
1225                 s->coded_picture_number++;
1226         } else {
1227             int b_frames;
1228
1229             if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1230                 if (s->picture_in_gop_number < s->gop_size &&
1231                     skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1232                     // FIXME check that the gop check above is +-1 correct
1233                     //av_log(NULL, AV_LOG_DEBUG, "skip %p %"PRId64"\n",
1234                     //       s->input_picture[0]->f.data[0],
1235                     //       s->input_picture[0]->pts);
1236
1237                     if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
1238                         for (i = 0; i < 4; i++)
1239                             s->input_picture[0]->f.data[i] = NULL;
1240                         s->input_picture[0]->f.type = 0;
1241                     } else {
1242                         assert(s->input_picture[0]->f.type == FF_BUFFER_TYPE_USER ||
1243                                s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);
1244
1245                         s->avctx->release_buffer(s->avctx,
1246                                                  &s->input_picture[0]->f);
1247                     }
1248
1249                     emms_c();
1250                     ff_vbv_update(s, 0);
1251
1252                     goto no_output_pic;
1253                 }
1254             }
1255
1256             if (s->flags & CODEC_FLAG_PASS2) {
1257                 for (i = 0; i < s->max_b_frames + 1; i++) {
1258                     int pict_num = s->input_picture[0]->f.display_picture_number + i;
1259
1260                     if (pict_num >= s->rc_context.num_entries)
1261                         break;
1262                     if (!s->input_picture[i]) {
1263                         s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1264                         break;
1265                     }
1266
1267                     s->input_picture[i]->f.pict_type =
1268                         s->rc_context.entry[pict_num].new_pict_type;
1269                 }
1270             }
1271
1272             if (s->avctx->b_frame_strategy == 0) {
1273                 b_frames = s->max_b_frames;
1274                 while (b_frames && !s->input_picture[b_frames])
1275                     b_frames--;
1276             } else if (s->avctx->b_frame_strategy == 1) {
1277                 for (i = 1; i < s->max_b_frames + 1; i++) {
1278                     if (s->input_picture[i] &&
1279                         s->input_picture[i]->b_frame_score == 0) {
1280                         s->input_picture[i]->b_frame_score =
1281                             get_intra_count(s,
1282                                             s->input_picture[i    ]->f.data[0],
1283                                             s->input_picture[i - 1]->f.data[0],
1284                                             s->linesize) + 1;
1285                     }
1286                 }
1287                 for (i = 0; i < s->max_b_frames + 1; i++) {
1288                     if (s->input_picture[i] == NULL ||
1289                         s->input_picture[i]->b_frame_score - 1 >
1290                             s->mb_num / s->avctx->b_sensitivity)
1291                         break;
1292                 }
1293
1294                 b_frames = FFMAX(0, i - 1);
1295
1296                 /* reset scores */
1297                 for (i = 0; i < b_frames + 1; i++) {
1298                     s->input_picture[i]->b_frame_score = 0;
1299                 }
1300             } else if (s->avctx->b_frame_strategy == 2) {
1301                 b_frames = estimate_best_b_count(s);
1302             } else {
1303                 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1304                 b_frames = 0;
1305             }
1306
1307             emms_c();
1308             //static int b_count = 0;
1309             //b_count += b_frames;
1310             //av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
1311
1312             for (i = b_frames - 1; i >= 0; i--) {
1313                 int type = s->input_picture[i]->f.pict_type;
1314                 if (type && type != AV_PICTURE_TYPE_B)
1315                     b_frames = i;
1316             }
1317             if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1318                 b_frames == s->max_b_frames) {
1319                 av_log(s->avctx, AV_LOG_ERROR,
1320                        "warning, too many b frames in a row\n");
1321             }
1322
1323             if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1324                 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1325                     s->gop_size > s->picture_in_gop_number) {
1326                     b_frames = s->gop_size - s->picture_in_gop_number - 1;
1327                 } else {
1328                     if (s->flags & CODEC_FLAG_CLOSED_GOP)
1329                         b_frames = 0;
1330                     s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1331                 }
1332             }
1333
1334             if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1335                 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
1336                 b_frames--;
1337
1338             s->reordered_input_picture[0] = s->input_picture[b_frames];
1339             if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1340                 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1341             s->reordered_input_picture[0]->f.coded_picture_number =
1342                 s->coded_picture_number++;
1343             for (i = 0; i < b_frames; i++) {
1344                 s->reordered_input_picture[i + 1] = s->input_picture[i];
1345                 s->reordered_input_picture[i + 1]->f.pict_type =
1346                     AV_PICTURE_TYPE_B;
1347                 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1348                     s->coded_picture_number++;
1349             }
1350         }
1351     }
1352 no_output_pic:
1353     if (s->reordered_input_picture[0]) {
1354         s->reordered_input_picture[0]->f.reference =
1355            s->reordered_input_picture[0]->f.pict_type !=
1356                AV_PICTURE_TYPE_B ? 3 : 0;
1357
1358         ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
1359
1360         if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED ||
1361             s->avctx->rc_buffer_size) {
1362             // input is a shared pix, so we can't modify it -> allocate a new
1363             // one & ensure that the shared one is reusable
1364
1365             Picture *pic;
1366             int i = ff_find_unused_picture(s, 0);
1367             if (i < 0)
1368                 return i;
1369             pic = &s->picture[i];
1370
1371             pic->f.reference = s->reordered_input_picture[0]->f.reference;
1372             if (ff_alloc_picture(s, pic, 0) < 0) {
1373                 return -1;
1374             }
1375
1376             /* mark us unused / free shared pic */
1377             if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
1378                 s->avctx->release_buffer(s->avctx,
1379                                          &s->reordered_input_picture[0]->f);
1380             for (i = 0; i < 4; i++)
1381                 s->reordered_input_picture[0]->f.data[i] = NULL;
1382             s->reordered_input_picture[0]->f.type = 0;
1383
1384             copy_picture_attributes(s, &pic->f,
1385                                     &s->reordered_input_picture[0]->f);
1386
1387             s->current_picture_ptr = pic;
1388         } else {
1389             // input is not a shared pix -> reuse buffer for current_pix
1390
1391             assert(s->reordered_input_picture[0]->f.type ==
1392                        FF_BUFFER_TYPE_USER ||
1393                    s->reordered_input_picture[0]->f.type ==
1394                        FF_BUFFER_TYPE_INTERNAL);
1395
1396             s->current_picture_ptr = s->reordered_input_picture[0];
1397             for (i = 0; i < 4; i++) {
1398                 s->new_picture.f.data[i] += INPLACE_OFFSET;
1399             }
1400         }
1401         ff_copy_picture(&s->current_picture, s->current_picture_ptr);
1402
1403         s->picture_number = s->new_picture.f.display_picture_number;
1404         //printf("dpn:%d\n", s->picture_number);
1405     } else {
1406         memset(&s->new_picture, 0, sizeof(Picture));
1407     }
1408     return 0;
1409 }
1410
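/**
 * Main encoding entry point: queue the new input frame, select the picture
 * to code, encode it and, if the packet would overflow the VBV buffer,
 * retry with a larger quantizer.
 */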
1411 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1412                           const AVFrame *pic_arg, int *got_packet)
1413 {
1414     MpegEncContext *s = avctx->priv_data;
1415     int i, stuffing_count, ret;
1416     int context_count = s->slice_context_count;
1417
1418     s->picture_in_gop_number++;
1419
1420     if (load_input_picture(s, pic_arg) < 0)
1421         return -1;
1422
1423     if (select_input_picture(s) < 0) {
1424         return -1;
1425     }
1426
1427     /* output? */
1428     if (s->new_picture.f.data[0]) {
1429         if (!pkt->data &&
1430             (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
1431             return ret;
1432         if (s->mb_info) {
1433             s->mb_info_ptr = av_packet_new_side_data(pkt,
1434                                  AV_PKT_DATA_H263_MB_INFO,
1435                                  s->mb_width*s->mb_height*12);
1436             s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1437         }
1438
1439         for (i = 0; i < context_count; i++) {
1440             int start_y = s->thread_context[i]->start_mb_y;
1441             int   end_y = s->thread_context[i]->  end_mb_y;
1442             int h       = s->mb_height;
1443             uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1444             uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) *   end_y / h);
1445
1446             init_put_bits(&s->thread_context[i]->pb, start, end - start);
1447         }
1448
1449         s->pict_type = s->new_picture.f.pict_type;
1450         //emms_c();
1451         //printf("qs:%f %f %d\n", s->new_picture.quality,
1452         //       s->current_picture.quality, s->qscale);
1453         ff_MPV_frame_start(s, avctx);
1454 vbv_retry:
1455         if (encode_picture(s, s->picture_number) < 0)
1456             return -1;
1457
1458         avctx->header_bits = s->header_bits;
1459         avctx->mv_bits     = s->mv_bits;
1460         avctx->misc_bits   = s->misc_bits;
1461         avctx->i_tex_bits  = s->i_tex_bits;
1462         avctx->p_tex_bits  = s->p_tex_bits;
1463         avctx->i_count     = s->i_count;
1464         // FIXME f/b_count in avctx
1465         avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
1466         avctx->skip_count  = s->skip_count;
1467
1468         ff_MPV_frame_end(s);
1469
1470         if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1471             ff_mjpeg_encode_picture_trailer(s);
1472
1473         if (avctx->rc_buffer_size) {
1474             RateControlContext *rcc = &s->rc_context;
1475             int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1476
1477             if (put_bits_count(&s->pb) > max_size &&
1478                 s->lambda < s->avctx->lmax) {
1479                 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1480                                        (s->qscale + 1) / s->qscale);
1481                 if (s->adaptive_quant) {
1482                     int i;
1483                     for (i = 0; i < s->mb_height * s->mb_stride; i++)
1484                         s->lambda_table[i] =
1485                             FFMAX(s->lambda_table[i] + 1,
1486                                   s->lambda_table[i] * (s->qscale + 1) /
1487                                   s->qscale);
1488                 }
1489                 s->mb_skipped = 0;        // done in MPV_frame_start()
1490                 // the no_rounding toggle is done in encode_picture(), so undo it here
1491                 if (s->pict_type == AV_PICTURE_TYPE_P) {
1492                     if (s->flipflop_rounding          ||
1493                         s->codec_id == AV_CODEC_ID_H263P ||
1494                         s->codec_id == AV_CODEC_ID_MPEG4)
1495                         s->no_rounding ^= 1;
1496                 }
1497                 if (s->pict_type != AV_PICTURE_TYPE_B) {
1498                     s->time_base       = s->last_time_base;
1499                     s->last_non_b_time = s->time - s->pp_time;
1500                 }
1501                 //av_log(NULL, AV_LOG_ERROR, "R:%d ", s->next_lambda);
1502                 for (i = 0; i < context_count; i++) {
1503                     PutBitContext *pb = &s->thread_context[i]->pb;
1504                     init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1505                 }
1506                 goto vbv_retry;
1507             }
1508
1509             assert(s->avctx->rc_max_rate);
1510         }
1511
1512         if (s->flags & CODEC_FLAG_PASS1)
1513             ff_write_pass1_stats(s);
1514
1515         for (i = 0; i < 4; i++) {
1516             s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1517             avctx->error[i] += s->current_picture_ptr->f.error[i];
1518         }
1519
1520         if (s->flags & CODEC_FLAG_PASS1)
1521             assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1522                    avctx->i_tex_bits + avctx->p_tex_bits ==
1523                        put_bits_count(&s->pb));
1524         flush_put_bits(&s->pb);
1525         s->frame_bits  = put_bits_count(&s->pb);
1526
1527         stuffing_count = ff_vbv_update(s, s->frame_bits);
1528         if (stuffing_count) {
1529             if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1530                     stuffing_count + 50) {
1531                 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1532                 return -1;
1533             }
1534
1535             switch (s->codec_id) {
1536             case AV_CODEC_ID_MPEG1VIDEO:
1537             case AV_CODEC_ID_MPEG2VIDEO:
1538                 while (stuffing_count--) {
1539                     put_bits(&s->pb, 8, 0);
1540                 }
1541             break;
1542             case AV_CODEC_ID_MPEG4:
1543                 put_bits(&s->pb, 16, 0);
1544                 put_bits(&s->pb, 16, 0x1C3);
1545                 stuffing_count -= 4;
1546                 while (stuffing_count--) {
1547                     put_bits(&s->pb, 8, 0xFF);
1548                 }
1549             break;
1550             default:
1551                 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1552             }
1553             flush_put_bits(&s->pb);
1554             s->frame_bits  = put_bits_count(&s->pb);
1555         }
1556
1557         /* update mpeg1/2 vbv_delay for CBR */
1558         if (s->avctx->rc_max_rate                          &&
1559             s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1560             s->out_format == FMT_MPEG1                     &&
1561             90000LL * (avctx->rc_buffer_size - 1) <=
1562                 s->avctx->rc_max_rate * 0xFFFFLL) {
1563             int vbv_delay, min_delay;
1564             double inbits  = s->avctx->rc_max_rate *
1565                              av_q2d(s->avctx->time_base);
1566             int    minbits = s->frame_bits - 8 *
1567                              (s->vbv_delay_ptr - s->pb.buf - 1);
1568             double bits    = s->rc_context.buffer_index + minbits - inbits;
1569
1570             if (bits < 0)
1571                 av_log(s->avctx, AV_LOG_ERROR,
1572                        "Internal error, negative bits\n");
1573
1574             assert(s->repeat_first_field == 0);
1575
1576             vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1577             min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1578                         s->avctx->rc_max_rate;
1579
1580             vbv_delay = FFMAX(vbv_delay, min_delay);
1581
1582             assert(vbv_delay < 0xFFFF);
1583
1584             s->vbv_delay_ptr[0] &= 0xF8;
1585             s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1586             s->vbv_delay_ptr[1]  = vbv_delay >> 5;
1587             s->vbv_delay_ptr[2] &= 0x07;
1588             s->vbv_delay_ptr[2] |= vbv_delay << 3;
1589             avctx->vbv_delay     = vbv_delay * 300;
1590         }
1591         s->total_bits     += s->frame_bits;
1592         avctx->frame_bits  = s->frame_bits;
1593
1594         pkt->pts = s->current_picture.f.pts;
1595         if (!s->low_delay) {
1596             if (!s->current_picture.f.coded_picture_number)
1597                 pkt->dts = pkt->pts - s->dts_delta;
1598             else
1599                 pkt->dts = s->reordered_pts;
1600             s->reordered_pts = s->input_picture[0]->f.pts;
1601         } else
1602             pkt->dts = pkt->pts;
1603         if (s->current_picture.f.key_frame)
1604             pkt->flags |= AV_PKT_FLAG_KEY;
1605         if (s->mb_info)
1606             av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1607     } else {
1608         assert((put_bits_ptr(&s->pb) == s->pb.buf));
1609         s->frame_bits = 0;
1610     }
1611     assert((s->frame_bits & 7) == 0);
1612
1613     pkt->size = s->frame_bits / 8;
1614     *got_packet = !!pkt->size;
1615     return 0;
1616 }
1617
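/*
 * Zero out a block that contains only a few isolated +-1 coefficients.
 * Each +-1 level is scored via tab[] according to the zero run preceding it;
 * a block containing any |level| > 1 is left untouched.  If the total score
 * stays below the threshold the block is cleared, keeping the DC coefficient
 * unless the threshold was passed in as a negative value.
 */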
1618 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1619                                                 int n, int threshold)
1620 {
1621     static const char tab[64] = {
1622         3, 2, 2, 1, 1, 1, 1, 1,
1623         1, 1, 1, 1, 1, 1, 1, 1,
1624         1, 1, 1, 1, 1, 1, 1, 1,
1625         0, 0, 0, 0, 0, 0, 0, 0,
1626         0, 0, 0, 0, 0, 0, 0, 0,
1627         0, 0, 0, 0, 0, 0, 0, 0,
1628         0, 0, 0, 0, 0, 0, 0, 0,
1629         0, 0, 0, 0, 0, 0, 0, 0
1630     };
1631     int score = 0;
1632     int run = 0;
1633     int i;
1634     DCTELEM *block = s->block[n];
1635     const int last_index = s->block_last_index[n];
1636     int skip_dc;
1637
1638     if (threshold < 0) {
1639         skip_dc = 0;
1640         threshold = -threshold;
1641     } else
1642         skip_dc = 1;
1643
1644     /* Are all the coefficients we could zero out already zero? */
1645     if (last_index <= skip_dc - 1)
1646         return;
1647
1648     for (i = 0; i <= last_index; i++) {
1649         const int j = s->intra_scantable.permutated[i];
1650         const int level = FFABS(block[j]);
1651         if (level == 1) {
1652             if (skip_dc && i == 0)
1653                 continue;
1654             score += tab[run];
1655             run = 0;
1656         } else if (level > 1) {
1657             return;
1658         } else {
1659             run++;
1660         }
1661     }
1662     if (score >= threshold)
1663         return;
1664     for (i = skip_dc; i <= last_index; i++) {
1665         const int j = s->intra_scantable.permutated[i];
1666         block[j] = 0;
1667     }
1668     if (block[0])
1669         s->block_last_index[n] = 0;
1670     else
1671         s->block_last_index[n] = -1;
1672 }
1673
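/*
 * Clamp the quantized coefficients to [min_qcoeff, max_qcoeff]; the intra DC
 * coefficient is never clipped.  A warning is printed when clipping happened,
 * but only with simple macroblock decision.
 */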
1674 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block,
1675                                int last_index)
1676 {
1677     int i;
1678     const int maxlevel = s->max_qcoeff;
1679     const int minlevel = s->min_qcoeff;
1680     int overflow = 0;
1681
1682     if (s->mb_intra) {
1683         i = 1; // skip clipping of intra dc
1684     } else
1685         i = 0;
1686
1687     for (; i <= last_index; i++) {
1688         const int j = s->intra_scantable.permutated[i];
1689         int level = block[j];
1690
1691         if (level > maxlevel) {
1692             level = maxlevel;
1693             overflow++;
1694         } else if (level < minlevel) {
1695             level = minlevel;
1696             overflow++;
1697         }
1698
1699         block[j] = level;
1700     }
1701
1702     if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1703         av_log(s->avctx, AV_LOG_INFO,
1704                "warning, clipping %d dct coefficients to %d..%d\n",
1705                overflow, minlevel, maxlevel);
1706 }
1707
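/*
 * Per-pixel weights for the noise-shaping quantizer: roughly 36 times the
 * standard deviation of the (up to) 3x3 neighbourhood around each of the 64
 * positions of the 8x8 block at ptr.
 */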
1708 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1709 {
1710     int x, y;
1711     // FIXME optimize
1712     for (y = 0; y < 8; y++) {
1713         for (x = 0; x < 8; x++) {
1714             int x2, y2;
1715             int sum = 0;
1716             int sqr = 0;
1717             int count = 0;
1718
1719             for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1720                 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1721                     int v = ptr[x2 + y2 * stride];
1722                     sum += v;
1723                     sqr += v * v;
1724                     count++;
1725                 }
1726             }
1727             weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
1728         }
1729     }
1730 }
1731
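/*
 * Encode one macroblock: apply per-MB adaptive quantization, fetch (or
 * edge-emulate) the source pixels, decide between progressive and interlaced
 * DCT, compute the inter residual against the motion-compensated prediction,
 * DCT + quantize each block (with optional noise shaping and coefficient
 * elimination) and hand the coefficients to the codec-specific bitstream
 * writer.
 */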
1732 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1733                                                 int motion_x, int motion_y,
1734                                                 int mb_block_height,
1735                                                 int mb_block_count)
1736 {
1737     int16_t weight[8][64];
1738     DCTELEM orig[8][64];
1739     const int mb_x = s->mb_x;
1740     const int mb_y = s->mb_y;
1741     int i;
1742     int skip_dct[8];
1743     int dct_offset = s->linesize * 8; // default for progressive frames
1744     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1745     int wrap_y, wrap_c;
1746
1747     for (i = 0; i < mb_block_count; i++)
1748         skip_dct[i] = s->skipdct;
1749
1750     if (s->adaptive_quant) {
1751         const int last_qp = s->qscale;
1752         const int mb_xy = mb_x + mb_y * s->mb_stride;
1753
1754         s->lambda = s->lambda_table[mb_xy];
1755         update_qscale(s);
1756
1757         if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1758             s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
1759             s->dquant = s->qscale - last_qp;
1760
1761             if (s->out_format == FMT_H263) {
1762                 s->dquant = av_clip(s->dquant, -2, 2);
1763
1764                 if (s->codec_id == AV_CODEC_ID_MPEG4) {
1765                     if (!s->mb_intra) {
1766                         if (s->pict_type == AV_PICTURE_TYPE_B) {
1767                             if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1768                                 s->dquant = 0;
1769                         }
1770                         if (s->mv_type == MV_TYPE_8X8)
1771                             s->dquant = 0;
1772                     }
1773                 }
1774             }
1775         }
1776         ff_set_qscale(s, last_qp + s->dquant);
1777     } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1778         ff_set_qscale(s, s->qscale + s->dquant);
1779
1780     wrap_y = s->linesize;
1781     wrap_c = s->uvlinesize;
1782     ptr_y  = s->new_picture.f.data[0] +
1783              (mb_y * 16 * wrap_y)              + mb_x * 16;
1784     ptr_cb = s->new_picture.f.data[1] +
1785              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1786     ptr_cr = s->new_picture.f.data[2] +
1787              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1788
1789     if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1790         uint8_t *ebuf = s->edge_emu_buffer + 32;
1791         s->dsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
1792                                 mb_y * 16, s->width, s->height);
1793         ptr_y = ebuf;
1794         s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
1795                                 mb_block_height, mb_x * 8, mb_y * 8,
1796                                 s->width >> 1, s->height >> 1);
1797         ptr_cb = ebuf + 18 * wrap_y;
1798         s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
1799                                 mb_block_height, mb_x * 8, mb_y * 8,
1800                                 s->width >> 1, s->height >> 1);
1801         ptr_cr = ebuf + 18 * wrap_y + 8;
1802     }
1803
1804     if (s->mb_intra) {
1805         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1806             int progressive_score, interlaced_score;
1807
1808             s->interlaced_dct = 0;
1809             progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1810                                                     NULL, wrap_y, 8) +
1811                                 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1812                                                     NULL, wrap_y, 8) - 400;
1813
1814             if (progressive_score > 0) {
1815                 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1816                                                        NULL, wrap_y * 2, 8) +
1817                                    s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1818                                                        NULL, wrap_y * 2, 8);
1819                 if (progressive_score > interlaced_score) {
1820                     s->interlaced_dct = 1;
1821
1822                     dct_offset = wrap_y;
1823                     wrap_y <<= 1;
1824                     if (s->chroma_format == CHROMA_422)
1825                         wrap_c <<= 1;
1826                 }
1827             }
1828         }
1829
1830         s->dsp.get_pixels(s->block[0], ptr_y                  , wrap_y);
1831         s->dsp.get_pixels(s->block[1], ptr_y              + 8 , wrap_y);
1832         s->dsp.get_pixels(s->block[2], ptr_y + dct_offset     , wrap_y);
1833         s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1834
1835         if (s->flags & CODEC_FLAG_GRAY) {
1836             skip_dct[4] = 1;
1837             skip_dct[5] = 1;
1838         } else {
1839             s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1840             s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1841             if (!s->chroma_y_shift) { /* 422 */
1842                 s->dsp.get_pixels(s->block[6],
1843                                   ptr_cb + (dct_offset >> 1), wrap_c);
1844                 s->dsp.get_pixels(s->block[7],
1845                                   ptr_cr + (dct_offset >> 1), wrap_c);
1846             }
1847         }
1848     } else {
1849         op_pixels_func (*op_pix)[4];
1850         qpel_mc_func (*op_qpix)[16];
1851         uint8_t *dest_y, *dest_cb, *dest_cr;
1852
1853         dest_y  = s->dest[0];
1854         dest_cb = s->dest[1];
1855         dest_cr = s->dest[2];
1856
1857         if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1858             op_pix  = s->dsp.put_pixels_tab;
1859             op_qpix = s->dsp.put_qpel_pixels_tab;
1860         } else {
1861             op_pix  = s->dsp.put_no_rnd_pixels_tab;
1862             op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1863         }
1864
1865         if (s->mv_dir & MV_DIR_FORWARD) {
1866             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1867                           s->last_picture.f.data,
1868                           op_pix, op_qpix);
1869             op_pix  = s->dsp.avg_pixels_tab;
1870             op_qpix = s->dsp.avg_qpel_pixels_tab;
1871         }
1872         if (s->mv_dir & MV_DIR_BACKWARD) {
1873             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1874                           s->next_picture.f.data,
1875                           op_pix, op_qpix);
1876         }
1877
1878         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1879             int progressive_score, interlaced_score;
1880
1881             s->interlaced_dct = 0;
1882             progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1883                                                     ptr_y,              wrap_y,
1884                                                     8) +
1885                                 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1886                                                     ptr_y + wrap_y * 8, wrap_y,
1887                                                     8) - 400;
1888
1889             if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1890                 progressive_score -= 400;
1891
1892             if (progressive_score > 0) {
1893                 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1894                                                        ptr_y,
1895                                                        wrap_y * 2, 8) +
1896                                    s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1897                                                        ptr_y + wrap_y,
1898                                                        wrap_y * 2, 8);
1899
1900                 if (progressive_score > interlaced_score) {
1901                     s->interlaced_dct = 1;
1902
1903                     dct_offset = wrap_y;
1904                     wrap_y <<= 1;
1905                     if (s->chroma_format == CHROMA_422)
1906                         wrap_c <<= 1;
1907                 }
1908             }
1909         }
1910
1911         s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1912         s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1913         s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1914                            dest_y + dct_offset, wrap_y);
1915         s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1916                            dest_y + dct_offset + 8, wrap_y);
1917
1918         if (s->flags & CODEC_FLAG_GRAY) {
1919             skip_dct[4] = 1;
1920             skip_dct[5] = 1;
1921         } else {
1922             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1923             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1924             if (!s->chroma_y_shift) { /* 422 */
1925                 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1926                                    dest_cb + (dct_offset >> 1), wrap_c);
1927                 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1928                                    dest_cr + (dct_offset >> 1), wrap_c);
1929             }
1930         }
1931         /* pre quantization */
1932         if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1933                 2 * s->qscale * s->qscale) {
1934             // FIXME optimize
1935             if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1936                               wrap_y, 8) < 20 * s->qscale)
1937                 skip_dct[0] = 1;
1938             if (s->dsp.sad[1](NULL, ptr_y + 8,
1939                               dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1940                 skip_dct[1] = 1;
1941             if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1942                               dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1943                 skip_dct[2] = 1;
1944             if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1945                               dest_y + dct_offset + 8,
1946                               wrap_y, 8) < 20 * s->qscale)
1947                 skip_dct[3] = 1;
1948             if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1949                               wrap_c, 8) < 20 * s->qscale)
1950                 skip_dct[4] = 1;
1951             if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1952                               wrap_c, 8) < 20 * s->qscale)
1953                 skip_dct[5] = 1;
1954             if (!s->chroma_y_shift) { /* 422 */
1955                 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1956                                   dest_cb + (dct_offset >> 1),
1957                                   wrap_c, 8) < 20 * s->qscale)
1958                     skip_dct[6] = 1;
1959                 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1960                                   dest_cr + (dct_offset >> 1),
1961                                   wrap_c, 8) < 20 * s->qscale)
1962                     skip_dct[7] = 1;
1963             }
1964         }
1965     }
1966
1967     if (s->quantizer_noise_shaping) {
1968         if (!skip_dct[0])
1969             get_visual_weight(weight[0], ptr_y                 , wrap_y);
1970         if (!skip_dct[1])
1971             get_visual_weight(weight[1], ptr_y              + 8, wrap_y);
1972         if (!skip_dct[2])
1973             get_visual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
1974         if (!skip_dct[3])
1975             get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1976         if (!skip_dct[4])
1977             get_visual_weight(weight[4], ptr_cb                , wrap_c);
1978         if (!skip_dct[5])
1979             get_visual_weight(weight[5], ptr_cr                , wrap_c);
1980         if (!s->chroma_y_shift) { /* 422 */
1981             if (!skip_dct[6])
1982                 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1983                                   wrap_c);
1984             if (!skip_dct[7])
1985                 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
1986                                   wrap_c);
1987         }
1988         memcpy(orig[0], s->block[0], sizeof(DCTELEM) * 64 * mb_block_count);
1989     }
1990
1991     /* DCT & quantize */
1992     assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1993     {
1994         for (i = 0; i < mb_block_count; i++) {
1995             if (!skip_dct[i]) {
1996                 int overflow;
1997                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1998                 // FIXME we could decide to change the quantizer instead of
1999                 // clipping
2000                 // JS: I don't think that would be a good idea, it could lower
2001                 //     quality instead of improving it. Just INTRADC clipping
2002                 //     deserves changes in the quantizer.
2003                 if (overflow)
2004                     clip_coeffs(s, s->block[i], s->block_last_index[i]);
2005             } else
2006                 s->block_last_index[i] = -1;
2007         }
2008         if (s->quantizer_noise_shaping) {
2009             for (i = 0; i < mb_block_count; i++) {
2010                 if (!skip_dct[i]) {
2011                     s->block_last_index[i] =
2012                         dct_quantize_refine(s, s->block[i], weight[i],
2013                                             orig[i], i, s->qscale);
2014                 }
2015             }
2016         }
2017
2018         if (s->luma_elim_threshold && !s->mb_intra)
2019             for (i = 0; i < 4; i++)
2020                 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2021         if (s->chroma_elim_threshold && !s->mb_intra)
2022             for (i = 4; i < mb_block_count; i++)
2023                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2024
2025         if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2026             for (i = 0; i < mb_block_count; i++) {
2027                 if (s->block_last_index[i] == -1)
2028                     s->coded_score[i] = INT_MAX / 256;
2029             }
2030         }
2031     }
2032
2033     if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
2034         s->block_last_index[4] =
2035         s->block_last_index[5] = 0;
2036         s->block[4][0] =
2037         s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2038     }
2039
2040     // FIXME: the non-C quantize code returns an incorrect block_last_index
2041     if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2042         for (i = 0; i < mb_block_count; i++) {
2043             int j;
2044             if (s->block_last_index[i] > 0) {
2045                 for (j = 63; j > 0; j--) {
2046                     if (s->block[i][s->intra_scantable.permutated[j]])
2047                         break;
2048                 }
2049                 s->block_last_index[i] = j;
2050             }
2051         }
2052     }
2053
2054     /* huffman encode */
2055     switch(s->codec_id){ //FIXME a function pointer could be slightly faster
2056     case AV_CODEC_ID_MPEG1VIDEO:
2057     case AV_CODEC_ID_MPEG2VIDEO:
2058         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2059             ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2060         break;
2061     case AV_CODEC_ID_MPEG4:
2062         if (CONFIG_MPEG4_ENCODER)
2063             ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2064         break;
2065     case AV_CODEC_ID_MSMPEG4V2:
2066     case AV_CODEC_ID_MSMPEG4V3:
2067     case AV_CODEC_ID_WMV1:
2068         if (CONFIG_MSMPEG4_ENCODER)
2069             ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2070         break;
2071     case AV_CODEC_ID_WMV2:
2072         if (CONFIG_WMV2_ENCODER)
2073             ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2074         break;
2075     case AV_CODEC_ID_H261:
2076         if (CONFIG_H261_ENCODER)
2077             ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2078         break;
2079     case AV_CODEC_ID_H263:
2080     case AV_CODEC_ID_H263P:
2081     case AV_CODEC_ID_FLV1:
2082     case AV_CODEC_ID_RV10:
2083     case AV_CODEC_ID_RV20:
2084         if (CONFIG_H263_ENCODER)
2085             ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2086         break;
2087     case AV_CODEC_ID_MJPEG:
2088         if (CONFIG_MJPEG_ENCODER)
2089             ff_mjpeg_encode_mb(s, s->block);
2090         break;
2091     default:
2092         assert(0);
2093     }
2094 }
2095
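/* 4:2:0 macroblocks are coded as 6 blocks with 8 chroma lines, 4:2:2 ones as
 * 8 blocks with 16 chroma lines. */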
2096 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2097 {
2098     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y,  8, 6);
2099     else                                encode_mb_internal(s, motion_x, motion_y, 16, 8);
2100 }
2101
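/*
 * The following two helpers save and restore the relevant encoder state
 * around a trial encode, so that encode_mb_hq() can try several macroblock
 * types and keep only the cheapest one.
 */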
2102 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2103     int i;
2104
2105     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2106
2107     /* mpeg1 */
2108     d->mb_skip_run= s->mb_skip_run;
2109     for(i=0; i<3; i++)
2110         d->last_dc[i] = s->last_dc[i];
2111
2112     /* statistics */
2113     d->mv_bits= s->mv_bits;
2114     d->i_tex_bits= s->i_tex_bits;
2115     d->p_tex_bits= s->p_tex_bits;
2116     d->i_count= s->i_count;
2117     d->f_count= s->f_count;
2118     d->b_count= s->b_count;
2119     d->skip_count= s->skip_count;
2120     d->misc_bits= s->misc_bits;
2121     d->last_bits= 0;
2122
2123     d->mb_skipped= 0;
2124     d->qscale= s->qscale;
2125     d->dquant= s->dquant;
2126
2127     d->esc3_level_length= s->esc3_level_length;
2128 }
2129
2130 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2131     int i;
2132
2133     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2134     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2135
2136     /* mpeg1 */
2137     d->mb_skip_run= s->mb_skip_run;
2138     for(i=0; i<3; i++)
2139         d->last_dc[i] = s->last_dc[i];
2140
2141     /* statistics */
2142     d->mv_bits= s->mv_bits;
2143     d->i_tex_bits= s->i_tex_bits;
2144     d->p_tex_bits= s->p_tex_bits;
2145     d->i_count= s->i_count;
2146     d->f_count= s->f_count;
2147     d->b_count= s->b_count;
2148     d->skip_count= s->skip_count;
2149     d->misc_bits= s->misc_bits;
2150
2151     d->mb_intra= s->mb_intra;
2152     d->mb_skipped= s->mb_skipped;
2153     d->mv_type= s->mv_type;
2154     d->mv_dir= s->mv_dir;
2155     d->pb= s->pb;
2156     if(s->data_partitioning){
2157         d->pb2= s->pb2;
2158         d->tex_pb= s->tex_pb;
2159     }
2160     d->block= s->block;
2161     for(i=0; i<8; i++)
2162         d->block_last_index[i]= s->block_last_index[i];
2163     d->interlaced_dct= s->interlaced_dct;
2164     d->qscale= s->qscale;
2165
2166     d->esc3_level_length= s->esc3_level_length;
2167 }
2168
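/*
 * Trial-encode one macroblock candidate into one of two scratch bitstream /
 * block sets.  The cost is the number of bits written; with full RD
 * macroblock decision the macroblock is also decoded and the lambda2-weighted
 * SSE is added.  If the candidate beats *dmin its context is copied to best.
 */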
2169 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2170                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2171                            int *dmin, int *next_block, int motion_x, int motion_y)
2172 {
2173     int score;
2174     uint8_t *dest_backup[3];
2175
2176     copy_context_before_encode(s, backup, type);
2177
2178     s->block= s->blocks[*next_block];
2179     s->pb= pb[*next_block];
2180     if(s->data_partitioning){
2181         s->pb2   = pb2   [*next_block];
2182         s->tex_pb= tex_pb[*next_block];
2183     }
2184
2185     if(*next_block){
2186         memcpy(dest_backup, s->dest, sizeof(s->dest));
2187         s->dest[0] = s->rd_scratchpad;
2188         s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2189         s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2190         assert(s->linesize >= 32); //FIXME
2191     }
2192
2193     encode_mb(s, motion_x, motion_y);
2194
2195     score= put_bits_count(&s->pb);
2196     if(s->data_partitioning){
2197         score+= put_bits_count(&s->pb2);
2198         score+= put_bits_count(&s->tex_pb);
2199     }
2200
2201     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2202         ff_MPV_decode_mb(s, s->block);
2203
2204         score *= s->lambda2;
2205         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2206     }
2207
2208     if(*next_block){
2209         memcpy(s->dest, dest_backup, sizeof(s->dest));
2210     }
2211
2212     if(score<*dmin){
2213         *dmin= score;
2214         *next_block^=1;
2215
2216         copy_context_after_encode(best, s, type);
2217     }
2218 }
2219
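/*
 * Sum of squared errors between two rectangles, using the DSP 16x16 / 8x8
 * fast paths where possible and a generic loop otherwise.
 */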
2220 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2221     uint32_t *sq = ff_squareTbl + 256;
2222     int acc=0;
2223     int x,y;
2224
2225     if(w==16 && h==16)
2226         return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2227     else if(w==8 && h==8)
2228         return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2229
2230     for(y=0; y<h; y++){
2231         for(x=0; x<w; x++){
2232             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2233         }
2234     }
2235
2236     assert(acc>=0);
2237
2238     return acc;
2239 }
2240
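/* SSE (or NSSE if selected as mb_cmp) of the current macroblock against the
 * reconstructed output, clipped to the picture size at the borders. */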
2241 static int sse_mb(MpegEncContext *s){
2242     int w= 16;
2243     int h= 16;
2244
2245     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2246     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2247
2248     if(w==16 && h==16)
2249       if(s->avctx->mb_cmp == FF_CMP_NSSE){
2250         return  s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2251                +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2252                +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2253       }else{
2254         return  s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2255                +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2256                +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2257       }
2258     else
2259         return  sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2260                +sse(s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2261                +sse(s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2262 }
2263
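/*
 * Motion-estimation pre-pass for one slice: the macroblocks are scanned in
 * reverse order with the pre_dia_size diamond, and the results are kept in
 * the context for the main pass.
 */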
2264 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2265     MpegEncContext *s= *(void**)arg;
2266
2267
2268     s->me.pre_pass=1;
2269     s->me.dia_size= s->avctx->pre_dia_size;
2270     s->first_slice_line=1;
2271     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2272         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2273             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2274         }
2275         s->first_slice_line=0;
2276     }
2277
2278     s->me.pre_pass=0;
2279
2280     return 0;
2281 }
2282
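/*
 * Motion-estimation pass for one slice: runs the B- or P-frame motion search
 * for every macroblock and stores the vectors and candidate MB types in the
 * context.
 */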
2283 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2284     MpegEncContext *s= *(void**)arg;
2285
2286     ff_check_alignment();
2287
2288     s->me.dia_size= s->avctx->dia_size;
2289     s->first_slice_line=1;
2290     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2291         s->mb_x=0; //for block init below
2292         ff_init_block_index(s);
2293         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2294             s->block_index[0]+=2;
2295             s->block_index[1]+=2;
2296             s->block_index[2]+=2;
2297             s->block_index[3]+=2;
2298
2299             /* compute motion vector & mb_type and store in context */
2300             if(s->pict_type==AV_PICTURE_TYPE_B)
2301                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2302             else
2303                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2304         }
2305         s->first_slice_line=0;
2306     }
2307     return 0;
2308 }
2309
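/*
 * Compute the spatial variance and mean of every luma macroblock in the
 * slice, stored in mb_var / mb_mean and summed into mb_var_sum_temp for the
 * rate control and adaptive quantization.
 */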
2310 static int mb_var_thread(AVCodecContext *c, void *arg){
2311     MpegEncContext *s= *(void**)arg;
2312     int mb_x, mb_y;
2313
2314     ff_check_alignment();
2315
2316     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2317         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2318             int xx = mb_x * 16;
2319             int yy = mb_y * 16;
2320             uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2321             int varc;
2322             int sum = s->dsp.pix_sum(pix, s->linesize);
2323
2324             varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2325
2326             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2327             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2328             s->me.mb_var_sum_temp    += varc;
2329         }
2330     }
2331     return 0;
2332 }
2333
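/*
 * Terminate the current slice: merge MPEG-4 data partitions, write the
 * codec-specific stuffing, byte-align and flush the bitstream, and account
 * the bits as misc_bits for the first-pass statistics.
 */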
2334 static void write_slice_end(MpegEncContext *s){
2335     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2336         if(s->partitioned_frame){
2337             ff_mpeg4_merge_partitions(s);
2338         }
2339
2340         ff_mpeg4_stuffing(&s->pb);
2341     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2342         ff_mjpeg_encode_stuffing(&s->pb);
2343     }
2344
2345     avpriv_align_put_bits(&s->pb);
2346     flush_put_bits(&s->pb);
2347
2348     if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2349         s->misc_bits+= get_bits_diff(s);
2350 }
2351
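/*
 * Append one 12-byte H.263 macroblock-info record to the side data: bit
 * offset of the MB, quantizer, GOB number, macroblock address and the
 * predicted motion vector (4MV is not covered).
 */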
2352 static void write_mb_info(MpegEncContext *s)
2353 {
2354     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2355     int offset = put_bits_count(&s->pb);
2356     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2357     int gobn = s->mb_y / s->gob_index;
2358     int pred_x, pred_y;
2359     if (CONFIG_H263_ENCODER)
2360         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2361     bytestream_put_le32(&ptr, offset);
2362     bytestream_put_byte(&ptr, s->qscale);
2363     bytestream_put_byte(&ptr, gobn);
2364     bytestream_put_le16(&ptr, mba);
2365     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2366     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2367     /* 4MV not implemented */
2368     bytestream_put_byte(&ptr, 0); /* hmv2 */
2369     bytestream_put_byte(&ptr, 0); /* vmv2 */
2370 }
2371
2372 static void update_mb_info(MpegEncContext *s, int startcode)
2373 {
2374     if (!s->mb_info)
2375         return;
2376     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2377         s->mb_info_size += 12;
2378         s->prev_mb_info = s->last_mb_info;
2379     }
2380     if (startcode) {
2381         s->prev_mb_info = put_bits_count(&s->pb)/8;
2382         /* This might have incremented mb_info_size above, and we return without
2383          * actually writing any info into that slot yet. But in that case,
2384          * this will be called again after the start code has been written, and
2385          * the mb info will be written into that slot then. */
2386         return;
2387     }
2388
2389     s->last_mb_info = put_bits_count(&s->pb)/8;
2390     if (!s->mb_info_size)
2391         s->mb_info_size += 12;
2392     write_mb_info(s);
2393 }
2394
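/*
 * Encode one slice: set up two alternating scratch bitstreams for the
 * rate-distortion macroblock decision, reset the DC predictors and motion
 * history, emit resync / GOB / slice headers where required and encode every
 * macroblock, either directly or by trying all candidate types through
 * encode_mb_hq().
 */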
2395 static int encode_thread(AVCodecContext *c, void *arg){
2396     MpegEncContext *s= *(void**)arg;
2397     int mb_x, mb_y, pdif = 0;
2398     int chr_h= 16>>s->chroma_y_shift;
2399     int i, j;
2400     MpegEncContext best_s, backup_s;
2401     uint8_t bit_buf[2][MAX_MB_BYTES];
2402     uint8_t bit_buf2[2][MAX_MB_BYTES];
2403     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2404     PutBitContext pb[2], pb2[2], tex_pb[2];
2405 //printf("%d->%d\n", s->resync_mb_y, s->end_mb_y);
2406
2407     ff_check_alignment();
2408
2409     for(i=0; i<2; i++){
2410         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2411         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2412         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2413     }
2414
2415     s->last_bits= put_bits_count(&s->pb);
2416     s->mv_bits=0;
2417     s->misc_bits=0;
2418     s->i_tex_bits=0;
2419     s->p_tex_bits=0;
2420     s->i_count=0;
2421     s->f_count=0;
2422     s->b_count=0;
2423     s->skip_count=0;
2424
2425     for(i=0; i<3; i++){
2426         /* init last dc values */
2427         /* note: quant matrix value (8) is implied here */
2428         s->last_dc[i] = 128 << s->intra_dc_precision;
2429
2430         s->current_picture.f.error[i] = 0;
2431     }
2432     s->mb_skip_run = 0;
2433     memset(s->last_mv, 0, sizeof(s->last_mv));
2434
2435     s->last_mv_dir = 0;
2436
2437     switch(s->codec_id){
2438     case AV_CODEC_ID_H263:
2439     case AV_CODEC_ID_H263P:
2440     case AV_CODEC_ID_FLV1:
2441         if (CONFIG_H263_ENCODER)
2442             s->gob_index = ff_h263_get_gob_height(s);
2443         break;
2444     case AV_CODEC_ID_MPEG4:
2445         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2446             ff_mpeg4_init_partitions(s);
2447         break;
2448     }
2449
2450     s->resync_mb_x=0;
2451     s->resync_mb_y=0;
2452     s->first_slice_line = 1;
2453     s->ptr_lastgob = s->pb.buf;
2454     for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2455 //    printf("row %d at %X\n", s->mb_y, (int)s);
2456         s->mb_x=0;
2457         s->mb_y= mb_y;
2458
2459         ff_set_qscale(s, s->qscale);
2460         ff_init_block_index(s);
2461
2462         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2463             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2464             int mb_type= s->mb_type[xy];
2465 //            int d;
2466             int dmin= INT_MAX;
2467             int dir;
2468
2469             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2470                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2471                 return -1;
2472             }
2473             if(s->data_partitioning){
2474                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
2475                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2476                     av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2477                     return -1;
2478                 }
2479             }
2480
2481             s->mb_x = mb_x;
2482             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
2483             ff_update_block_index(s);
2484
2485             if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2486                 ff_h261_reorder_mb_index(s);
2487                 xy= s->mb_y*s->mb_stride + s->mb_x;
2488                 mb_type= s->mb_type[xy];
2489             }
2490
2491             /* write gob / video packet header  */
2492             if(s->rtp_mode){
2493                 int current_packet_size, is_gob_start;
2494
2495                 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2496
2497                 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2498
2499                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2500
2501                 switch(s->codec_id){
2502                 case AV_CODEC_ID_H263:
2503                 case AV_CODEC_ID_H263P:
2504                     if(!s->h263_slice_structured)
2505                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2506                     break;
2507                 case AV_CODEC_ID_MPEG2VIDEO:
2508                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2509                 case AV_CODEC_ID_MPEG1VIDEO:
2510                     if(s->mb_skip_run) is_gob_start=0;
2511                     break;
2512                 }
2513
2514                 if(is_gob_start){
2515                     if(s->start_mb_y != mb_y || mb_x!=0){
2516                         write_slice_end(s);
2517
2518                         if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2519                             ff_mpeg4_init_partitions(s);
2520                         }
2521                     }
2522
2523                     assert((put_bits_count(&s->pb)&7) == 0);
2524                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2525
2526                     if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2527                         int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2528                         int d= 100 / s->avctx->error_rate;
2529                         if(r % d == 0){
2530                             current_packet_size=0;
2531                             s->pb.buf_ptr= s->ptr_lastgob;
2532                             assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2533                         }
2534                     }
2535
2536                     if (s->avctx->rtp_callback){
2537                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2538                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2539                     }
2540                     update_mb_info(s, 1);
2541
2542                     switch(s->codec_id){
2543                     case AV_CODEC_ID_MPEG4:
2544                         if (CONFIG_MPEG4_ENCODER) {
2545                             ff_mpeg4_encode_video_packet_header(s);
2546                             ff_mpeg4_clean_buffers(s);
2547                         }
2548                     break;
2549                     case AV_CODEC_ID_MPEG1VIDEO:
2550                     case AV_CODEC_ID_MPEG2VIDEO:
2551                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2552                             ff_mpeg1_encode_slice_header(s);
2553                             ff_mpeg1_clean_buffers(s);
2554                         }
2555                     break;
2556                     case AV_CODEC_ID_H263:
2557                     case AV_CODEC_ID_H263P:
2558                         if (CONFIG_H263_ENCODER)
2559                             ff_h263_encode_gob_header(s, mb_y);
2560                     break;
2561                     }
2562
2563                     if(s->flags&CODEC_FLAG_PASS1){
2564                         int bits= put_bits_count(&s->pb);
2565                         s->misc_bits+= bits - s->last_bits;
2566                         s->last_bits= bits;
2567                     }
2568
2569                     s->ptr_lastgob += current_packet_size;
2570                     s->first_slice_line=1;
2571                     s->resync_mb_x=mb_x;
2572                     s->resync_mb_y=mb_y;
2573                 }
2574             }
2575
2576             if(  (s->resync_mb_x   == s->mb_x)
2577                && s->resync_mb_y+1 == s->mb_y){
2578                 s->first_slice_line=0;
2579             }
2580
2581             s->mb_skipped=0;
2582             s->dquant=0; //only for QP_RD
2583
2584             update_mb_info(s, 0);
2585
2586             if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2587                 int next_block=0;
2588                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2589
2590                 copy_context_before_encode(&backup_s, s, -1);
2591                 backup_s.pb= s->pb;
2592                 best_s.data_partitioning= s->data_partitioning;
2593                 best_s.partitioned_frame= s->partitioned_frame;
2594                 if(s->data_partitioning){
2595                     backup_s.pb2= s->pb2;
2596                     backup_s.tex_pb= s->tex_pb;
2597                 }
2598
2599                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2600                     s->mv_dir = MV_DIR_FORWARD;
2601                     s->mv_type = MV_TYPE_16X16;
2602                     s->mb_intra= 0;
2603                     s->mv[0][0][0] = s->p_mv_table[xy][0];
2604                     s->mv[0][0][1] = s->p_mv_table[xy][1];
2605                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2606                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2607                 }
2608                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2609                     s->mv_dir = MV_DIR_FORWARD;
2610                     s->mv_type = MV_TYPE_FIELD;
2611                     s->mb_intra= 0;
2612                     for(i=0; i<2; i++){
2613                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2614                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2615                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2616                     }
2617                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2618                                  &dmin, &next_block, 0, 0);
2619                 }
2620                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2621                     s->mv_dir = MV_DIR_FORWARD;
2622                     s->mv_type = MV_TYPE_16X16;
2623                     s->mb_intra= 0;
2624                     s->mv[0][0][0] = 0;
2625                     s->mv[0][0][1] = 0;
2626                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2627                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2628                 }
2629                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2630                     s->mv_dir = MV_DIR_FORWARD;
2631                     s->mv_type = MV_TYPE_8X8;
2632                     s->mb_intra= 0;
2633                     for(i=0; i<4; i++){
2634                         s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2635                         s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2636                     }
2637                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2638                                  &dmin, &next_block, 0, 0);
2639                 }
2640                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2641                     s->mv_dir = MV_DIR_FORWARD;
2642                     s->mv_type = MV_TYPE_16X16;
2643                     s->mb_intra= 0;
2644                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2645                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2646                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2647                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2648                 }
2649                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2650                     s->mv_dir = MV_DIR_BACKWARD;
2651                     s->mv_type = MV_TYPE_16X16;
2652                     s->mb_intra= 0;
2653                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2654                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2655                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2656                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2657                 }
2658                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2659                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2660                     s->mv_type = MV_TYPE_16X16;
2661                     s->mb_intra= 0;
2662                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2663                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2664                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2665                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2666                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2667                                  &dmin, &next_block, 0, 0);
2668                 }
2669                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2670                     s->mv_dir = MV_DIR_FORWARD;
2671                     s->mv_type = MV_TYPE_FIELD;
2672                     s->mb_intra= 0;
2673                     for(i=0; i<2; i++){
2674                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2675                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2676                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2677                     }
2678                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2679                                  &dmin, &next_block, 0, 0);
2680                 }
2681                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2682                     s->mv_dir = MV_DIR_BACKWARD;
2683                     s->mv_type = MV_TYPE_FIELD;
2684                     s->mb_intra= 0;
2685                     for(i=0; i<2; i++){
2686                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2687                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2688                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2689                     }
2690                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2691                                  &dmin, &next_block, 0, 0);
2692                 }
2693                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2694                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2695                     s->mv_type = MV_TYPE_FIELD;
2696                     s->mb_intra= 0;
2697                     for(dir=0; dir<2; dir++){
2698                         for(i=0; i<2; i++){
2699                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2700                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2701                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2702                         }
2703                     }
2704                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2705                                  &dmin, &next_block, 0, 0);
2706                 }
2707                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2708                     s->mv_dir = 0;
2709                     s->mv_type = MV_TYPE_16X16;
2710                     s->mb_intra= 1;
2711                     s->mv[0][0][0] = 0;
2712                     s->mv[0][0][1] = 0;
2713                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2714                                  &dmin, &next_block, 0, 0);
2715                     if(s->h263_pred || s->h263_aic){
2716                         if(best_s.mb_intra)
2717                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2718                         else
2719                             ff_clean_intra_table_entries(s); //old mode?
2720                     }
2721                 }
2722
2723                 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2724                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2725                         const int last_qp= backup_s.qscale;
2726                         int qpi, qp, dc[6];
2727                         DCTELEM ac[6][16];
2728                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2729                         static const int dquant_tab[4]={-1,1,-2,2};
2730
2731                         assert(backup_s.dquant == 0);
2732
2733                         //FIXME intra
2734                         s->mv_dir= best_s.mv_dir;
2735                         s->mv_type = MV_TYPE_16X16;
2736                         s->mb_intra= best_s.mb_intra;
2737                         s->mv[0][0][0] = best_s.mv[0][0][0];
2738                         s->mv[0][0][1] = best_s.mv[0][0][1];
2739                         s->mv[1][0][0] = best_s.mv[1][0][0];
2740                         s->mv[1][0][1] = best_s.mv[1][0][1];
2741
2742                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2743                         for(; qpi<4; qpi++){
2744                             int dquant= dquant_tab[qpi];
2745                             qp= last_qp + dquant;
2746                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2747                                 continue;
2748                             backup_s.dquant= dquant;
2749                             if(s->mb_intra && s->dc_val[0]){
2750                                 for(i=0; i<6; i++){
2751                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
2752                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16);
2753                                 }
2754                             }
2755
2756                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2757                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2758                             if(best_s.qscale != qp){
2759                                 if(s->mb_intra && s->dc_val[0]){
2760                                     for(i=0; i<6; i++){
2761                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
2762                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
2763                                     }
2764                                 }
2765                             }
2766                         }
2767                     }
2768                 }
2769                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2770                     int mx= s->b_direct_mv_table[xy][0];
2771                     int my= s->b_direct_mv_table[xy][1];
2772
2773                     backup_s.dquant = 0;
2774                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2775                     s->mb_intra= 0;
2776                     ff_mpeg4_set_direct_mv(s, mx, my);
2777                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2778                                  &dmin, &next_block, mx, my);
2779                 }
2780                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2781                     backup_s.dquant = 0;
2782                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2783                     s->mb_intra= 0;
2784                     ff_mpeg4_set_direct_mv(s, 0, 0);
2785                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2786                                  &dmin, &next_block, 0, 0);
2787                 }
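                     /* SKIP_RD: if the best mode so far is inter and has coded
                      * coefficients, additionally try it with the residual dropped
                      * (skipdct=1), i.e. as a skipped/not-coded macroblock, and keep
                      * that variant if it is cheaper in the RD sense. */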
2788                 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2789                     int coded=0;
2790                     for(i=0; i<6; i++)
2791                         coded |= s->block_last_index[i];
2792                     if(coded){
2793                         int mx,my;
2794                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
2795                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2796                             mx=my=0; //FIXME find the one we actually used
2797                             ff_mpeg4_set_direct_mv(s, mx, my);
2798                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2799                             mx= s->mv[1][0][0];
2800                             my= s->mv[1][0][1];
2801                         }else{
2802                             mx= s->mv[0][0][0];
2803                             my= s->mv[0][0][1];
2804                         }
2805
2806                         s->mv_dir= best_s.mv_dir;
2807                         s->mv_type = best_s.mv_type;
2808                         s->mb_intra= 0;
2809 /*                        s->mv[0][0][0] = best_s.mv[0][0][0];
2810                         s->mv[0][0][1] = best_s.mv[0][0][1];
2811                         s->mv[1][0][0] = best_s.mv[1][0][0];
2812                         s->mv[1][0][1] = best_s.mv[1][0][1];*/
2813                         backup_s.dquant= 0;
2814                         s->skipdct=1;
2815                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2816                                         &dmin, &next_block, mx, my);
2817                         s->skipdct=0;
2818                     }
2819                 }
2820
2821                 s->current_picture.f.qscale_table[xy] = best_s.qscale;
2822
2823                 copy_context_after_encode(s, &best_s, -1);
2824
2825                 pb_bits_count= put_bits_count(&s->pb);
2826                 flush_put_bits(&s->pb);
2827                 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2828                 s->pb= backup_s.pb;
2829
2830                 if(s->data_partitioning){
2831                     pb2_bits_count= put_bits_count(&s->pb2);
2832                     flush_put_bits(&s->pb2);
2833                     avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2834                     s->pb2= backup_s.pb2;
2835
2836                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
2837                     flush_put_bits(&s->tex_pb);
2838                     avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2839                     s->tex_pb= backup_s.tex_pb;
2840                 }
2841                 s->last_bits= put_bits_count(&s->pb);
2842
2843                 if (CONFIG_H263_ENCODER &&
2844                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2845                     ff_h263_update_motion_val(s);
2846
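                     /* The reconstruction of the winning candidate may live in
                      * rd_scratchpad rather than in the frame buffer; if so
                      * (next_block==0), copy the 16x16 luma block and the two
                      * chroma blocks back into s->dest[]. */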
2847                 if(next_block==0){ //FIXME 16 vs linesize16
2848                     s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad                     , s->linesize  ,16);
2849                     s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
2850                     s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2851                 }
2852
2853                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2854                     ff_MPV_decode_mb(s, s->block);
2855             } else {
2856                 int motion_x = 0, motion_y = 0;
2857                 s->mv_type=MV_TYPE_16X16;
2858                 // only one MB-Type possible
2859
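                     /* Non-RD path: the macroblock type was already decided during
                      * motion estimation, so only the corresponding motion vectors
                      * and field selects have to be loaded before encoding. */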
2860                 switch(mb_type){
2861                 case CANDIDATE_MB_TYPE_INTRA:
2862                     s->mv_dir = 0;
2863                     s->mb_intra= 1;
2864                     motion_x= s->mv[0][0][0] = 0;
2865                     motion_y= s->mv[0][0][1] = 0;
2866                     break;
2867                 case CANDIDATE_MB_TYPE_INTER:
2868                     s->mv_dir = MV_DIR_FORWARD;
2869                     s->mb_intra= 0;
2870                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2871                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2872                     break;
2873                 case CANDIDATE_MB_TYPE_INTER_I:
2874                     s->mv_dir = MV_DIR_FORWARD;
2875                     s->mv_type = MV_TYPE_FIELD;
2876                     s->mb_intra= 0;
2877                     for(i=0; i<2; i++){
2878                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2879                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2880                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2881                     }
2882                     break;
2883                 case CANDIDATE_MB_TYPE_INTER4V:
2884                     s->mv_dir = MV_DIR_FORWARD;
2885                     s->mv_type = MV_TYPE_8X8;
2886                     s->mb_intra= 0;
2887                     for(i=0; i<4; i++){
2888                         s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2889                         s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2890                     }
2891                     break;
2892                 case CANDIDATE_MB_TYPE_DIRECT:
2893                     if (CONFIG_MPEG4_ENCODER) {
2894                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2895                         s->mb_intra= 0;
2896                         motion_x=s->b_direct_mv_table[xy][0];
2897                         motion_y=s->b_direct_mv_table[xy][1];
2898                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2899                     }
2900                     break;
2901                 case CANDIDATE_MB_TYPE_DIRECT0:
2902                     if (CONFIG_MPEG4_ENCODER) {
2903                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2904                         s->mb_intra= 0;
2905                         ff_mpeg4_set_direct_mv(s, 0, 0);
2906                     }
2907                     break;
2908                 case CANDIDATE_MB_TYPE_BIDIR:
2909                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2910                     s->mb_intra= 0;
2911                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2912                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2913                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2914                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2915                     break;
2916                 case CANDIDATE_MB_TYPE_BACKWARD:
2917                     s->mv_dir = MV_DIR_BACKWARD;
2918                     s->mb_intra= 0;
2919                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2920                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2921                     break;
2922                 case CANDIDATE_MB_TYPE_FORWARD:
2923                     s->mv_dir = MV_DIR_FORWARD;
2924                     s->mb_intra= 0;
2925                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2926                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2927 //                    printf(" %d %d ", motion_x, motion_y);
2928                     break;
2929                 case CANDIDATE_MB_TYPE_FORWARD_I:
2930                     s->mv_dir = MV_DIR_FORWARD;
2931                     s->mv_type = MV_TYPE_FIELD;
2932                     s->mb_intra= 0;
2933                     for(i=0; i<2; i++){
2934                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2935                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2936                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2937                     }
2938                     break;
2939                 case CANDIDATE_MB_TYPE_BACKWARD_I:
2940                     s->mv_dir = MV_DIR_BACKWARD;
2941                     s->mv_type = MV_TYPE_FIELD;
2942                     s->mb_intra= 0;
2943                     for(i=0; i<2; i++){
2944                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2945                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2946                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2947                     }
2948                     break;
2949                 case CANDIDATE_MB_TYPE_BIDIR_I:
2950                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2951                     s->mv_type = MV_TYPE_FIELD;
2952                     s->mb_intra= 0;
2953                     for(dir=0; dir<2; dir++){
2954                         for(i=0; i<2; i++){
2955                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2956                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2957                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2958                         }
2959                     }
2960                     break;
2961                 default:
2962                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2963                 }
2964
2965                 encode_mb(s, motion_x, motion_y);
2966
2967                 // RAL: Update last macroblock type
2968                 s->last_mv_dir = s->mv_dir;
2969
2970                 if (CONFIG_H263_ENCODER &&
2971                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2972                     ff_h263_update_motion_val(s);
2973
2974                 ff_MPV_decode_mb(s, s->block);
2975             }
2976
2977             /* Clear the MV table entries of intra MBs in I/P/S frames, as they are used by direct mode in later B-frames */
2978             if(s->mb_intra /* && I,P,S_TYPE */){
2979                 s->p_mv_table[xy][0]=0;
2980                 s->p_mv_table[xy][1]=0;
2981             }
2982
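                 /* With CODEC_FLAG_PSNR, accumulate the per-plane SSE between the
                  * source and the reconstructed macroblock (clipped at the picture
                  * border) so the average PSNR can be reported at the end. */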
2983             if(s->flags&CODEC_FLAG_PSNR){
2984                 int w= 16;
2985                 int h= 16;
2986
2987                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2988                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2989
2990                 s->current_picture.f.error[0] += sse(
2991                     s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2992                     s->dest[0], w, h, s->linesize);
2993                 s->current_picture.f.error[1] += sse(
2994                     s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
2995                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2996                 s->current_picture.f.error[2] += sse(
2997                     s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
2998                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2999             }
3000             if(s->loop_filter){
3001                 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3002                     ff_h263_loop_filter(s);
3003             }
3004 //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, put_bits_count(&s->pb));
3005         }
3006     }
3007
3008     // Not pretty, but the extension header must be written before the flush below, so it has to go here.
3009     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3010         ff_msmpeg4_encode_ext_header(s);
3011
3012     write_slice_end(s);
3013
3014     /* Send the last GOB if RTP */
3015     if (s->avctx->rtp_callback) {
3016         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3017         pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3018         /* Call the RTP callback to send the last GOB */
3019         emms_c();
3020         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3021     }
3022
3023     return 0;
3024 }
3025
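     /* Each slice context collects its own statistics while encoding; MERGE() adds
      * a field from the slice context into the main context and zeroes it in the
      * source, so a later merge cannot count the same data twice. */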
3026 #define MERGE(field) dst->field += src->field; src->field=0
3027 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3028     MERGE(me.scene_change_score);
3029     MERGE(me.mc_mb_var_sum_temp);
3030     MERGE(me.mb_var_sum_temp);
3031 }
3032
3033 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3034     int i;
3035
3036     MERGE(dct_count[0]); // note: the other DCT vars are not part of the context
3037     MERGE(dct_count[1]);
3038     MERGE(mv_bits);
3039     MERGE(i_tex_bits);
3040     MERGE(p_tex_bits);
3041     MERGE(i_count);
3042     MERGE(f_count);
3043     MERGE(b_count);
3044     MERGE(skip_count);
3045     MERGE(misc_bits);
3046     MERGE(error_count);
3047     MERGE(padding_bug_score);
3048     MERGE(current_picture.f.error[0]);
3049     MERGE(current_picture.f.error[1]);
3050     MERGE(current_picture.f.error[2]);
3051
3052     if(dst->avctx->noise_reduction){
3053         for(i=0; i<64; i++){
3054             MERGE(dct_error_sum[0][i]);
3055             MERGE(dct_error_sum[1][i]);
3056         }
3057     }
3058
3059     assert(put_bits_count(&src->pb) % 8 ==0);
3060     assert(put_bits_count(&dst->pb) % 8 ==0);
3061     avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3062     flush_put_bits(&dst->pb);
3063 }
3064
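     /* Pick the quantizer for the current picture: either an explicitly scheduled
      * next_lambda or a value from the rate control module; with adaptive
      * quantization the per-MB qscale table is additionally smoothed in a codec
      * specific way (MPEG-4/H.263) before lambda is derived from it. */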
3065 static int estimate_qp(MpegEncContext *s, int dry_run){
3066     if (s->next_lambda){
3067         s->current_picture_ptr->f.quality =
3068         s->current_picture.f.quality = s->next_lambda;
3069         if(!dry_run) s->next_lambda= 0;
3070     } else if (!s->fixed_qscale) {
3071         s->current_picture_ptr->f.quality =
3072         s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
3073         if (s->current_picture.f.quality < 0)
3074             return -1;
3075     }
3076
3077     if(s->adaptive_quant){
3078         switch(s->codec_id){
3079         case AV_CODEC_ID_MPEG4:
3080             if (CONFIG_MPEG4_ENCODER)
3081                 ff_clean_mpeg4_qscales(s);
3082             break;
3083         case AV_CODEC_ID_H263:
3084         case AV_CODEC_ID_H263P:
3085         case AV_CODEC_ID_FLV1:
3086             if (CONFIG_H263_ENCODER)
3087                 ff_clean_h263_qscales(s);
3088             break;
3089         default:
3090             ff_init_qscale_tab(s);
3091         }
3092
3093         s->lambda= s->lambda_table[0];
3094         //FIXME broken
3095     }else
3096         s->lambda = s->current_picture.f.quality;
3097 //printf("%d %d\n", s->avctx->global_quality, s->current_picture.quality);
3098     update_qscale(s);
3099     return 0;
3100 }
3101
3102 /* must be called before writing the header */
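     /* For B-frames, pb_time is the distance from the previous reference to the
      * B-frame and pp_time the distance between the two surrounding references;
      * they are used e.g. for MPEG-4 direct mode MV scaling. */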
3103 static void set_frame_distances(MpegEncContext * s){
3104     assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3105     s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3106
3107     if(s->pict_type==AV_PICTURE_TYPE_B){
3108         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3109         assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3110     }else{
3111         s->pp_time= s->time - s->last_non_b_time;
3112         s->last_non_b_time= s->time;
3113         assert(s->picture_number==0 || s->pp_time > 0);
3114     }
3115 }
3116
3117 static int encode_picture(MpegEncContext *s, int picture_number)
3118 {
3119     int i;
3120     int bits;
3121     int context_count = s->slice_context_count;
3122
3123     s->picture_number = picture_number;
3124
3125     /* Reset the average MB variance */
3126     s->me.mb_var_sum_temp    =
3127     s->me.mc_mb_var_sum_temp = 0;
3128
3129     /* We need to initialize some time variables before we can encode B-frames. */
3130     // RAL: Condition added for MPEG1VIDEO
3131     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3132         set_frame_distances(s);
3133     if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3134         ff_set_mpeg4_time(s);
3135
3136     s->me.scene_change_score=0;
3137
3138 //    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3139
3140     if(s->pict_type==AV_PICTURE_TYPE_I){
3141         if(s->msmpeg4_version >= 3) s->no_rounding=1;
3142         else                        s->no_rounding=0;
3143     }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3144         if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3145             s->no_rounding ^= 1;
3146     }
3147
3148     if(s->flags & CODEC_FLAG_PASS2){
3149         if (estimate_qp(s,1) < 0)
3150             return -1;
3151         ff_get_2pass_fcode(s);
3152     }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3153         if(s->pict_type==AV_PICTURE_TYPE_B)
3154             s->lambda= s->last_lambda_for[s->pict_type];
3155         else
3156             s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3157         update_qscale(s);
3158     }
3159
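         /* Copy the updated main context into the per-slice thread contexts so that
          * motion estimation (and later encoding) can run on the slices in parallel. */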
3160     s->mb_intra=0; //for the rate distortion & bit compare functions
3161     for(i=1; i<context_count; i++){
3162         ff_update_duplicate_context(s->thread_context[i], s);
3163     }
3164
3165     if(ff_init_me(s)<0)
3166         return -1;
3167
3168     /* Estimate motion for every MB */
3169     if(s->pict_type != AV_PICTURE_TYPE_I){
3170         s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3171         s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3172         if(s->pict_type != AV_PICTURE_TYPE_B && s->avctx->me_threshold==0){
3173             if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3174                 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3175             }
3176         }
3177
3178         s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3179     }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3180         /* I-Frame */
3181         for(i=0; i<s->mb_stride*s->mb_height; i++)
3182             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3183
3184         if(!s->fixed_qscale){
3185             /* find the spatial complexity for I-frame rate control */
3186             s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3187         }
3188     }
3189     for(i=1; i<context_count; i++){
3190         merge_context_after_me(s, s->thread_context[i]);
3191     }
3192     s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3193     s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
3194     emms_c();
3195
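         /* Scene change handling: if motion estimation reported a scene change score
          * above the configured threshold for a P-picture, discard the inter decisions
          * and encode the whole picture as an I-frame instead. */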
3196     if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3197         s->pict_type= AV_PICTURE_TYPE_I;
3198         for(i=0; i<s->mb_stride*s->mb_height; i++)
3199             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3200 //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3201     }
3202
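         /* Unless unrestricted MVs (umvplus) are in use, pick the smallest
          * f_code/b_code that covers the estimated vectors and clamp or drop
          * candidates whose vectors still fall outside the representable range. */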
3203     if(!s->umvplus){
3204         if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3205             s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3206
3207             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3208                 int a,b;
3209                 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3210                 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3211                 s->f_code= FFMAX3(s->f_code, a, b);
3212             }
3213
3214             ff_fix_long_p_mvs(s);
3215             ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3216             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3217                 int j;
3218                 for(i=0; i<2; i++){
3219                     for(j=0; j<2; j++)
3220                         ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3221                                         s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
3222                 }
3223             }
3224         }
3225
3226         if(s->pict_type==AV_PICTURE_TYPE_B){
3227             int a, b;
3228
3229             a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3230             b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3231             s->f_code = FFMAX(a, b);
3232
3233             a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3234             b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3235             s->b_code = FFMAX(a, b);
3236
3237             ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3238             ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3239             ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3240             ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3241             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3242                 int dir, j;
3243                 for(dir=0; dir<2; dir++){
3244                     for(i=0; i<2; i++){
3245                         for(j=0; j<2; j++){
3246                             int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3247                                           : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3248                             ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3249                                             s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3250                         }
3251                     }
3252                 }
3253             }
3254         }
3255     }
3256
3257     if (estimate_qp(s, 0) < 0)
3258         return -1;
3259
3260     if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3261         s->qscale= 3; //reduce clipping problems
3262
3263     if (s->out_format == FMT_MJPEG) {
3264         /* For MJPEG there is no per-MB quantizer: qscale is folded into the intra matrix here and s->qscale is then pinned to 8. */
3265         for(i=1;i<64;i++){
3266             int j= s->dsp.idct_permutation[i];
3267
3268             s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3269         }
3270         s->y_dc_scale_table=
3271         s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3272         s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3273         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3274                        s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3275         s->qscale= 8;
3276     }
3277
3278     //FIXME var duplication
3279     s->current_picture_ptr->f.key_frame =
3280     s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3281     s->current_picture_ptr->f.pict_type =
3282     s->current_picture.f.pict_type = s->pict_type;
3283
3284     if (s->current_picture.f.key_frame)
3285         s->picture_in_gop_number=0;
3286
3287     s->last_bits= put_bits_count(&s->pb);
3288     switch(s->out_format) {
3289     case FMT_MJPEG:
3290         if (CONFIG_MJPEG_ENCODER)
3291             ff_mjpeg_encode_picture_header(s);
3292         break;
3293     case FMT_H261:
3294         if (CONFIG_H261_ENCODER)
3295             ff_h261_encode_picture_header(s, picture_number);
3296         break;
3297     case FMT_H263:
3298         if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3299             ff_wmv2_encode_picture_header(s, picture_number);
3300         else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)