mpegvideo_enc: K&R cosmetics (line 1000-2000).
[ffmpeg.git] / libavcodec / mpegvideo_enc.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29
30 #include "libavutil/intmath.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/opt.h"
33 #include "avcodec.h"
34 #include "dsputil.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
37 #include "h263.h"
38 #include "mjpegenc.h"
39 #include "msmpeg4.h"
40 #include "faandct.h"
41 #include "thread.h"
42 #include "aandcttab.h"
43 #include "flv.h"
44 #include "mpeg4video.h"
45 #include "internal.h"
46 #include <limits.h>
47
48 //#undef NDEBUG
49 //#include <assert.h>
50
51 static int encode_picture(MpegEncContext *s, int picture_number);
52 static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
53 static int sse_mb(MpegEncContext *s);
54 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
55 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
56
57 /* enable all paranoid tests for rounding, overflows, etc... */
58 //#define PARANOID
59
60 //#define DEBUG
61
62 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
63 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
64
65 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
66                        uint16_t (*qmat16)[2][64],
67                        const uint16_t *quant_matrix,
68                        int bias, int qmin, int qmax, int intra)
69 {
70     int qscale;
71     int shift = 0;
72
73     for (qscale = qmin; qscale <= qmax; qscale++) {
74         int i;
75         if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
76             dsp->fdct == ff_jpeg_fdct_islow_10
77 #ifdef FAAN_POSTSCALE
78             || dsp->fdct == ff_faandct
79 #endif
80             ) {
81             for (i = 0; i < 64; i++) {
82                 const int j = dsp->idct_permutation[i];
83                 /* This branch divides by qscale * quant_matrix[i] only,
84                  * without the AAN postscale, and
85                  * 16 <= qscale * quant_matrix[i] <= 7905, so
86                  * (1 << QMAT_SHIFT) / 16 >= qmat[qscale][i]
87                  *                        >= (1 << QMAT_SHIFT) / 7905 */
88
89                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
90                                         (qscale * quant_matrix[j]));
91             }
92         } else if (dsp->fdct == fdct_ifast
93 #ifndef FAAN_POSTSCALE
94                    || dsp->fdct == ff_faandct
95 #endif
96                    ) {
97             for (i = 0; i < 64; i++) {
98                 const int j = dsp->idct_permutation[i];
99                 /* 16 <= qscale * quant_matrix[i] <= 7905
100                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
101                  *             19952 <=              x  <= 249205026
102                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
103                  *           3444240 >= (1 << 36) / (x) >= 275 */
104
105                 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
106                                         (ff_aanscales[i] * qscale *
107                                          quant_matrix[j]));
108             }
109         } else {
110             for (i = 0; i < 64; i++) {
111                 const int j = dsp->idct_permutation[i];
112                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
113                  * Assume x = qscale * quant_matrix[i]
114                  * So             16 <=              x  <= 7905
115                  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
116                  * so          32768 >= (1 << 19) / (x) >= 67 */
117                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
118                                         (qscale * quant_matrix[j]));
119                 //qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) /
120                 //                    (qscale * quant_matrix[i]);
121                 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
122                                        (qscale * quant_matrix[j]);
123
124                 if (qmat16[qscale][0][i] == 0 ||
125                     qmat16[qscale][0][i] == 128 * 256)
126                     qmat16[qscale][0][i] = 128 * 256 - 1;
127                 qmat16[qscale][1][i] =
128                     ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
129                                 qmat16[qscale][0][i]);
130             }
131         }
132
133         for (i = intra; i < 64; i++) {
134             int64_t max = 8191;
135             if (dsp->fdct == fdct_ifast
136 #ifndef FAAN_POSTSCALE
137                 || dsp->fdct == ff_faandct
138 #endif
139                ) {
140                 max = (8191LL * ff_aanscales[i]) >> 14;
141             }
142             while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
143                 shift++;
144             }
145         }
146     }
147     if (shift) {
148         av_log(NULL, AV_LOG_INFO,
149                "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
150                QMAT_SHIFT - shift);
151     }
152 }
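/* Worked example of the reciprocal tables built above (illustrative only):
 * in the generic branch, with qscale = 2 and quant_matrix[j] = 16 the divisor
 * is 32, so qmat[2][i] = (1 << QMAT_SHIFT) / 32 and a quantizer can replace
 * "level / (qscale * quant_matrix[j])" by roughly
 * "(level * qmat[2][i]) >> QMAT_SHIFT", i.e. one multiply and a shift per
 * coefficient instead of a division. qmat16 holds the same reciprocals at
 * QMAT_SHIFT_MMX precision together with a rescaled rounding bias for the
 * 16-bit SIMD quantizers. */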
153
154 static inline void update_qscale(MpegEncContext *s)
155 {
156     s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
157                 (FF_LAMBDA_SHIFT + 7);
158     s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
159
160     s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
161                  FF_LAMBDA_SHIFT;
162 }
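/* Rough arithmetic behind the constant above: with FF_LAMBDA_SHIFT == 7 and
 * FF_LAMBDA_SCALE == 1 << 7, the expression is (lambda * 139 + 8192) >> 14,
 * and 139 / 16384 ~= 1 / 117.9, so qscale is essentially
 * lambda / FF_QP2LAMBDA (118) with rounding, clipped to the user qmin..qmax
 * range; lambda2 is lambda squared rescaled by FF_LAMBDA_SCALE. */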
163
164 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
165 {
166     int i;
167
168     if (matrix) {
169         put_bits(pb, 1, 1);
170         for (i = 0; i < 64; i++) {
171             put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
172         }
173     } else
174         put_bits(pb, 1, 0);
175 }
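/* This emits the MPEG-1/2 style "load matrix" syntax: one flag bit, followed
 * (when a custom matrix is present) by the 64 entries as 8-bit values in
 * zigzag scan order; a 0 flag bit keeps the default matrix. */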
176
177 /**
178  * init s->current_picture.qscale_table from s->lambda_table
179  */
180 void ff_init_qscale_tab(MpegEncContext *s)
181 {
182     int8_t * const qscale_table = s->current_picture.f.qscale_table;
183     int i;
184
185     for (i = 0; i < s->mb_num; i++) {
186         unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
187         int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
188         qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
189                                                   s->avctx->qmax);
190     }
191 }
192
193 static void copy_picture_attributes(MpegEncContext *s,
194                                     AVFrame *dst,
195                                     AVFrame *src)
196 {
197     int i;
198
199     dst->pict_type              = src->pict_type;
200     dst->quality                = src->quality;
201     dst->coded_picture_number   = src->coded_picture_number;
202     dst->display_picture_number = src->display_picture_number;
203     //dst->reference              = src->reference;
204     dst->pts                    = src->pts;
205     dst->interlaced_frame       = src->interlaced_frame;
206     dst->top_field_first        = src->top_field_first;
207
208     if (s->avctx->me_threshold) {
209         if (!src->motion_val[0])
210             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
211         if (!src->mb_type)
212             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
213         if (!src->ref_index[0])
214             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
215         if (src->motion_subsample_log2 != dst->motion_subsample_log2)
216             av_log(s->avctx, AV_LOG_ERROR,
217                    "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
218                    src->motion_subsample_log2, dst->motion_subsample_log2);
219
220         memcpy(dst->mb_type, src->mb_type,
221                s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
222
223         for (i = 0; i < 2; i++) {
224             int stride = ((16 * s->mb_width ) >>
225                           src->motion_subsample_log2) + 1;
226             int height = ((16 * s->mb_height) >> src->motion_subsample_log2);
227
228             if (src->motion_val[i] &&
229                 src->motion_val[i] != dst->motion_val[i]) {
230                 memcpy(dst->motion_val[i], src->motion_val[i],
231                        2 * stride * height * sizeof(int16_t));
232             }
233             if (src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]) {
234                 memcpy(dst->ref_index[i], src->ref_index[i],
235                        s->mb_stride * 4 * s->mb_height * sizeof(int8_t));
236             }
237         }
238     }
239 }
240
241 static void update_duplicate_context_after_me(MpegEncContext *dst,
242                                               MpegEncContext *src)
243 {
244 #define COPY(a) dst->a= src->a
245     COPY(pict_type);
246     COPY(current_picture);
247     COPY(f_code);
248     COPY(b_code);
249     COPY(qscale);
250     COPY(lambda);
251     COPY(lambda2);
252     COPY(picture_in_gop_number);
253     COPY(gop_picture_number);
254     COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
255     COPY(progressive_frame);    // FIXME don't set in encode_header
256     COPY(partitioned_frame);    // FIXME don't set in encode_header
257 #undef COPY
258 }
259
260 /**
261  * Set the given MpegEncContext to defaults for encoding.
262  * The changed fields will not depend upon the prior state of the MpegEncContext.
263  */
264 static void MPV_encode_defaults(MpegEncContext *s)
265 {
266     int i;
267     MPV_common_defaults(s);
268
269     for (i = -16; i < 16; i++) {
270         default_fcode_tab[i + MAX_MV] = 1;
271     }
272     s->me.mv_penalty = default_mv_penalty;
273     s->fcode_tab     = default_fcode_tab;
274 }
275
276 /* init video encoder */
277 av_cold int MPV_encode_init(AVCodecContext *avctx)
278 {
279     MpegEncContext *s = avctx->priv_data;
280     int i;
281     int chroma_h_shift, chroma_v_shift;
282
283     MPV_encode_defaults(s);
284
285     switch (avctx->codec_id) {
286     case CODEC_ID_MPEG2VIDEO:
287         if (avctx->pix_fmt != PIX_FMT_YUV420P &&
288             avctx->pix_fmt != PIX_FMT_YUV422P) {
289             av_log(avctx, AV_LOG_ERROR,
290                    "only YUV420 and YUV422 are supported\n");
291             return -1;
292         }
293         break;
294     case CODEC_ID_LJPEG:
295         if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
296             avctx->pix_fmt != PIX_FMT_YUVJ422P &&
297             avctx->pix_fmt != PIX_FMT_YUVJ444P &&
298             avctx->pix_fmt != PIX_FMT_BGRA     &&
299             ((avctx->pix_fmt != PIX_FMT_YUV420P &&
300               avctx->pix_fmt != PIX_FMT_YUV422P &&
301               avctx->pix_fmt != PIX_FMT_YUV444P) ||
302              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
303             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
304             return -1;
305         }
306         break;
307     case CODEC_ID_MJPEG:
308         if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
309             avctx->pix_fmt != PIX_FMT_YUVJ422P &&
310             ((avctx->pix_fmt != PIX_FMT_YUV420P &&
311               avctx->pix_fmt != PIX_FMT_YUV422P) ||
312              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
313             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
314             return -1;
315         }
316         break;
317     default:
318         if (avctx->pix_fmt != PIX_FMT_YUV420P) {
319             av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
320             return -1;
321         }
322     }
323
324     switch (avctx->pix_fmt) {
325     case PIX_FMT_YUVJ422P:
326     case PIX_FMT_YUV422P:
327         s->chroma_format = CHROMA_422;
328         break;
329     case PIX_FMT_YUVJ420P:
330     case PIX_FMT_YUV420P:
331     default:
332         s->chroma_format = CHROMA_420;
333         break;
334     }
335
336     s->bit_rate = avctx->bit_rate;
337     s->width    = avctx->width;
338     s->height   = avctx->height;
339     if (avctx->gop_size > 600 &&
340         avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
341         av_log(avctx, AV_LOG_ERROR,
342                "Warning keyframe interval too large! reducing it ...\n");
343         avctx->gop_size = 600;
344     }
345     s->gop_size     = avctx->gop_size;
346     s->avctx        = avctx;
347     s->flags        = avctx->flags;
348     s->flags2       = avctx->flags2;
349     s->max_b_frames = avctx->max_b_frames;
350     s->codec_id     = avctx->codec->id;
351     s->luma_elim_threshold   = avctx->luma_elim_threshold;
352     s->chroma_elim_threshold = avctx->chroma_elim_threshold;
353     s->strict_std_compliance = avctx->strict_std_compliance;
354 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
355     if (avctx->flags & CODEC_FLAG_PART)
356         s->data_partitioning = 1;
357 #endif
358     s->quarter_sample     = (avctx->flags & CODEC_FLAG_QPEL) != 0;
359     s->mpeg_quant         = avctx->mpeg_quant;
360     s->rtp_mode           = !!avctx->rtp_payload_size;
361     s->intra_dc_precision = avctx->intra_dc_precision;
362     s->user_specified_pts = AV_NOPTS_VALUE;
363
364     if (s->gop_size <= 1) {
365         s->intra_only = 1;
366         s->gop_size   = 12;
367     } else {
368         s->intra_only = 0;
369     }
370
371     s->me_method = avctx->me_method;
372
373     /* Fixed QSCALE */
374     s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
375
376     s->adaptive_quant = (s->avctx->lumi_masking ||
377                          s->avctx->dark_masking ||
378                          s->avctx->temporal_cplx_masking ||
379                          s->avctx->spatial_cplx_masking  ||
380                          s->avctx->p_masking      ||
381                          s->avctx->border_masking ||
382                          (s->flags & CODEC_FLAG_QP_RD)) &&
383                         !s->fixed_qscale;
384
385     s->loop_filter      = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
386 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
387     s->alternate_scan   = !!(s->flags  & CODEC_FLAG_ALT_SCAN);
388     s->intra_vlc_format = !!(s->flags2 & CODEC_FLAG2_INTRA_VLC);
389     s->q_scale_type     = !!(s->flags2 & CODEC_FLAG2_NON_LINEAR_QUANT);
390     s->obmc             = !!(s->flags  & CODEC_FLAG_OBMC);
391 #endif
392
393     if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
394         av_log(avctx, AV_LOG_ERROR,
395                "a vbv buffer size is needed, "
396                "for encoding with a maximum bitrate\n");
397         return -1;
398     }
399
400     if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
401         av_log(avctx, AV_LOG_INFO,
402                "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
403     }
404
405     if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
406         av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
407         return -1;
408     }
409
410     if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
411         av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
412         return -1;
413     }
414
415     if (avctx->rc_max_rate &&
416         avctx->rc_max_rate == avctx->bit_rate &&
417         avctx->rc_max_rate != avctx->rc_min_rate) {
418         av_log(avctx, AV_LOG_INFO,
419                "impossible bitrate constraints, this will fail\n");
420     }
421
422     if (avctx->rc_buffer_size &&
423         avctx->bit_rate * (int64_t)avctx->time_base.num >
424             avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
425         av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
426         return -1;
427     }
428
429     if (!s->fixed_qscale &&
430         avctx->bit_rate * av_q2d(avctx->time_base) >
431             avctx->bit_rate_tolerance) {
432         av_log(avctx, AV_LOG_ERROR,
433                "bitrate tolerance too small for bitrate\n");
434         return -1;
435     }
436
437     if (s->avctx->rc_max_rate &&
438         s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
439         (s->codec_id == CODEC_ID_MPEG1VIDEO ||
440          s->codec_id == CODEC_ID_MPEG2VIDEO) &&
441         90000LL * (avctx->rc_buffer_size - 1) >
442             s->avctx->rc_max_rate * 0xFFFFLL) {
443         av_log(avctx, AV_LOG_INFO,
444                "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
445                "specified vbv buffer is too large for the given bitrate!\n");
446     }
447
448     if ((s->flags & CODEC_FLAG_4MV)  && s->codec_id != CODEC_ID_MPEG4 &&
449         s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P &&
450         s->codec_id != CODEC_ID_FLV1) {
451         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
452         return -1;
453     }
454
455     if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
456         av_log(avctx, AV_LOG_ERROR,
457                "OBMC is only supported with simple mb decision\n");
458         return -1;
459     }
460
461 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
462     if (s->obmc && s->codec_id != CODEC_ID_H263 &&
463         s->codec_id != CODEC_ID_H263P) {
464         av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with H263(+)\n");
465         return -1;
466     }
467 #endif
468
469     if (s->quarter_sample && s->codec_id != CODEC_ID_MPEG4) {
470         av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
471         return -1;
472     }
473
474 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
475     if (s->data_partitioning && s->codec_id != CODEC_ID_MPEG4) {
476         av_log(avctx, AV_LOG_ERROR,
477                "data partitioning not supported by codec\n");
478         return -1;
479     }
480 #endif
481
482     if (s->max_b_frames                    &&
483         s->codec_id != CODEC_ID_MPEG4      &&
484         s->codec_id != CODEC_ID_MPEG1VIDEO &&
485         s->codec_id != CODEC_ID_MPEG2VIDEO) {
486         av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
487         return -1;
488     }
489
490     if ((s->codec_id == CODEC_ID_MPEG4 ||
491          s->codec_id == CODEC_ID_H263  ||
492          s->codec_id == CODEC_ID_H263P) &&
493         (avctx->sample_aspect_ratio.num > 255 ||
494          avctx->sample_aspect_ratio.den > 255)) {
495         av_log(avctx, AV_LOG_ERROR,
496                "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
497                avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
498         return -1;
499     }
500
501     if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME |
502                      CODEC_FLAG_ALT_SCAN)) &&
503         s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG2VIDEO) {
504         av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
505         return -1;
506     }
507
508     // FIXME mpeg2 uses that too
509     if (s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4) {
510         av_log(avctx, AV_LOG_ERROR,
511                "mpeg2 style quantization not supported by codec\n");
512         return -1;
513     }
514
515     if ((s->flags & CODEC_FLAG_CBP_RD) && !avctx->trellis) {
516         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
517         return -1;
518     }
519
520     if ((s->flags & CODEC_FLAG_QP_RD) &&
521         s->avctx->mb_decision != FF_MB_DECISION_RD) {
522         av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
523         return -1;
524     }
525
526     if (s->avctx->scenechange_threshold < 1000000000 &&
527         (s->flags & CODEC_FLAG_CLOSED_GOP)) {
528         av_log(avctx, AV_LOG_ERROR,
529                "closed gop with scene change detection are not supported yet, "
530                "set threshold to 1000000000\n");
531         return -1;
532     }
533
534     if ((s->flags2 & CODEC_FLAG2_INTRA_VLC) &&
535         s->codec_id != CODEC_ID_MPEG2VIDEO) {
536         av_log(avctx, AV_LOG_ERROR,
537                "intra vlc table not supported by codec\n");
538         return -1;
539     }
540
541     if (s->flags & CODEC_FLAG_LOW_DELAY) {
542         if (s->codec_id != CODEC_ID_MPEG2VIDEO) {
543             av_log(avctx, AV_LOG_ERROR,
544                    "low delay forcing is only available for mpeg2\n");
545             return -1;
546         }
547         if (s->max_b_frames != 0) {
548             av_log(avctx, AV_LOG_ERROR,
549                    "b frames cannot be used with low delay\n");
550             return -1;
551         }
552     }
553
554     if (s->q_scale_type == 1) {
555 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
556         if (s->codec_id != CODEC_ID_MPEG2VIDEO) {
557             av_log(avctx, AV_LOG_ERROR,
558                    "non linear quant is only available for mpeg2\n");
559             return -1;
560         }
561 #endif
562         if (avctx->qmax > 12) {
563             av_log(avctx, AV_LOG_ERROR,
564                    "non linear quant only supports qmax <= 12 currently\n");
565             return -1;
566         }
567     }
568
569     if (s->avctx->thread_count > 1         &&
570         s->codec_id != CODEC_ID_MPEG4      &&
571         s->codec_id != CODEC_ID_MPEG1VIDEO &&
572         s->codec_id != CODEC_ID_MPEG2VIDEO &&
573         (s->codec_id != CODEC_ID_H263P ||
574          !(s->flags & CODEC_FLAG_H263P_SLICE_STRUCT))) {
575         av_log(avctx, AV_LOG_ERROR,
576                "multi threaded encoding not supported by codec\n");
577         return -1;
578     }
579
580     if (s->avctx->thread_count < 1) {
581         av_log(avctx, AV_LOG_ERROR,
582                "automatic thread number detection not supported by codec, "
583                "patch welcome\n");
584         return -1;
585     }
586
587     if (s->avctx->thread_count > 1)
588         s->rtp_mode = 1;
589
590     if (!avctx->time_base.den || !avctx->time_base.num) {
591         av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
592         return -1;
593     }
594
595     i = (INT_MAX / 2 + 128) >> 8;
596     if (avctx->me_threshold >= i) {
597         av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n",
598                i - 1);
599         return -1;
600     }
601     if (avctx->mb_threshold >= i) {
602         av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
603                i - 1);
604         return -1;
605     }
606
607     if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
608         av_log(avctx, AV_LOG_INFO,
609                "notice: b_frame_strategy only affects the first pass\n");
610         avctx->b_frame_strategy = 0;
611     }
612
613     i = av_gcd(avctx->time_base.den, avctx->time_base.num);
614     if (i > 1) {
615         av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
616         avctx->time_base.den /= i;
617         avctx->time_base.num /= i;
618         //return -1;
619     }
620
621     if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG1VIDEO ||
622         s->codec_id == CODEC_ID_MPEG2VIDEO || s->codec_id == CODEC_ID_MJPEG) {
623         // (a + x * 3 / 8) / x
624         s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
625         s->inter_quant_bias = 0;
626     } else {
627         s->intra_quant_bias = 0;
628         // (a - x / 4) / x
629         s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
630     }
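    /* Sketch of the arithmetic above, assuming QUANT_BIAS_SHIFT == 8 (i.e. the
     * bias is a Q8 fraction of one quantizer step): 3 << (8 - 3) = 96 = 0.375,
     * which matches the "(a + x * 3 / 8) / x" rounding used for MPEG-1/2 and
     * MJPEG style quantization, while -(1 << (8 - 2)) = -64 = -0.25 gives the
     * quarter-step dead zone of the "(a - x / 4) / x" form used otherwise. */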
631
632     if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
633         s->intra_quant_bias = avctx->intra_quant_bias;
634     if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
635         s->inter_quant_bias = avctx->inter_quant_bias;
636
637     avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
638                                   &chroma_v_shift);
639
640     if (avctx->codec_id == CODEC_ID_MPEG4 &&
641         s->avctx->time_base.den > (1 << 16) - 1) {
642         av_log(avctx, AV_LOG_ERROR,
643                "timebase %d/%d not supported by MPEG 4 standard, "
644                "the maximum admitted value for the timebase denominator "
645                "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
646                (1 << 16) - 1);
647         return -1;
648     }
649     s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
650
651     switch (avctx->codec->id) {
652     case CODEC_ID_MPEG1VIDEO:
653         s->out_format = FMT_MPEG1;
654         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
655         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
656         break;
657     case CODEC_ID_MPEG2VIDEO:
658         s->out_format = FMT_MPEG1;
659         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
660         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
661         s->rtp_mode   = 1;
662         break;
663     case CODEC_ID_LJPEG:
664     case CODEC_ID_MJPEG:
665         s->out_format = FMT_MJPEG;
666         s->intra_only = 1; /* force intra only for jpeg */
667         if (avctx->codec->id == CODEC_ID_LJPEG &&
668             avctx->pix_fmt   == PIX_FMT_BGRA) {
669             s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
670             s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
671             s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
672         } else {
673             s->mjpeg_vsample[0] = 2;
674             s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
675             s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
676             s->mjpeg_hsample[0] = 2;
677             s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
678             s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
679         }
680         if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
681             ff_mjpeg_encode_init(s) < 0)
682             return -1;
683         avctx->delay = 0;
684         s->low_delay = 1;
685         break;
686     case CODEC_ID_H261:
687         if (!CONFIG_H261_ENCODER)
688             return -1;
689         if (ff_h261_get_picture_format(s->width, s->height) < 0) {
690             av_log(avctx, AV_LOG_ERROR,
691                    "The specified picture size of %dx%d is not valid for the "
692                    "H.261 codec.\nValid sizes are 176x144, 352x288\n",
693                    s->width, s->height);
694             return -1;
695         }
696         s->out_format = FMT_H261;
697         avctx->delay  = 0;
698         s->low_delay  = 1;
699         break;
700     case CODEC_ID_H263:
701         if (!CONFIG_H263_ENCODER)
702             return -1;
703         if (ff_match_2uint16(h263_format, FF_ARRAY_ELEMS(h263_format),
704                              s->width, s->height) == 8) {
705             av_log(avctx, AV_LOG_INFO,
706                    "The specified picture size of %dx%d is not valid for "
707                    "the H.263 codec.\nValid sizes are 128x96, 176x144, "
708                    "352x288, 704x576, and 1408x1152."
709                    "Try H.263+.\n", s->width, s->height);
710             return -1;
711         }
712         s->out_format = FMT_H263;
713         avctx->delay  = 0;
714         s->low_delay  = 1;
715         break;
716     case CODEC_ID_H263P:
717         s->out_format = FMT_H263;
718         s->h263_plus  = 1;
719         /* Fx */
720 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
721         if (avctx->flags & CODEC_FLAG_H263P_UMV)
722             s->umvplus = 1;
723         if (avctx->flags & CODEC_FLAG_H263P_AIV)
724             s->alt_inter_vlc = 1;
725         if (avctx->flags & CODEC_FLAG_H263P_SLICE_STRUCT)
726             s->h263_slice_structured = 1;
727 #endif
728         s->h263_aic        = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
729         s->modified_quant  = s->h263_aic;
730         s->loop_filter     = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
731         s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
732
733         /* /Fx */
734         /* These are just to be sure */
735         avctx->delay = 0;
736         s->low_delay = 1;
737         break;
738     case CODEC_ID_FLV1:
739         s->out_format      = FMT_H263;
740         s->h263_flv        = 2; /* format = 1; 11-bit codes */
741         s->unrestricted_mv = 1;
742         s->rtp_mode  = 0; /* don't allow GOB */
743         avctx->delay = 0;
744         s->low_delay = 1;
745         break;
746     case CODEC_ID_RV10:
747         s->out_format = FMT_H263;
748         avctx->delay  = 0;
749         s->low_delay  = 1;
750         break;
751     case CODEC_ID_RV20:
752         s->out_format      = FMT_H263;
753         avctx->delay       = 0;
754         s->low_delay       = 1;
755         s->modified_quant  = 1;
756         s->h263_aic        = 1;
757         s->h263_plus       = 1;
758         s->loop_filter     = 1;
759         s->unrestricted_mv = 0;
760         break;
761     case CODEC_ID_MPEG4:
762         s->out_format      = FMT_H263;
763         s->h263_pred       = 1;
764         s->unrestricted_mv = 1;
765         s->low_delay       = s->max_b_frames ? 0 : 1;
766         avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
767         break;
768     case CODEC_ID_MSMPEG4V2:
769         s->out_format      = FMT_H263;
770         s->h263_pred       = 1;
771         s->unrestricted_mv = 1;
772         s->msmpeg4_version = 2;
773         avctx->delay       = 0;
774         s->low_delay       = 1;
775         break;
776     case CODEC_ID_MSMPEG4V3:
777         s->out_format        = FMT_H263;
778         s->h263_pred         = 1;
779         s->unrestricted_mv   = 1;
780         s->msmpeg4_version   = 3;
781         s->flipflop_rounding = 1;
782         avctx->delay         = 0;
783         s->low_delay         = 1;
784         break;
785     case CODEC_ID_WMV1:
786         s->out_format        = FMT_H263;
787         s->h263_pred         = 1;
788         s->unrestricted_mv   = 1;
789         s->msmpeg4_version   = 4;
790         s->flipflop_rounding = 1;
791         avctx->delay         = 0;
792         s->low_delay         = 1;
793         break;
794     case CODEC_ID_WMV2:
795         s->out_format        = FMT_H263;
796         s->h263_pred         = 1;
797         s->unrestricted_mv   = 1;
798         s->msmpeg4_version   = 5;
799         s->flipflop_rounding = 1;
800         avctx->delay         = 0;
801         s->low_delay         = 1;
802         break;
803     default:
804         return -1;
805     }
806
807     avctx->has_b_frames = !s->low_delay;
808
809     s->encoding = 1;
810
811     s->progressive_frame    =
812     s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
813                                                 CODEC_FLAG_INTERLACED_ME  |
814                                                 CODEC_FLAG_ALT_SCAN));
815
816     /* init */
817     if (MPV_common_init(s) < 0)
818         return -1;
819
820     if (!s->dct_quantize)
821         s->dct_quantize = dct_quantize_c;
822     if (!s->denoise_dct)
823         s->denoise_dct  = denoise_dct_c;
824     s->fast_dct_quantize = s->dct_quantize;
825     if (avctx->trellis)
826         s->dct_quantize  = dct_quantize_trellis_c;
827
828     if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
829         s->chroma_qscale_table = ff_h263_chroma_qscale_table;
830
831     s->quant_precision = 5;
832
833     ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
834     ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
835
836     if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
837         ff_h261_encode_init(s);
838     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
839         h263_encode_init(s);
840     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
841         ff_msmpeg4_encode_init(s);
842     if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
843         && s->out_format == FMT_MPEG1)
844         ff_mpeg1_encode_init(s);
845
846     /* init q matrix */
847     for (i = 0; i < 64; i++) {
848         int j = s->dsp.idct_permutation[i];
849         if (CONFIG_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4 &&
850             s->mpeg_quant) {
851             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
852             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
853         } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
854             s->intra_matrix[j] =
855             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
856         } else {
857             /* mpeg1/2 */
858             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
859             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
860         }
861         if (s->avctx->intra_matrix)
862             s->intra_matrix[j] = s->avctx->intra_matrix[i];
863         if (s->avctx->inter_matrix)
864             s->inter_matrix[j] = s->avctx->inter_matrix[i];
865     }
866
867     /* precompute matrix */
868     /* for mjpeg, we do include qscale in the matrix */
869     if (s->out_format != FMT_MJPEG) {
870         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
871                           s->intra_matrix, s->intra_quant_bias, avctx->qmin,
872                           31, 1);
873         ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
874                           s->inter_matrix, s->inter_quant_bias, avctx->qmin,
875                           31, 0);
876     }
877
878     if (ff_rate_control_init(s) < 0)
879         return -1;
880
881     return 0;
882 }
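#if 0
/* Minimal caller sketch (illustrative only, never compiled): roughly how an
 * application of this API generation would reach MPV_encode_init() through
 * avcodec_open2(). The codec choice and parameter values are examples, not
 * requirements imposed by this file. */
static AVCodecContext *open_example_encoder(int width, int height)
{
    AVCodec *codec        = avcodec_find_encoder(CODEC_ID_MPEG4);
    AVCodecContext *avctx = codec ? avcodec_alloc_context3(codec) : NULL;

    if (!avctx)
        return NULL;

    avctx->width        = width;
    avctx->height       = height;
    avctx->pix_fmt      = PIX_FMT_YUV420P;        /* checked above            */
    avctx->time_base    = (AVRational) { 1, 25 }; /* "framerate not set"
                                                   * error otherwise          */
    avctx->bit_rate     = 800000;
    avctx->gop_size     = 12;
    avctx->max_b_frames = 2;                      /* implies has_b_frames     */

    if (avcodec_open2(avctx, codec, NULL) < 0) {  /* calls MPV_encode_init()  */
        av_freep(&avctx);
        return NULL;
    }
    return avctx;
}
#endif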
883
884 av_cold int MPV_encode_end(AVCodecContext *avctx)
885 {
886     MpegEncContext *s = avctx->priv_data;
887
888     ff_rate_control_uninit(s);
889
890     MPV_common_end(s);
891     if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
892         s->out_format == FMT_MJPEG)
893         ff_mjpeg_encode_close(s);
894
895     av_freep(&avctx->extradata);
896
897     return 0;
898 }
899
900 static int get_sae(uint8_t *src, int ref, int stride)
901 {
902     int x,y;
903     int acc = 0;
904
905     for (y = 0; y < 16; y++) {
906         for (x = 0; x < 16; x++) {
907             acc += FFABS(src[x + y * stride] - ref);
908         }
909     }
910
911     return acc;
912 }
913
914 static int get_intra_count(MpegEncContext *s, uint8_t *src,
915                            uint8_t *ref, int stride)
916 {
917     int x, y, w, h;
918     int acc = 0;
919
920     w = s->width  & ~15;
921     h = s->height & ~15;
922
923     for (y = 0; y < h; y += 16) {
924         for (x = 0; x < w; x += 16) {
925             int offset = x + y * stride;
926             int sad  = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
927                                      16);
928             int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
929             int sae  = get_sae(src + offset, mean, stride);
930
931             acc += sae + 500 < sad;
932         }
933     }
934     return acc;
935 }
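/* Heuristic note: get_sae() sums the absolute deviation of a 16x16 block from
 * its own mean (a crude intra cost), while the SAD against the previous frame
 * is a crude inter cost, so get_intra_count() counts macroblocks that look
 * cheaper to intra code. b_frame_strategy == 1 below uses that count as a
 * scene-change score when deciding how many B frames to insert. */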
936
937
938 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
939 {
940     AVFrame *pic = NULL;
941     int64_t pts;
942     int i;
943     const int encoding_delay = s->max_b_frames;
944     int direct = 1;
945
946     if (pic_arg) {
947         pts = pic_arg->pts;
948         pic_arg->display_picture_number = s->input_picture_number++;
949
950         if (pts != AV_NOPTS_VALUE) {
951             if (s->user_specified_pts != AV_NOPTS_VALUE) {
952                 int64_t time = pts;
953                 int64_t last = s->user_specified_pts;
954
955                 if (time <= last) {
956                     av_log(s->avctx, AV_LOG_ERROR,
957                            "Error, Invalid timestamp=%"PRId64", "
958                            "last=%"PRId64"\n", pts, s->user_specified_pts);
959                     return -1;
960                 }
961             }
962             s->user_specified_pts = pts;
963         } else {
964             if (s->user_specified_pts != AV_NOPTS_VALUE) {
965                 s->user_specified_pts =
966                 pts = s->user_specified_pts + 1;
967                 av_log(s->avctx, AV_LOG_INFO,
968                        "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
969                        pts);
970             } else {
971                 pts = pic_arg->display_picture_number;
972             }
973         }
974     }
975
976   if (pic_arg) {
977     if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
978         direct = 0;
979     if (pic_arg->linesize[0] != s->linesize)
980         direct = 0;
981     if (pic_arg->linesize[1] != s->uvlinesize)
982         direct = 0;
983     if (pic_arg->linesize[2] != s->uvlinesize)
984         direct = 0;
985
986     //av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0],
987     //       pic_arg->linesize[1], s->linesize, s->uvlinesize);
988
989     if (direct) {
990         i = ff_find_unused_picture(s, 1);
991         if (i < 0)
992             return i;
993
994         pic = (AVFrame *) &s->picture[i];
995         pic->reference = 3;
996
997         for (i = 0; i < 4; i++) {
998             pic->data[i]     = pic_arg->data[i];
999             pic->linesize[i] = pic_arg->linesize[i];
1000         }
1001         if (ff_alloc_picture(s, (Picture *) pic, 1) < 0) {
1002             return -1;
1003         }
1004     } else {
1005         i = ff_find_unused_picture(s, 0);
1006         if (i < 0)
1007             return i;
1008
1009         pic = (AVFrame *) &s->picture[i];
1010         pic->reference = 3;
1011
1012         if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
1013             return -1;
1014         }
1015
1016         if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1017             pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1018             pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1019             // empty
1020         } else {
1021             int h_chroma_shift, v_chroma_shift;
1022             avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift,
1023                                           &v_chroma_shift);
1024
1025             for (i = 0; i < 3; i++) {
1026                 int src_stride = pic_arg->linesize[i];
1027                 int dst_stride = i ? s->uvlinesize : s->linesize;
1028                 int h_shift = i ? h_chroma_shift : 0;
1029                 int v_shift = i ? v_chroma_shift : 0;
1030                 int w = s->width  >> h_shift;
1031                 int h = s->height >> v_shift;
1032                 uint8_t *src = pic_arg->data[i];
1033                 uint8_t *dst = pic->data[i];
1034
1035                 if (!s->avctx->rc_buffer_size)
1036                     dst += INPLACE_OFFSET;
1037
1038                 if (src_stride == dst_stride)
1039                     memcpy(dst, src, src_stride * h);
1040                 else {
1041                     while (h--) {
1042                         memcpy(dst, src, w);
1043                         dst += dst_stride;
1044                         src += src_stride;
1045                     }
1046                 }
1047             }
1048         }
1049     }
1050     copy_picture_attributes(s, pic, pic_arg);
1051     pic->pts = pts; // we set this here to avoid modifying pic_arg
1052   }
1053
1054     /* shift buffer entries */
1055     for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1056         s->input_picture[i - 1] = s->input_picture[i];
1057
1058     s->input_picture[encoding_delay] = (Picture*) pic;
1059
1060     return 0;
1061 }
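/* Summary of the two paths above: if the caller's buffer may be preserved and
 * already has the encoder's strides, its planes are wrapped directly as a
 * shared picture; otherwise the data is copied into an internal picture,
 * shifted by INPLACE_OFFSET (when no VBV retries are possible) so the same
 * buffer can later be reused as the current picture without another copy.
 * Either way the frame ends up in input_picture[max_b_frames], the tail of
 * the reordering queue. */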
1062
1063 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1064 {
1065     int x, y, plane;
1066     int score = 0;
1067     int64_t score64 = 0;
1068
1069     for (plane = 0; plane < 3; plane++) {
1070         const int stride = p->f.linesize[plane];
1071         const int bw = plane ? 1 : 2;
1072         for (y = 0; y < s->mb_height * bw; y++) {
1073             for (x = 0; x < s->mb_width * bw; x++) {
1074                 int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
1075                 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
1076                 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
1077                 int v   = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1078
1079                 switch (s->avctx->frame_skip_exp) {
1080                 case 0: score    =  FFMAX(score, v);          break;
1081                 case 1: score   += FFABS(v);                  break;
1082                 case 2: score   += v * v;                     break;
1083                 case 3: score64 += FFABS(v * v * (int64_t)v); break;
1084                 case 4: score64 += v * v * (int64_t)(v * v);  break;
1085                 }
1086             }
1087         }
1088     }
1089
1090     if (score)
1091         score64 = score;
1092
1093     if (score64 < s->avctx->frame_skip_threshold)
1094         return 1;
1095     if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
1096         return 1;
1097     return 0;
1098 }
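/* frame_skip_exp selects the norm used above: 0 keeps the largest per-block
 * difference, 1 sums absolute values, 2 sums squares, and 3/4 accumulate
 * cubic and quartic terms in 64 bits. The frame is considered skippable when
 * the score stays below frame_skip_threshold or below a lambda-scaled
 * frame_skip_factor. */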
1099
1100 static int estimate_best_b_count(MpegEncContext *s)
1101 {
1102     AVCodec *codec    = avcodec_find_encoder(s->avctx->codec_id);
1103     AVCodecContext *c = avcodec_alloc_context3(NULL);
1104     AVFrame input[FF_MAX_B_FRAMES + 2];
1105     const int scale = s->avctx->brd_scale;
1106     int i, j, out_size, p_lambda, b_lambda, lambda2;
1107     int outbuf_size  = s->width * s->height; // FIXME
1108     uint8_t *outbuf  = av_malloc(outbuf_size);
1109     int64_t best_rd  = INT64_MAX;
1110     int best_b_count = -1;
1111
1112     assert(scale >= 0 && scale <= 3);
1113
1114     //emms_c();
1115     //s->next_picture_ptr->quality;
1116     p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1117     //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1118     b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1119     if (!b_lambda) // FIXME we should do this somewhere else
1120         b_lambda = p_lambda;
1121     lambda2  = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1122                FF_LAMBDA_SHIFT;
1123
1124     c->width        = s->width  >> scale;
1125     c->height       = s->height >> scale;
1126     c->flags        = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1127                       CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1128     c->flags       |= s->avctx->flags & CODEC_FLAG_QPEL;
1129     c->mb_decision  = s->avctx->mb_decision;
1130     c->me_cmp       = s->avctx->me_cmp;
1131     c->mb_cmp       = s->avctx->mb_cmp;
1132     c->me_sub_cmp   = s->avctx->me_sub_cmp;
1133     c->pix_fmt      = PIX_FMT_YUV420P;
1134     c->time_base    = s->avctx->time_base;
1135     c->max_b_frames = s->max_b_frames;
1136
1137     if (avcodec_open2(c, codec, NULL) < 0)
1138         return -1;
1139
1140     for (i = 0; i < s->max_b_frames + 2; i++) {
1141         int ysize = c->width * c->height;
1142         int csize = (c->width / 2) * (c->height / 2);
1143         Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1144                                                 s->next_picture_ptr;
1145
1146         avcodec_get_frame_defaults(&input[i]);
1147         input[i].data[0]     = av_malloc(ysize + 2 * csize);
1148         input[i].data[1]     = input[i].data[0] + ysize;
1149         input[i].data[2]     = input[i].data[1] + csize;
1150         input[i].linesize[0] = c->width;
1151         input[i].linesize[1] =
1152         input[i].linesize[2] = c->width / 2;
1153
1154         if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1155             pre_input = *pre_input_ptr;
1156
1157             if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
1158                 pre_input.f.data[0] += INPLACE_OFFSET;
1159                 pre_input.f.data[1] += INPLACE_OFFSET;
1160                 pre_input.f.data[2] += INPLACE_OFFSET;
1161             }
1162
1163             s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
1164                                  pre_input.f.data[0], pre_input.f.linesize[0],
1165                                  c->width,      c->height);
1166             s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
1167                                  pre_input.f.data[1], pre_input.f.linesize[1],
1168                                  c->width >> 1, c->height >> 1);
1169             s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
1170                                  pre_input.f.data[2], pre_input.f.linesize[2],
1171                                  c->width >> 1, c->height >> 1);
1172         }
1173     }
1174
1175     for (j = 0; j < s->max_b_frames + 1; j++) {
1176         int64_t rd = 0;
1177
1178         if (!s->input_picture[j])
1179             break;
1180
1181         c->error[0] = c->error[1] = c->error[2] = 0;
1182
1183         input[0].pict_type = AV_PICTURE_TYPE_I;
1184         input[0].quality   = 1 * FF_QP2LAMBDA;
1185         out_size           = avcodec_encode_video(c, outbuf,
1186                                                   outbuf_size, &input[0]);
1187         //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1188
1189         for (i = 0; i < s->max_b_frames + 1; i++) {
1190             int is_p = i % (j + 1) == j || i == s->max_b_frames;
1191
1192             input[i + 1].pict_type = is_p ?
1193                                      AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1194             input[i + 1].quality   = is_p ? p_lambda : b_lambda;
1195             out_size = avcodec_encode_video(c, outbuf, outbuf_size,
1196                                             &input[i + 1]);
1197             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1198         }
1199
1200         /* get the delayed frames */
1201         while (out_size) {
1202             out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
1203             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1204         }
1205
1206         rd += c->error[0] + c->error[1] + c->error[2];
1207
1208         if (rd < best_rd) {
1209             best_rd = rd;
1210             best_b_count = j;
1211         }
1212     }
1213
1214     av_freep(&outbuf);
1215     avcodec_close(c);
1216     av_freep(&c);
1217
1218     for (i = 0; i < s->max_b_frames + 2; i++) {
1219         av_freep(&input[i].data[0]);
1220     }
1221
1222     return best_b_count;
1223 }
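/* b_frame_strategy == 2 in a nutshell: the queued input frames are downscaled
 * by brd_scale, re-encoded with a temporary encoder instance for each
 * candidate B-run length j, and the candidate with the lowest combined cost
 * (output bits weighted by lambda2 plus the SSE reported in c->error[]) is
 * returned as the best B-frame count. */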
1224
1225 static int select_input_picture(MpegEncContext *s)
1226 {
1227     int i;
1228
1229     for (i = 1; i < MAX_PICTURE_COUNT; i++)
1230         s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1231     s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1232
1233     /* set next picture type & ordering */
1234     if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
1235         if (/*s->picture_in_gop_number >= s->gop_size ||*/
1236             s->next_picture_ptr == NULL || s->intra_only) {
1237             s->reordered_input_picture[0] = s->input_picture[0];
1238             s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1239             s->reordered_input_picture[0]->f.coded_picture_number =
1240                 s->coded_picture_number++;
1241         } else {
1242             int b_frames;
1243
1244             if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1245                 if (s->picture_in_gop_number < s->gop_size &&
1246                     skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1247                     // FIXME check that the gop check above is +-1 correct
1248                     //av_log(NULL, AV_LOG_DEBUG, "skip %p %"PRId64"\n",
1249                     //       s->input_picture[0]->f.data[0],
1250                     //       s->input_picture[0]->pts);
1251
1252                     if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
1253                         for (i = 0; i < 4; i++)
1254                             s->input_picture[0]->f.data[i] = NULL;
1255                         s->input_picture[0]->f.type = 0;
1256                     } else {
1257                         assert(s->input_picture[0]->f.type == FF_BUFFER_TYPE_USER ||
1258                                s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);
1259
1260                         s->avctx->release_buffer(s->avctx,
1261                                                  (AVFrame *) s->input_picture[0]);
1262                     }
1263
1264                     emms_c();
1265                     ff_vbv_update(s, 0);
1266
1267                     goto no_output_pic;
1268                 }
1269             }
1270
1271             if (s->flags & CODEC_FLAG_PASS2) {
1272                 for (i = 0; i < s->max_b_frames + 1; i++) {
1273                     int pict_num = s->input_picture[0]->f.display_picture_number + i;
1274
1275                     if (pict_num >= s->rc_context.num_entries)
1276                         break;
1277                     if (!s->input_picture[i]) {
1278                         s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1279                         break;
1280                     }
1281
1282                     s->input_picture[i]->f.pict_type =
1283                         s->rc_context.entry[pict_num].new_pict_type;
1284                 }
1285             }
1286
1287             if (s->avctx->b_frame_strategy == 0) {
1288                 b_frames = s->max_b_frames;
1289                 while (b_frames && !s->input_picture[b_frames])
1290                     b_frames--;
1291             } else if (s->avctx->b_frame_strategy == 1) {
1292                 for (i = 1; i < s->max_b_frames + 1; i++) {
1293                     if (s->input_picture[i] &&
1294                         s->input_picture[i]->b_frame_score == 0) {
1295                         s->input_picture[i]->b_frame_score =
1296                             get_intra_count(s,
1297                                             s->input_picture[i    ]->f.data[0],
1298                                             s->input_picture[i - 1]->f.data[0],
1299                                             s->linesize) + 1;
1300                     }
1301                 }
1302                 for (i = 0; i < s->max_b_frames + 1; i++) {
1303                     if (s->input_picture[i] == NULL ||
1304                         s->input_picture[i]->b_frame_score - 1 >
1305                             s->mb_num / s->avctx->b_sensitivity)
1306                         break;
1307                 }
1308
1309                 b_frames = FFMAX(0, i - 1);
1310
1311                 /* reset scores */
1312                 for (i = 0; i < b_frames + 1; i++) {
1313                     s->input_picture[i]->b_frame_score = 0;
1314                 }
1315             } else if (s->avctx->b_frame_strategy == 2) {
1316                 b_frames = estimate_best_b_count(s);
1317             } else {
1318                 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1319                 b_frames = 0;
1320             }
1321
1322             emms_c();
1323             //static int b_count = 0;
1324             //b_count += b_frames;
1325             //av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
1326
1327             for (i = b_frames - 1; i >= 0; i--) {
1328                 int type = s->input_picture[i]->f.pict_type;
1329                 if (type && type != AV_PICTURE_TYPE_B)
1330                     b_frames = i;
1331             }
1332             if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1333                 b_frames == s->max_b_frames) {
1334                 av_log(s->avctx, AV_LOG_ERROR,
1335                        "warning, too many b frames in a row\n");
1336             }
1337
1338             if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1339                 if ((s->flags2 & CODEC_FLAG2_STRICT_GOP) &&
1340                     s->gop_size > s->picture_in_gop_number) {
1341                     b_frames = s->gop_size - s->picture_in_gop_number - 1;
1342                 } else {
1343                     if (s->flags & CODEC_FLAG_CLOSED_GOP)
1344                         b_frames = 0;
1345                     s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1346                 }
1347             }
1348
1349             if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1350                 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
1351                 b_frames--;
1352
1353             s->reordered_input_picture[0] = s->input_picture[b_frames];
1354             if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1355                 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1356             s->reordered_input_picture[0]->f.coded_picture_number =
1357                 s->coded_picture_number++;
1358             for (i = 0; i < b_frames; i++) {
1359                 s->reordered_input_picture[i + 1] = s->input_picture[i];
1360                 s->reordered_input_picture[i + 1]->f.pict_type =
1361                     AV_PICTURE_TYPE_B;
1362                 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1363                     s->coded_picture_number++;
1364             }
1365         }
1366     }
1367 no_output_pic:
1368     if (s->reordered_input_picture[0]) {
1369         s->reordered_input_picture[0]->f.reference =
1370            s->reordered_input_picture[0]->f.pict_type !=
1371                AV_PICTURE_TYPE_B ? 3 : 0;
1372
1373         ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
1374
1375         if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED ||
1376             s->avctx->rc_buffer_size) {
1377             // input is a shared pix, so we can't modify it -> alloc a new
1378             // one & ensure that the shared one is reusable
1379
1380             Picture *pic;
1381             int i = ff_find_unused_picture(s, 0);
1382             if (i < 0)
1383                 return i;
1384             pic = &s->picture[i];
1385
1386             pic->f.reference = s->reordered_input_picture[0]->f.reference;
1387             if (ff_alloc_picture(s, pic, 0) < 0) {
1388                 return -1;
1389             }
1390
1391             /* mark us unused / free shared pic */
1392             if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
1393                 s->avctx->release_buffer(s->avctx,
1394                                          (AVFrame *) s->reordered_input_picture[0]);
1395             for (i = 0; i < 4; i++)
1396                 s->reordered_input_picture[0]->f.data[i] = NULL;
1397             s->reordered_input_picture[0]->f.type = 0;
1398
1399             copy_picture_attributes(s, (AVFrame *) pic,
1400                                     (AVFrame *) s->reordered_input_picture[0]);
1401
1402             s->current_picture_ptr = pic;
1403         } else {
1404             // input is not a shared pix -> reuse buffer for current_pix
1405
1406             assert(s->reordered_input_picture[0]->f.type ==
1407                        FF_BUFFER_TYPE_USER ||
1408                    s->reordered_input_picture[0]->f.type ==
1409                        FF_BUFFER_TYPE_INTERNAL);
1410
1411             s->current_picture_ptr = s->reordered_input_picture[0];
1412             for (i = 0; i < 4; i++) {
1413                 s->new_picture.f.data[i] += INPLACE_OFFSET;
1414             }
1415         }
1416         ff_copy_picture(&s->current_picture, s->current_picture_ptr);
1417
1418         s->picture_number = s->new_picture.f.display_picture_number;
1419         //printf("dpn:%d\n", s->picture_number);
1420     } else {
1421         memset(&s->new_picture, 0, sizeof(Picture));
1422     }
1423     return 0;
1424 }
1425
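     /* Encode one input frame into buf: the output buffer is split among the
      * slice threads, the next picture to code is selected and reordered, the
      * frame is encoded (retrying at a higher lambda if it overflows the
      * rate-control buffer), stuffing and the MPEG-1/2 vbv_delay are written
      * where needed, and the number of bytes produced is returned. */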
1426 int MPV_encode_picture(AVCodecContext *avctx,
1427                        unsigned char *buf, int buf_size, void *data)
1428 {
1429     MpegEncContext *s = avctx->priv_data;
1430     AVFrame *pic_arg  = data;
1431     int i, stuffing_count, context_count = avctx->thread_count;
1432
1433     for (i = 0; i < context_count; i++) {
1434         int start_y = s->thread_context[i]->start_mb_y;
1435         int   end_y = s->thread_context[i]->  end_mb_y;
1436         int h       = s->mb_height;
1437         uint8_t *start = buf + (size_t)(((int64_t) buf_size) * start_y / h);
1438         uint8_t *end   = buf + (size_t)(((int64_t) buf_size) *   end_y / h);
1439
1440         init_put_bits(&s->thread_context[i]->pb, start, end - start);
1441     }
1442
1443     s->picture_in_gop_number++;
1444
1445     if (load_input_picture(s, pic_arg) < 0)
1446         return -1;
1447
1448     if (select_input_picture(s) < 0) {
1449         return -1;
1450     }
1451
1452     /* output? */
1453     if (s->new_picture.f.data[0]) {
1454         s->pict_type = s->new_picture.f.pict_type;
1455         //emms_c();
1456         //printf("qs:%f %f %d\n", s->new_picture.quality,
1457         //       s->current_picture.quality, s->qscale);
1458         MPV_frame_start(s, avctx);
1459 vbv_retry:
1460         if (encode_picture(s, s->picture_number) < 0)
1461             return -1;
1462
1463         avctx->header_bits = s->header_bits;
1464         avctx->mv_bits     = s->mv_bits;
1465         avctx->misc_bits   = s->misc_bits;
1466         avctx->i_tex_bits  = s->i_tex_bits;
1467         avctx->p_tex_bits  = s->p_tex_bits;
1468         avctx->i_count     = s->i_count;
1469         // FIXME f/b_count in avctx
1470         avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
1471         avctx->skip_count  = s->skip_count;
1472
1473         MPV_frame_end(s);
1474
1475         if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1476             ff_mjpeg_encode_picture_trailer(s);
1477
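             /* VBV overflow handling: if the coded frame exceeds the allowed
              * share of the rate-control buffer, raise lambda (and the per-MB
              * lambda table), undo the frame-level state changed while
              * encoding, reset the slice bit buffers and try again. */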
1478         if (avctx->rc_buffer_size) {
1479             RateControlContext *rcc = &s->rc_context;
1480             int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1481
1482             if (put_bits_count(&s->pb) > max_size &&
1483                 s->lambda < s->avctx->lmax) {
1484                 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1485                                        (s->qscale + 1) / s->qscale);
1486                 if (s->adaptive_quant) {
1487                     int i;
1488                     for (i = 0; i < s->mb_height * s->mb_stride; i++)
1489                         s->lambda_table[i] =
1490                             FFMAX(s->lambda_table[i] + 1,
1491                                   s->lambda_table[i] * (s->qscale + 1) /
1492                                   s->qscale);
1493                 }
1494                 s->mb_skipped = 0;        // normally done in MPV_frame_start()
1495                 // the no_rounding toggle was done in encode_picture(), so we must undo it
1496                 if (s->pict_type == AV_PICTURE_TYPE_P) {
1497                     if (s->flipflop_rounding          ||
1498                         s->codec_id == CODEC_ID_H263P ||
1499                         s->codec_id == CODEC_ID_MPEG4)
1500                         s->no_rounding ^= 1;
1501                 }
1502                 if (s->pict_type != AV_PICTURE_TYPE_B) {
1503                     s->time_base       = s->last_time_base;
1504                     s->last_non_b_time = s->time - s->pp_time;
1505                 }
1506                 //av_log(NULL, AV_LOG_ERROR, "R:%d ", s->next_lambda);
1507                 for (i = 0; i < context_count; i++) {
1508                     PutBitContext *pb = &s->thread_context[i]->pb;
1509                     init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1510                 }
1511                 goto vbv_retry;
1512             }
1513
1514             assert(s->avctx->rc_max_rate);
1515         }
1516
1517         if (s->flags & CODEC_FLAG_PASS1)
1518             ff_write_pass1_stats(s);
1519
1520         for (i = 0; i < 4; i++) {
1521             s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1522             avctx->error[i] += s->current_picture_ptr->f.error[i];
1523         }
1524
1525         if (s->flags & CODEC_FLAG_PASS1)
1526             assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1527                    avctx->i_tex_bits + avctx->p_tex_bits ==
1528                        put_bits_count(&s->pb));
1529         flush_put_bits(&s->pb);
1530         s->frame_bits  = put_bits_count(&s->pb);
1531
1532         stuffing_count = ff_vbv_update(s, s->frame_bits);
1533         if (stuffing_count) {
1534             if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1535                     stuffing_count + 50) {
1536                 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1537                 return -1;
1538             }
1539
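                 /* The stuffing pattern is codec specific: MPEG-1/2 append
                  * zero bytes, MPEG-4 writes the 32-bit code 0x000001C3
                  * followed by 0xFF bytes. */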
1540             switch (s->codec_id) {
1541             case CODEC_ID_MPEG1VIDEO:
1542             case CODEC_ID_MPEG2VIDEO:
1543                 while (stuffing_count--) {
1544                     put_bits(&s->pb, 8, 0);
1545                 }
1546                 break;
1547             case CODEC_ID_MPEG4:
1548                 put_bits(&s->pb, 16, 0);
1549                 put_bits(&s->pb, 16, 0x1C3);
1550                 stuffing_count -= 4;
1551                 while (stuffing_count--) {
1552                     put_bits(&s->pb, 8, 0xFF);
1553                 }
1554                 break;
1555             default:
1556                 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1557             }
1558             flush_put_bits(&s->pb);
1559             s->frame_bits  = put_bits_count(&s->pb);
1560         }
1561
1562         /* update mpeg1/2 vbv_delay for CBR */
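             /* vbv_delay (in 90 kHz units) is recomputed from the rate-control
              * buffer state and patched into the already written picture header
              * via vbv_delay_ptr; avctx->vbv_delay is reported in 27 MHz units
              * (hence the * 300). */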
1563         if (s->avctx->rc_max_rate                          &&
1564             s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1565             s->out_format == FMT_MPEG1                     &&
1566             90000LL * (avctx->rc_buffer_size - 1) <=
1567                 s->avctx->rc_max_rate * 0xFFFFLL) {
1568             int vbv_delay, min_delay;
1569             double inbits  = s->avctx->rc_max_rate *
1570                              av_q2d(s->avctx->time_base);
1571             int    minbits = s->frame_bits - 8 *
1572                              (s->vbv_delay_ptr - s->pb.buf - 1);
1573             double bits    = s->rc_context.buffer_index + minbits - inbits;
1574
1575             if (bits < 0)
1576                 av_log(s->avctx, AV_LOG_ERROR,
1577                        "Internal error, negative bits\n");
1578
1579             assert(s->repeat_first_field == 0);
1580
1581             vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1582             min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1583                         s->avctx->rc_max_rate;
1584
1585             vbv_delay = FFMAX(vbv_delay, min_delay);
1586
1587             assert(vbv_delay < 0xFFFF);
1588
1589             s->vbv_delay_ptr[0] &= 0xF8;
1590             s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1591             s->vbv_delay_ptr[1]  = vbv_delay >> 5;
1592             s->vbv_delay_ptr[2] &= 0x07;
1593             s->vbv_delay_ptr[2] |= vbv_delay << 3;
1594             avctx->vbv_delay     = vbv_delay * 300;
1595         }
1596         s->total_bits     += s->frame_bits;
1597         avctx->frame_bits  = s->frame_bits;
1598     } else {
1599         assert((put_bits_ptr(&s->pb) == s->pb.buf));
1600         s->frame_bits = 0;
1601     }
1602     assert((s->frame_bits & 7) == 0);
1603
1604     return s->frame_bits / 8;
1605 }
1606
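     /* Zero out a block that contains nothing but a few +-1 coefficients whose
      * combined cost estimate (tab[run], indexed by the zero run preceding each
      * coefficient) stays below the threshold; any |level| > 1 keeps the block.
      * A negative threshold additionally allows the DC coefficient to be
      * eliminated. */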
1607 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1608                                                 int n, int threshold)
1609 {
1610     static const char tab[64] = {
1611         3, 2, 2, 1, 1, 1, 1, 1,
1612         1, 1, 1, 1, 1, 1, 1, 1,
1613         1, 1, 1, 1, 1, 1, 1, 1,
1614         0, 0, 0, 0, 0, 0, 0, 0,
1615         0, 0, 0, 0, 0, 0, 0, 0,
1616         0, 0, 0, 0, 0, 0, 0, 0,
1617         0, 0, 0, 0, 0, 0, 0, 0,
1618         0, 0, 0, 0, 0, 0, 0, 0
1619     };
1620     int score = 0;
1621     int run = 0;
1622     int i;
1623     DCTELEM *block = s->block[n];
1624     const int last_index = s->block_last_index[n];
1625     int skip_dc;
1626
1627     if (threshold < 0) {
1628         skip_dc = 0;
1629         threshold = -threshold;
1630     } else
1631         skip_dc = 1;
1632
1633     /* Are all the coefficients we could set to zero already zero? */
1634     if (last_index <= skip_dc - 1)
1635         return;
1636
1637     for (i = 0; i <= last_index; i++) {
1638         const int j = s->intra_scantable.permutated[i];
1639         const int level = FFABS(block[j]);
1640         if (level == 1) {
1641             if (skip_dc && i == 0)
1642                 continue;
1643             score += tab[run];
1644             run = 0;
1645         } else if (level > 1) {
1646             return;
1647         } else {
1648             run++;
1649         }
1650     }
1651     if (score >= threshold)
1652         return;
1653     for (i = skip_dc; i <= last_index; i++) {
1654         const int j = s->intra_scantable.permutated[i];
1655         block[j] = 0;
1656     }
1657     if (block[0])
1658         s->block_last_index[n] = 0;
1659     else
1660         s->block_last_index[n] = -1;
1661 }
1662
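     /* Clip the quantized coefficients to [min_qcoeff, max_qcoeff] (the intra
      * DC is left alone) and report how many were clipped when the simple
      * macroblock decision is in use. */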
1663 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block,
1664                                int last_index)
1665 {
1666     int i;
1667     const int maxlevel = s->max_qcoeff;
1668     const int minlevel = s->min_qcoeff;
1669     int overflow = 0;
1670
1671     if (s->mb_intra) {
1672         i = 1; // skip clipping of intra dc
1673     } else
1674         i = 0;
1675
1676     for (; i <= last_index; i++) {
1677         const int j = s->intra_scantable.permutated[i];
1678         int level = block[j];
1679
1680         if (level > maxlevel) {
1681             level = maxlevel;
1682             overflow++;
1683         } else if (level < minlevel) {
1684             level = minlevel;
1685             overflow++;
1686         }
1687
1688         block[j] = level;
1689     }
1690
1691     if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1692         av_log(s->avctx, AV_LOG_INFO,
1693                "warning, clipping %d dct coefficients to %d..%d\n",
1694                overflow, minlevel, maxlevel);
1695 }
1696
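     /* Per-pixel visual weight for quantizer noise shaping:
      * 36 * sqrt(count * sqr - sum * sum) / count over the 3x3 neighbourhood,
      * i.e. roughly 36 times the local standard deviation. */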
1697 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1698 {
1699     int x, y;
1700     // FIXME optimize
1701     for (y = 0; y < 8; y++) {
1702         for (x = 0; x < 8; x++) {
1703             int x2, y2;
1704             int sum = 0;
1705             int sqr = 0;
1706             int count = 0;
1707
1708             for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1709                 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1710                     int v = ptr[x2 + y2 * stride];
1711                     sum += v;
1712                     sqr += v * v;
1713                     count++;
1714                 }
1715             }
1716             weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
1717         }
1718     }
1719 }
1720
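     /* Encode one macroblock: apply adaptive/QP-RD quantizer settings, fetch
      * the source pixels (with edge emulation at the picture border), compute
      * intra pixels or inter prediction differences (choosing interlaced DCT
      * where enabled), optionally skip blocks with a negligible prediction
      * error, run DCT + quantization with optional noise shaping and
      * coefficient elimination, and hand the result to the codec-specific
      * macroblock bitstream writer. */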
1721 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1722                                                 int motion_x, int motion_y,
1723                                                 int mb_block_height,
1724                                                 int mb_block_count)
1725 {
1726     int16_t weight[8][64];
1727     DCTELEM orig[8][64];
1728     const int mb_x = s->mb_x;
1729     const int mb_y = s->mb_y;
1730     int i;
1731     int skip_dct[8];
1732     int dct_offset = s->linesize * 8; // default for progressive frames
1733     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1734     int wrap_y, wrap_c;
1735
1736     for (i = 0; i < mb_block_count; i++)
1737         skip_dct[i] = s->skipdct;
1738
1739     if (s->adaptive_quant) {
1740         const int last_qp = s->qscale;
1741         const int mb_xy = mb_x + mb_y * s->mb_stride;
1742
1743         s->lambda = s->lambda_table[mb_xy];
1744         update_qscale(s);
1745
1746         if (!(s->flags & CODEC_FLAG_QP_RD)) {
1747             s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
1748             s->dquant = s->qscale - last_qp;
1749
1750             if (s->out_format == FMT_H263) {
1751                 s->dquant = av_clip(s->dquant, -2, 2);
1752
1753                 if (s->codec_id == CODEC_ID_MPEG4) {
1754                     if (!s->mb_intra) {
1755                         if (s->pict_type == AV_PICTURE_TYPE_B) {
1756                             if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1757                                 s->dquant = 0;
1758                         }
1759                         if (s->mv_type == MV_TYPE_8X8)
1760                             s->dquant = 0;
1761                     }
1762                 }
1763             }
1764         }
1765         ff_set_qscale(s, last_qp + s->dquant);
1766     } else if (s->flags & CODEC_FLAG_QP_RD)
1767         ff_set_qscale(s, s->qscale + s->dquant);
1768
1769     wrap_y = s->linesize;
1770     wrap_c = s->uvlinesize;
1771     ptr_y  = s->new_picture.f.data[0] +
1772              (mb_y * 16 * wrap_y)              + mb_x * 16;
1773     ptr_cb = s->new_picture.f.data[1] +
1774              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1775     ptr_cr = s->new_picture.f.data[2] +
1776              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1777
1778     if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1779         uint8_t *ebuf = s->edge_emu_buffer + 32;
1780         s->dsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
1781                                 mb_y * 16, s->width, s->height);
1782         ptr_y = ebuf;
1783         s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
1784                                 mb_block_height, mb_x * 8, mb_y * 8,
1785                                 s->width >> 1, s->height >> 1);
1786         ptr_cb = ebuf + 18 * wrap_y;
1787         s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
1788                                 mb_block_height, mb_x * 8, mb_y * 8,
1789                                 s->width >> 1, s->height >> 1);
1790         ptr_cr = ebuf + 18 * wrap_y + 8;
1791     }
1792
1793     if (s->mb_intra) {
1794         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1795             int progressive_score, interlaced_score;
1796
1797             s->interlaced_dct = 0;
1798             progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1799                                                     NULL, wrap_y, 8) +
1800                                 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1801                                                     NULL, wrap_y, 8) - 400;
1802
1803             if (progressive_score > 0) {
1804                 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1805                                                        NULL, wrap_y * 2, 8) +
1806                                    s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1807                                                        NULL, wrap_y * 2, 8);
1808                 if (progressive_score > interlaced_score) {
1809                     s->interlaced_dct = 1;
1810
1811                     dct_offset = wrap_y;
1812                     wrap_y <<= 1;
1813                     if (s->chroma_format == CHROMA_422)
1814                         wrap_c <<= 1;
1815                 }
1816             }
1817         }
1818
1819         s->dsp.get_pixels(s->block[0], ptr_y                  , wrap_y);
1820         s->dsp.get_pixels(s->block[1], ptr_y              + 8 , wrap_y);
1821         s->dsp.get_pixels(s->block[2], ptr_y + dct_offset     , wrap_y);
1822         s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1823
1824         if (s->flags & CODEC_FLAG_GRAY) {
1825             skip_dct[4] = 1;
1826             skip_dct[5] = 1;
1827         } else {
1828             s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1829             s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1830             if (!s->chroma_y_shift) { /* 422 */
1831                 s->dsp.get_pixels(s->block[6],
1832                                   ptr_cb + (dct_offset >> 1), wrap_c);
1833                 s->dsp.get_pixels(s->block[7],
1834                                   ptr_cr + (dct_offset >> 1), wrap_c);
1835             }
1836         }
1837     } else {
1838         op_pixels_func (*op_pix)[4];
1839         qpel_mc_func (*op_qpix)[16];
1840         uint8_t *dest_y, *dest_cb, *dest_cr;
1841
1842         dest_y  = s->dest[0];
1843         dest_cb = s->dest[1];
1844         dest_cr = s->dest[2];
1845
1846         if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1847             op_pix  = s->dsp.put_pixels_tab;
1848             op_qpix = s->dsp.put_qpel_pixels_tab;
1849         } else {
1850             op_pix  = s->dsp.put_no_rnd_pixels_tab;
1851             op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1852         }
1853
1854         if (s->mv_dir & MV_DIR_FORWARD) {
1855             MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data,
1856                        op_pix, op_qpix);
1857             op_pix  = s->dsp.avg_pixels_tab;
1858             op_qpix = s->dsp.avg_qpel_pixels_tab;
1859         }
1860         if (s->mv_dir & MV_DIR_BACKWARD) {
1861             MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data,
1862                        op_pix, op_qpix);
1863         }
1864
1865         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1866             int progressive_score, interlaced_score;
1867
1868             s->interlaced_dct = 0;
1869             progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1870                                                     ptr_y,              wrap_y,
1871                                                     8) +
1872                                 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1873                                                     ptr_y + wrap_y * 8, wrap_y,
1874                                                     8) - 400;
1875
1876             if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1877                 progressive_score -= 400;
1878
1879             if (progressive_score > 0) {
1880                 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1881                                                        ptr_y,
1882                                                        wrap_y * 2, 8) +
1883                                    s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1884                                                        ptr_y + wrap_y,
1885                                                        wrap_y * 2, 8);
1886
1887                 if (progressive_score > interlaced_score) {
1888                     s->interlaced_dct = 1;
1889
1890                     dct_offset = wrap_y;
1891                     wrap_y <<= 1;
1892                     if (s->chroma_format == CHROMA_422)
1893                         wrap_c <<= 1;
1894                 }
1895             }
1896         }
1897
1898         s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1899         s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1900         s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1901                            dest_y + dct_offset, wrap_y);
1902         s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1903                            dest_y + dct_offset + 8, wrap_y);
1904
1905         if (s->flags & CODEC_FLAG_GRAY) {
1906             skip_dct[4] = 1;
1907             skip_dct[5] = 1;
1908         } else {
1909             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1910             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1911             if (!s->chroma_y_shift) { /* 422 */
1912                 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1913                                    dest_cb + (dct_offset >> 1), wrap_c);
1914                 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1915                                    dest_cr + (dct_offset >> 1), wrap_c);
1916             }
1917         }
1918         /* pre-quantization: skip the DCT for blocks whose prediction error is small */
1919         if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1920                 2 * s->qscale * s->qscale) {
1921             // FIXME optimize
1922             if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1923                               wrap_y, 8) < 20 * s->qscale)
1924                 skip_dct[0] = 1;
1925             if (s->dsp.sad[1](NULL, ptr_y + 8,
1926                               dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1927                 skip_dct[1] = 1;
1928             if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1929                               dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1930                 skip_dct[2] = 1;
1931             if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1932                               dest_y + dct_offset + 8,
1933                               wrap_y, 8) < 20 * s->qscale)
1934                 skip_dct[3] = 1;
1935             if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1936                               wrap_c, 8) < 20 * s->qscale)
1937                 skip_dct[4] = 1;
1938             if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1939                               wrap_c, 8) < 20 * s->qscale)
1940                 skip_dct[5] = 1;
1941             if (!s->chroma_y_shift) { /* 422 */
1942                 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1943                                   dest_cb + (dct_offset >> 1),
1944                                   wrap_c, 8) < 20 * s->qscale)
1945                     skip_dct[6] = 1;
1946                 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1947                                   dest_cr + (dct_offset >> 1),
1948                                   wrap_c, 8) < 20 * s->qscale)
1949                     skip_dct[7] = 1;
1950             }
1951         }
1952     }
1953
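         /* Noise shaping: keep the visual weights and the untransformed block
          * data so dct_quantize_refine() can re-optimize the quantized
          * coefficients against a visually weighted error measure. */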
1954     if (s->avctx->quantizer_noise_shaping) {
1955         if (!skip_dct[0])
1956             get_visual_weight(weight[0], ptr_y                 , wrap_y);
1957         if (!skip_dct[1])
1958             get_visual_weight(weight[1], ptr_y              + 8, wrap_y);
1959         if (!skip_dct[2])
1960             get_visual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
1961         if (!skip_dct[3])
1962             get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1963         if (!skip_dct[4])
1964             get_visual_weight(weight[4], ptr_cb                , wrap_c);
1965         if (!skip_dct[5])
1966             get_visual_weight(weight[5], ptr_cr                , wrap_c);
1967         if (!s->chroma_y_shift) { /* 422 */
1968             if (!skip_dct[6])
1969                 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1970                                   wrap_c);
1971             if (!skip_dct[7])
1972                 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
1973                                   wrap_c);
1974         }
1975         memcpy(orig[0], s->block[0], sizeof(DCTELEM) * 64 * mb_block_count);
1976     }
1977
1978     /* DCT & quantize */
1979     assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1980     {
1981         for (i = 0; i < mb_block_count; i++) {
1982             if (!skip_dct[i]) {
1983                 int overflow;
1984                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1985                 // FIXME we could decide to change the quantizer instead of
1986                 // clipping
1987                 // JS: I don't think that would be a good idea; it could lower
1988                 //     quality instead of improving it. Only INTRADC clipping
1989                 //     deserves changes in the quantizer.
1990                 if (overflow)
1991                     clip_coeffs(s, s->block[i], s->block_last_index[i]);
1992             } else
1993                 s->block_last_index[i] = -1;
1994         }
1995         if (s->avctx->quantizer_noise_shaping) {
1996             for (i = 0; i < mb_block_count; i++) {
1997                 if (!skip_dct[i]) {
1998                     s->block_last_index[i] =
1999                         dct_quantize_refine(s, s->block[i], weight[i],
2000                                             orig[i], i, s->qscale);
2001                 }
2002             }
2003         }
2004
2005         if (s->luma_elim_threshold && !s->mb_intra)
2006             for (i = 0; i < 4; i++)
2007                 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2008         if (s->chroma_elim_threshold && !s->mb_intra)
2009             for (i = 4; i < mb_block_count; i++)
2010                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2011
2012         if (s->flags & CODEC_FLAG_CBP_RD) {
2013             for (i = 0; i < mb_block_count; i++) {
2014                 if (s->block_last_index[i] == -1)
2015                     s->coded_score[i] = INT_MAX / 256;
2016             }
2017         }
2018     }
2019
2020     if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
2021         s->block_last_index[4] =
2022         s->block_last_index[5] = 0;
2023         s->block[4][0] =
2024         s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2025     }
2026
2027     // FIXME: the non-C quantize code returns an incorrect block_last_index
2028     if (s->alternate_scan && s->dct_quantize != dct_quantize_c) {
2029         for (i = 0; i < mb_block_count; i++) {
2030             int j;
2031             if (s->block_last_index[i] > 0) {
2032                 for (j = 63; j > 0; j--) {
2033                     if (s->block[i][s->intra_scantable.permutated[j]])
2034                         break;
2035                 }
2036                 s->block_last_index[i] = j;
2037             }
2038         }
2039     }
2040
2041     /* huffman encode */
2042     switch(s->codec_id){ //FIXME a function pointer could be slightly faster
2043     case CODEC_ID_MPEG1VIDEO:
2044     case CODEC_ID_MPEG2VIDEO:
2045         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2046             mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2047         break;
2048     case CODEC_ID_MPEG4:
2049         if (CONFIG_MPEG4_ENCODER)
2050             mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2051         break;
2052     case CODEC_ID_MSMPEG4V2:
2053     case CODEC_ID_MSMPEG4V3:
2054     case CODEC_ID_WMV1:
2055         if (CONFIG_MSMPEG4_ENCODER)
2056             msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2057         break;
2058     case CODEC_ID_WMV2:
2059         if (CONFIG_WMV2_ENCODER)
2060             ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2061         break;
2062     case CODEC_ID_H261:
2063         if (CONFIG_H261_ENCODER)
2064             ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2065         break;
2066     case CODEC_ID_H263:
2067     case CODEC_ID_H263P:
2068     case CODEC_ID_FLV1:
2069     case CODEC_ID_RV10:
2070     case CODEC_ID_RV20:
2071         if (CONFIG_H263_ENCODER)
2072             h263_encode_mb(s, s->block, motion_x, motion_y);
2073         break;
2074     case CODEC_ID_MJPEG:
2075         if (CONFIG_MJPEG_ENCODER)
2076             ff_mjpeg_encode_mb(s, s->block);
2077         break;
2078     default:
2079         assert(0);
2080     }
2081 }
2082
2083 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2084 {
2085     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y,  8, 6);
2086     else                                encode_mb_internal(s, motion_x, motion_y, 16, 8);
2087 }
2088
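     /* Save the encoder state that encoding a macroblock modifies (last MVs,
      * DC predictors, bit statistics, qscale/dquant) so that several candidate
      * macroblock types can be tried from the same starting point. */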
2089 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2090     int i;
2091
2092     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2093
2094     /* mpeg1 */
2095     d->mb_skip_run= s->mb_skip_run;
2096     for(i=0; i<3; i++)
2097         d->last_dc[i] = s->last_dc[i];
2098
2099     /* statistics */
2100     d->mv_bits= s->mv_bits;
2101     d->i_tex_bits= s->i_tex_bits;
2102     d->p_tex_bits= s->p_tex_bits;
2103     d->i_count= s->i_count;
2104     d->f_count= s->f_count;
2105     d->b_count= s->b_count;
2106     d->skip_count= s->skip_count;
2107     d->misc_bits= s->misc_bits;
2108     d->last_bits= 0;
2109
2110     d->mb_skipped= 0;
2111     d->qscale= s->qscale;
2112     d->dquant= s->dquant;
2113
2114     d->esc3_level_length= s->esc3_level_length;
2115 }
2116
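     /* Copy the state left behind by an encoded candidate (statistics, motion
      * and intra info, block data, put-bit contexts) from one context into
      * another; used both to record the best candidate and to restore it. */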
2117 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2118     int i;
2119
2120     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2121     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2122
2123     /* mpeg1 */
2124     d->mb_skip_run= s->mb_skip_run;
2125     for(i=0; i<3; i++)
2126         d->last_dc[i] = s->last_dc[i];
2127
2128     /* statistics */
2129     d->mv_bits= s->mv_bits;
2130     d->i_tex_bits= s->i_tex_bits;
2131     d->p_tex_bits= s->p_tex_bits;
2132     d->i_count= s->i_count;
2133     d->f_count= s->f_count;
2134     d->b_count= s->b_count;
2135     d->skip_count= s->skip_count;
2136     d->misc_bits= s->misc_bits;
2137
2138     d->mb_intra= s->mb_intra;
2139     d->mb_skipped= s->mb_skipped;
2140     d->mv_type= s->mv_type;
2141     d->mv_dir= s->mv_dir;
2142     d->pb= s->pb;
2143     if(s->data_partitioning){
2144         d->pb2= s->pb2;
2145         d->tex_pb= s->tex_pb;
2146     }
2147     d->block= s->block;
2148     for(i=0; i<8; i++)
2149         d->block_last_index[i]= s->block_last_index[i];
2150     d->interlaced_dct= s->interlaced_dct;
2151     d->qscale= s->qscale;
2152
2153     d->esc3_level_length= s->esc3_level_length;
2154 }
2155
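     /* Encode the current macroblock as one candidate type: the bits go into
      * one of two scratch bit buffers (and the reconstruction into scratch
      * pixels when needed); for FF_MB_DECISION_RD the block is also decoded
      * and the bit count, scaled by lambda2, gets the reconstruction SSE
      * added.  If the resulting score beats *dmin, the candidate's context is
      * saved into *best. */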
2156 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2157                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2158                            int *dmin, int *next_block, int motion_x, int motion_y)
2159 {
2160     int score;
2161     uint8_t *dest_backup[3];
2162
2163     copy_context_before_encode(s, backup, type);
2164
2165     s->block= s->blocks[*next_block];
2166     s->pb= pb[*next_block];
2167     if(s->data_partitioning){
2168         s->pb2   = pb2   [*next_block];
2169         s->tex_pb= tex_pb[*next_block];
2170     }
2171
2172     if(*next_block){
2173         memcpy(dest_backup, s->dest, sizeof(s->dest));
2174         s->dest[0] = s->rd_scratchpad;
2175         s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2176         s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2177         assert(s->linesize >= 32); //FIXME
2178     }
2179
2180     encode_mb(s, motion_x, motion_y);
2181
2182     score= put_bits_count(&s->pb);
2183     if(s->data_partitioning){
2184         score+= put_bits_count(&s->pb2);
2185         score+= put_bits_count(&s->tex_pb);
2186     }
2187
2188     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2189         MPV_decode_mb(s, s->block);
2190
2191         score *= s->lambda2;
2192         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2193     }
2194
2195     if(*next_block){
2196         memcpy(s->dest, dest_backup, sizeof(s->dest));
2197     }
2198
2199     if(score<*dmin){
2200         *dmin= score;
2201         *next_block^=1;
2202
2203         copy_context_after_encode(best, s, type);
2204     }
2205 }
2206
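     /* Sum of squared errors between two pixel blocks; uses the DSP 16x16/8x8
      * routines where possible and a generic loop otherwise. */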
2207 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2208     uint32_t *sq = ff_squareTbl + 256;
2209     int acc=0;
2210     int x,y;
2211
2212     if(w==16 && h==16)
2213         return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2214     else if(w==8 && h==8)
2215         return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2216
2217     for(y=0; y<h; y++){
2218         for(x=0; x<w; x++){
2219             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2220         }
2221     }
2222
2223     assert(acc>=0);
2224
2225     return acc;
2226 }
2227
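     /* SSE (or NSSE, depending on mb_cmp) of the reconstructed macroblock
      * against the source picture, clipping the block size at the right and
      * bottom borders. */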
2228 static int sse_mb(MpegEncContext *s){
2229     int w= 16;
2230     int h= 16;
2231
2232     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2233     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2234
2235     if(w==16 && h==16)
2236       if(s->avctx->mb_cmp == FF_CMP_NSSE){
2237         return  s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2238                +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2239                +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2240       }else{
2241         return  s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2242                +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2243                +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2244       }
2245     else
2246         return  sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2247                +sse(s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2248                +sse(s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2249 }
2250
2251 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2252     MpegEncContext *s= *(void**)arg;
2253
2254
2255     s->me.pre_pass=1;
2256     s->me.dia_size= s->avctx->pre_dia_size;
2257     s->first_slice_line=1;
2258     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2259         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2260             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2261         }
2262         s->first_slice_line=0;
2263     }
2264
2265     s->me.pre_pass=0;
2266
2267     return 0;
2268 }
2269
2270 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2271     MpegEncContext *s= *(void**)arg;
2272
2273     ff_check_alignment();
2274
2275     s->me.dia_size= s->avctx->dia_size;
2276     s->first_slice_line=1;
2277     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2278         s->mb_x=0; //for block init below
2279         ff_init_block_index(s);
2280         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2281             s->block_index[0]+=2;
2282             s->block_index[1]+=2;
2283             s->block_index[2]+=2;
2284             s->block_index[3]+=2;
2285
2286             /* compute motion vector & mb_type and store in context */
2287             if(s->pict_type==AV_PICTURE_TYPE_B)
2288                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2289             else
2290                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2291         }
2292         s->first_slice_line=0;
2293     }
2294     return 0;
2295 }
2296
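     /* Compute per-macroblock luma variance and mean of the source picture,
      * used by the rate control and adaptive quantization. */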
2297 static int mb_var_thread(AVCodecContext *c, void *arg){
2298     MpegEncContext *s= *(void**)arg;
2299     int mb_x, mb_y;
2300
2301     ff_check_alignment();
2302
2303     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2304         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2305             int xx = mb_x * 16;
2306             int yy = mb_y * 16;
2307             uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2308             int varc;
2309             int sum = s->dsp.pix_sum(pix, s->linesize);
2310
2311             varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2312
2313             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2314             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2315             s->me.mb_var_sum_temp    += varc;
2316         }
2317     }
2318     return 0;
2319 }
2320
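     /* Terminate the current slice: merge MPEG-4 data partitions and write
      * MPEG-4 or MJPEG stuffing as needed, then byte-align and flush the
      * bitstream. */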
2321 static void write_slice_end(MpegEncContext *s){
2322     if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4){
2323         if(s->partitioned_frame){
2324             ff_mpeg4_merge_partitions(s);
2325         }
2326
2327         ff_mpeg4_stuffing(&s->pb);
2328     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2329         ff_mjpeg_encode_stuffing(&s->pb);
2330     }
2331
2332     avpriv_align_put_bits(&s->pb);
2333     flush_put_bits(&s->pb);
2334
2335     if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2336         s->misc_bits+= get_bits_diff(s);
2337 }
2338
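     /* Per-slice encoding worker: reset the slice statistics, then walk the
      * macroblock rows assigned to this thread, writing GOB/slice/video-packet
      * headers where required and choosing among the candidate macroblock
      * types (via encode_mb_hq()) whenever more than one is possible or QP-RD
      * is enabled. */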
2339 static int encode_thread(AVCodecContext *c, void *arg){
2340     MpegEncContext *s= *(void**)arg;
2341     int mb_x, mb_y, pdif = 0;
2342     int chr_h= 16>>s->chroma_y_shift;
2343     int i, j;
2344     MpegEncContext best_s, backup_s;
2345     uint8_t bit_buf[2][MAX_MB_BYTES];
2346     uint8_t bit_buf2[2][MAX_MB_BYTES];
2347     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2348     PutBitContext pb[2], pb2[2], tex_pb[2];
2349 //printf("%d->%d\n", s->resync_mb_y, s->end_mb_y);
2350
2351     ff_check_alignment();
2352
2353     for(i=0; i<2; i++){
2354         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2355         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2356         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2357     }
2358
2359     s->last_bits= put_bits_count(&s->pb);
2360     s->mv_bits=0;
2361     s->misc_bits=0;
2362     s->i_tex_bits=0;
2363     s->p_tex_bits=0;
2364     s->i_count=0;
2365     s->f_count=0;
2366     s->b_count=0;
2367     s->skip_count=0;
2368
2369     for(i=0; i<3; i++){
2370         /* init last dc values */
2371         /* note: quant matrix value (8) is implied here */
2372         s->last_dc[i] = 128 << s->intra_dc_precision;
2373
2374         s->current_picture.f.error[i] = 0;
2375     }
2376     s->mb_skip_run = 0;
2377     memset(s->last_mv, 0, sizeof(s->last_mv));
2378
2379     s->last_mv_dir = 0;
2380
2381     switch(s->codec_id){
2382     case CODEC_ID_H263:
2383     case CODEC_ID_H263P:
2384     case CODEC_ID_FLV1:
2385         if (CONFIG_H263_ENCODER)
2386             s->gob_index = ff_h263_get_gob_height(s);
2387         break;
2388     case CODEC_ID_MPEG4:
2389         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2390             ff_mpeg4_init_partitions(s);
2391         break;
2392     }
2393
2394     s->resync_mb_x=0;
2395     s->resync_mb_y=0;
2396     s->first_slice_line = 1;
2397     s->ptr_lastgob = s->pb.buf;
2398     for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2399 //    printf("row %d at %X\n", s->mb_y, (int)s);
2400         s->mb_x=0;
2401         s->mb_y= mb_y;
2402
2403         ff_set_qscale(s, s->qscale);
2404         ff_init_block_index(s);
2405
2406         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2407             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2408             int mb_type= s->mb_type[xy];
2409 //            int d;
2410             int dmin= INT_MAX;
2411             int dir;
2412
2413             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2414                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2415                 return -1;
2416             }
2417             if(s->data_partitioning){
2418                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
2419                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2420                     av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2421                     return -1;
2422                 }
2423             }
2424
2425             s->mb_x = mb_x;
2426             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
2427             ff_update_block_index(s);
2428
2429             if(CONFIG_H261_ENCODER && s->codec_id == CODEC_ID_H261){
2430                 ff_h261_reorder_mb_index(s);
2431                 xy= s->mb_y*s->mb_stride + s->mb_x;
2432                 mb_type= s->mb_type[xy];
2433             }
2434
2435             /* write gob / video packet header  */
2436             if(s->rtp_mode){
2437                 int current_packet_size, is_gob_start;
2438
2439                 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2440
2441                 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2442
2443                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2444
2445                 switch(s->codec_id){
2446                 case CODEC_ID_H263:
2447                 case CODEC_ID_H263P:
2448                     if(!s->h263_slice_structured)
2449                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2450                     break;
2451                 case CODEC_ID_MPEG2VIDEO:
2452                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1; /* fall through */
2453                 case CODEC_ID_MPEG1VIDEO:
2454                     if(s->mb_skip_run) is_gob_start=0;
2455                     break;
2456                 }
2457
2458                 if(is_gob_start){
2459                     if(s->start_mb_y != mb_y || mb_x!=0){
2460                         write_slice_end(s);
2461
2462                         if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame){
2463                             ff_mpeg4_init_partitions(s);
2464                         }
2465                     }
2466
2467                     assert((put_bits_count(&s->pb)&7) == 0);
2468                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2469
2470                     if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2471                         int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2472                         int d= 100 / s->avctx->error_rate;
2473                         if(r % d == 0){
2474                             current_packet_size=0;
2475                             s->pb.buf_ptr= s->ptr_lastgob;
2476                             assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2477                         }
2478                     }
2479
2480                     if (s->avctx->rtp_callback){
2481                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2482                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2483                     }
2484
2485                     switch(s->codec_id){
2486                     case CODEC_ID_MPEG4:
2487                         if (CONFIG_MPEG4_ENCODER) {
2488                             ff_mpeg4_encode_video_packet_header(s);
2489                             ff_mpeg4_clean_buffers(s);
2490                         }
2491                     break;
2492                     case CODEC_ID_MPEG1VIDEO:
2493                     case CODEC_ID_MPEG2VIDEO:
2494                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2495                             ff_mpeg1_encode_slice_header(s);
2496                             ff_mpeg1_clean_buffers(s);
2497                         }
2498                     break;
2499                     case CODEC_ID_H263:
2500                     case CODEC_ID_H263P:
2501                         if (CONFIG_H263_ENCODER)
2502                             h263_encode_gob_header(s, mb_y);
2503                     break;
2504                     }
2505
2506                     if(s->flags&CODEC_FLAG_PASS1){
2507                         int bits= put_bits_count(&s->pb);
2508                         s->misc_bits+= bits - s->last_bits;
2509                         s->last_bits= bits;
2510                     }
2511
2512                     s->ptr_lastgob += current_packet_size;
2513                     s->first_slice_line=1;
2514                     s->resync_mb_x=mb_x;
2515                     s->resync_mb_y=mb_y;
2516                 }
2517             }
2518
2519             if(  (s->resync_mb_x   == s->mb_x)
2520                && s->resync_mb_y+1 == s->mb_y){
2521                 s->first_slice_line=0;
2522             }
2523
2524             s->mb_skipped=0;
2525             s->dquant=0; //only for QP_RD
2526
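                 /* Rate-distortion mode decision: every candidate type flagged
                  * in mb_type is encoded via encode_mb_hq() and the cheapest is
                  * kept; with CODEC_FLAG_QP_RD a few quantizer offsets are
                  * tried as well. */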
2527             if(mb_type & (mb_type-1) || (s->flags & CODEC_FLAG_QP_RD)){ // more than 1 MB type possible or CODEC_FLAG_QP_RD
2528                 int next_block=0;
2529                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2530
2531                 copy_context_before_encode(&backup_s, s, -1);
2532                 backup_s.pb= s->pb;
2533                 best_s.data_partitioning= s->data_partitioning;
2534                 best_s.partitioned_frame= s->partitioned_frame;
2535                 if(s->data_partitioning){
2536                     backup_s.pb2= s->pb2;
2537                     backup_s.tex_pb= s->tex_pb;
2538                 }
2539
2540                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2541                     s->mv_dir = MV_DIR_FORWARD;
2542                     s->mv_type = MV_TYPE_16X16;
2543                     s->mb_intra= 0;
2544                     s->mv[0][0][0] = s->p_mv_table[xy][0];
2545                     s->mv[0][0][1] = s->p_mv_table[xy][1];
2546                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2547                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2548                 }
2549                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2550                     s->mv_dir = MV_DIR_FORWARD;
2551                     s->mv_type = MV_TYPE_FIELD;
2552                     s->mb_intra= 0;
2553                     for(i=0; i<2; i++){
2554                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2555                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2556                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2557                     }
2558                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2559                                  &dmin, &next_block, 0, 0);
2560                 }
2561                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2562                     s->mv_dir = MV_DIR_FORWARD;
2563                     s->mv_type = MV_TYPE_16X16;
2564                     s->mb_intra= 0;
2565                     s->mv[0][0][0] = 0;
2566                     s->mv[0][0][1] = 0;
2567                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2568                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2569                 }
2570                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2571                     s->mv_dir = MV_DIR_FORWARD;
2572                     s->mv_type = MV_TYPE_8X8;
2573                     s->mb_intra= 0;
2574                     for(i=0; i<4; i++){
2575                         s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2576                         s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2577                     }
2578                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2579                                  &dmin, &next_block, 0, 0);
2580                 }
2581                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2582                     s->mv_dir = MV_DIR_FORWARD;
2583                     s->mv_type = MV_TYPE_16X16;
2584                     s->mb_intra= 0;
2585                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2586                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2587                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2588                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2589                 }
2590                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2591                     s->mv_dir = MV_DIR_BACKWARD;
2592                     s->mv_type = MV_TYPE_16X16;
2593                     s->mb_intra= 0;
2594                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2595                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2596                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2597                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2598                 }
2599                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2600                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2601                     s->mv_type = MV_TYPE_16X16;
2602                     s->mb_intra= 0;
2603                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2604                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2605                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2606                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2607                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2608                                  &dmin, &next_block, 0, 0);
2609                 }
2610                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2611                     s->mv_dir = MV_DIR_FORWARD;
2612                     s->mv_type = MV_TYPE_FIELD;
2613                     s->mb_intra= 0;
2614                     for(i=0; i<2; i++){
2615                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2616                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2617                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2618                     }
2619                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2620                                  &dmin, &next_block, 0, 0);
2621                 }
2622                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2623                     s->mv_dir = MV_DIR_BACKWARD;
2624                     s->mv_type = MV_TYPE_FIELD;
2625                     s->mb_intra= 0;
2626                     for(i=0; i<2; i++){
2627                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2628                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2629                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2630                     }
2631                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2632                                  &dmin, &next_block, 0, 0);
2633                 }
2634                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2635                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2636                     s->mv_type = MV_TYPE_FIELD;
2637                     s->mb_intra= 0;
2638                     for(dir=0; dir<2; dir++){
2639                         for(i=0; i<2; i++){
2640                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2641                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2642                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2643                         }
2644                     }
2645                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2646                                  &dmin, &next_block, 0, 0);
2647                 }
2648                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2649                     s->mv_dir = 0;
2650                     s->mv_type = MV_TYPE_16X16;
2651                     s->mb_intra= 1;
2652                     s->mv[0][0][0] = 0;
2653                     s->mv[0][0][1] = 0;
2654                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2655                                  &dmin, &next_block, 0, 0);
2656                     if(s->h263_pred || s->h263_aic){
2657                         if(best_s.mb_intra)
2658                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2659                         else
2660                             ff_clean_intra_table_entries(s); //old mode?
2661                     }
2662                 }
2663
2664                 if((s->flags & CODEC_FLAG_QP_RD) && dmin < INT_MAX){
2665                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2666                         const int last_qp= backup_s.qscale;
2667                         int qpi, qp, dc[6];
2668                         DCTELEM ac[6][16];
2669                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2670                         static const int dquant_tab[4]={-1,1,-2,2};
2671
2672                         assert(backup_s.dquant == 0);
2673
2674                         //FIXME intra
2675                         s->mv_dir= best_s.mv_dir;
2676                         s->mv_type = MV_TYPE_16X16;
2677                         s->mb_intra= best_s.mb_intra;
2678                         s->mv[0][0][0] = best_s.mv[0][0][0];
2679                         s->mv[0][0][1] = best_s.mv[0][0][1];
2680                         s->mv[1][0][0] = best_s.mv[1][0][0];
2681                         s->mv[1][0][1] = best_s.mv[1][0][1];
2682
2683                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2684                         for(; qpi<4; qpi++){
2685                             int dquant= dquant_tab[qpi];
2686                             qp= last_qp + dquant;
2687                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2688                                 continue;
2689                             backup_s.dquant= dquant;
2690                             if(s->mb_intra && s->dc_val[0]){
2691                                 for(i=0; i<6; i++){
2692                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
2693                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16);
2694                                 }
2695                             }
2696
2697                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2698                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2699                             if(best_s.qscale != qp){
2700                                 if(s->mb_intra && s->dc_val[0]){
2701                                     for(i=0; i<6; i++){
2702                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
2703                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
2704                                     }
2705                                 }
2706                             }
2707                         }
2708                     }
2709                 }
2710                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2711                     int mx= s->b_direct_mv_table[xy][0];
2712                     int my= s->b_direct_mv_table[xy][1];
2713
2714                     backup_s.dquant = 0;
2715                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2716                     s->mb_intra= 0;
2717                     ff_mpeg4_set_direct_mv(s, mx, my);
2718                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2719                                  &dmin, &next_block, mx, my);
2720                 }
2721                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2722                     backup_s.dquant = 0;
2723                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2724                     s->mb_intra= 0;
2725                     ff_mpeg4_set_direct_mv(s, 0, 0);
2726                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2727                                  &dmin, &next_block, 0, 0);
2728                 }
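                /* CODEC_FLAG2_SKIP_RD: if the best candidate so far is inter and has a
                 * coded residual, also try the same motion with the residual dropped
                 * (s->skipdct = 1) and let the RD comparison decide whether skipping
                 * the texture is cheaper. */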
2729                 if (!best_s.mb_intra && (s->flags2 & CODEC_FLAG2_SKIP_RD)) {
2730                     int coded = 0;
2731                     for (i = 0; i < 6; i++)
2732                         coded |= s->block_last_index[i];
2733                     if (coded) {
2734                         int mx, my;
2735                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
2736                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2737                             mx=my=0; //FIXME find the one we actually used
2738                             ff_mpeg4_set_direct_mv(s, mx, my);
2739                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2740                             mx= s->mv[1][0][0];
2741                             my= s->mv[1][0][1];
2742                         }else{
2743                             mx= s->mv[0][0][0];
2744                             my= s->mv[0][0][1];
2745                         }
2746
2747                         s->mv_dir= best_s.mv_dir;
2748                         s->mv_type = best_s.mv_type;
2749                         s->mb_intra= 0;
2750 /*                        s->mv[0][0][0] = best_s.mv[0][0][0];
2751                         s->mv[0][0][1] = best_s.mv[0][0][1];
2752                         s->mv[1][0][0] = best_s.mv[1][0][0];
2753                         s->mv[1][0][1] = best_s.mv[1][0][1];*/
2754                         backup_s.dquant= 0;
2755                         s->skipdct=1;
2756                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2757                                         &dmin, &next_block, mx, my);
2758                         s->skipdct=0;
2759                     }
2760                 }
2761
2762                 s->current_picture.f.qscale_table[xy] = best_s.qscale;
2763
2764                 copy_context_after_encode(s, &best_s, -1);
2765
2766                 pb_bits_count= put_bits_count(&s->pb);
2767                 flush_put_bits(&s->pb);
2768                 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2769                 s->pb= backup_s.pb;
2770
2771                 if(s->data_partitioning){
2772                     pb2_bits_count= put_bits_count(&s->pb2);
2773                     flush_put_bits(&s->pb2);
2774                     avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2775                     s->pb2= backup_s.pb2;
2776
2777                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
2778                     flush_put_bits(&s->tex_pb);
2779                     avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2780                     s->tex_pb= backup_s.tex_pb;
2781                 }
2782                 s->last_bits= put_bits_count(&s->pb);
2783
2784                 if (CONFIG_H263_ENCODER &&
2785                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2786                     ff_h263_update_motion_val(s);
2787
2788                 if(next_block==0){ //FIXME 16 vs linesize16
2789                     s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad                     , s->linesize  ,16);
2790                     s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
2791                     s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2792                 }
2793
2794                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2795                     MPV_decode_mb(s, s->block);
2796             } else {
2797                 int motion_x = 0, motion_y = 0;
2798                 s->mv_type = MV_TYPE_16X16;
2799                 // only one MB type is possible here
2800
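                /* Encode the macroblock with the single candidate type chosen during
                 * motion estimation: set mv_dir/mv_type/mb_intra and copy the motion
                 * vectors from the corresponding ME tables, then let encode_mb()
                 * write the bitstream for this MB. */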
2801                 switch(mb_type){
2802                 case CANDIDATE_MB_TYPE_INTRA:
2803                     s->mv_dir = 0;
2804                     s->mb_intra= 1;
2805                     motion_x= s->mv[0][0][0] = 0;
2806                     motion_y= s->mv[0][0][1] = 0;
2807                     break;
2808                 case CANDIDATE_MB_TYPE_INTER:
2809                     s->mv_dir = MV_DIR_FORWARD;
2810                     s->mb_intra= 0;
2811                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2812                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2813                     break;
2814                 case CANDIDATE_MB_TYPE_INTER_I:
2815                     s->mv_dir = MV_DIR_FORWARD;
2816                     s->mv_type = MV_TYPE_FIELD;
2817                     s->mb_intra= 0;
2818                     for(i=0; i<2; i++){
2819                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2820                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2821                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2822                     }
2823                     break;
2824                 case CANDIDATE_MB_TYPE_INTER4V:
2825                     s->mv_dir = MV_DIR_FORWARD;
2826                     s->mv_type = MV_TYPE_8X8;
2827                     s->mb_intra= 0;
2828                     for(i=0; i<4; i++){
2829                         s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2830                         s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2831                     }
2832                     break;
2833                 case CANDIDATE_MB_TYPE_DIRECT:
2834                     if (CONFIG_MPEG4_ENCODER) {
2835                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2836                         s->mb_intra= 0;
2837                         motion_x=s->b_direct_mv_table[xy][0];
2838                         motion_y=s->b_direct_mv_table[xy][1];
2839                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2840                     }
2841                     break;
2842                 case CANDIDATE_MB_TYPE_DIRECT0:
2843                     if (CONFIG_MPEG4_ENCODER) {
2844                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2845                         s->mb_intra= 0;
2846                         ff_mpeg4_set_direct_mv(s, 0, 0);
2847                     }
2848                     break;
2849                 case CANDIDATE_MB_TYPE_BIDIR:
2850                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2851                     s->mb_intra= 0;
2852                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2853                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2854                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2855                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2856                     break;
2857                 case CANDIDATE_MB_TYPE_BACKWARD:
2858                     s->mv_dir = MV_DIR_BACKWARD;
2859                     s->mb_intra= 0;
2860                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2861                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2862                     break;
2863                 case CANDIDATE_MB_TYPE_FORWARD:
2864                     s->mv_dir = MV_DIR_FORWARD;
2865                     s->mb_intra= 0;
2866                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2867                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2868 //                    printf(" %d %d ", motion_x, motion_y);
2869                     break;
2870                 case CANDIDATE_MB_TYPE_FORWARD_I:
2871                     s->mv_dir = MV_DIR_FORWARD;
2872                     s->mv_type = MV_TYPE_FIELD;
2873                     s->mb_intra= 0;
2874                     for(i=0; i<2; i++){
2875                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2876                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2877                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2878                     }
2879                     break;
2880                 case CANDIDATE_MB_TYPE_BACKWARD_I:
2881                     s->mv_dir = MV_DIR_BACKWARD;
2882                     s->mv_type = MV_TYPE_FIELD;
2883                     s->mb_intra= 0;
2884                     for(i=0; i<2; i++){
2885                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2886                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2887                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2888                     }
2889                     break;
2890                 case CANDIDATE_MB_TYPE_BIDIR_I:
2891                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2892                     s->mv_type = MV_TYPE_FIELD;
2893                     s->mb_intra= 0;
2894                     for(dir=0; dir<2; dir++){
2895                         for(i=0; i<2; i++){
2896                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2897                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2898                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2899                         }
2900                     }
2901                     break;
2902                 default:
2903                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2904                 }
2905
2906                 encode_mb(s, motion_x, motion_y);
2907
2908                 // RAL: remember the MV direction of the last macroblock
2909                 s->last_mv_dir = s->mv_dir;
2910
2911                 if (CONFIG_H263_ENCODER &&
2912                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2913                     ff_h263_update_motion_val(s);
2914
2915                 MPV_decode_mb(s, s->block);
2916             }
2917
2918             /* clear the MV table entry in I-, P- and S-frames so that direct mode in B-frames sees a zero vector for intra MBs */
2919             if (s->mb_intra /* && I, P, S_TYPE */) {
2920                 s->p_mv_table[xy][0] = 0;
2921                 s->p_mv_table[xy][1] = 0;
2922             }
2923
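            /* With CODEC_FLAG_PSNR, accumulate the per-plane squared error between
             * the source and the reconstructed MB (clipped at the right/bottom
             * picture edges) so the final PSNR can be reported after encoding. */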
2924             if (s->flags & CODEC_FLAG_PSNR) {
2925                 int w = 16;
2926                 int h = 16;
2927
2928                 if (s->mb_x * 16 + 16 > s->width)  w = s->width  - s->mb_x * 16;
2929                 if (s->mb_y * 16 + 16 > s->height) h = s->height - s->mb_y * 16;
2930
2931                 s->current_picture.f.error[0] += sse(
2932                     s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2933                     s->dest[0], w, h, s->linesize);
2934                 s->current_picture.f.error[1] += sse(
2935                     s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
2936                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2937                 s->current_picture.f.error[2] += sse(
2938                     s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
2939                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2940             }
2941             if(s->loop_filter){
2942                 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2943                     ff_h263_loop_filter(s);
2944             }
2945 //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, put_bits_count(&s->pb));
2946         }
2947     }
2948
2949     // Not pretty, but the extension header must be written before the slice is flushed, so it has to stay here.
2950     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version < 4 && s->pict_type == AV_PICTURE_TYPE_I)
2951         msmpeg4_encode_ext_header(s);
2952
2953     write_slice_end(s);
2954
2955     /* Send the last GOB if RTP */
2956     if (s->avctx->rtp_callback) {
2957         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
2958         pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
2959         /* Call the RTP callback to send the last GOB */
2960         emms_c();
2961         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
2962     }
2963
2964     return 0;
2965 }
2966
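/* MERGE(field) folds the statistics gathered by a slice thread back into the main
 * context and clears the source so nothing is counted twice.  For illustration,
 * MERGE(mv_bits) expands to
 *     dst->mv_bits += src->mv_bits; src->mv_bits = 0;
 */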
2967 #define MERGE(field) dst->field += src->field; src->field=0
2968 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
2969     MERGE(me.scene_change_score);
2970     MERGE(me.mc_mb_var_sum_temp);
2971     MERGE(me.mb_var_sum_temp);
2972 }
2973
2974 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
2975     int i;
2976
2977     MERGE(dct_count[0]); // Note: the other DCT vars are not part of the context
2978     MERGE(dct_count[1]);
2979     MERGE(mv_bits);
2980     MERGE(i_tex_bits);
2981     MERGE(p_tex_bits);
2982     MERGE(i_count);
2983     MERGE(f_count);
2984     MERGE(b_count);
2985     MERGE(skip_count);
2986     MERGE(misc_bits);
2987     MERGE(error_count);
2988     MERGE(padding_bug_score);
2989     MERGE(current_picture.f.error[0]);
2990     MERGE(current_picture.f.error[1]);
2991     MERGE(current_picture.f.error[2]);
2992
2993     if(dst->avctx->noise_reduction){
2994         for(i=0; i<64; i++){
2995             MERGE(dct_error_sum[0][i]);
2996             MERGE(dct_error_sum[1][i]);
2997         }
2998     }
2999
3000     assert(put_bits_count(&src->pb) % 8 ==0);
3001     assert(put_bits_count(&dst->pb) % 8 ==0);
3002     avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3003     flush_put_bits(&dst->pb);
3004 }
3005
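/* Pick the frame-level quantiser: a pending s->next_lambda takes precedence,
 * otherwise the rate-control module estimates it (unless the qscale is fixed).
 * With adaptive quantisation the per-MB qscale table is then cleaned up, since
 * MPEG-4/H.263-style codecs only allow small per-MB quantiser deltas. */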
3006 static int estimate_qp(MpegEncContext *s, int dry_run){
3007     if (s->next_lambda){
3008         s->current_picture_ptr->f.quality =
3009         s->current_picture.f.quality = s->next_lambda;
3010         if(!dry_run) s->next_lambda= 0;
3011     } else if (!s->fixed_qscale) {
3012         s->current_picture_ptr->f.quality =
3013         s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
3014         if (s->current_picture.f.quality < 0)
3015             return -1;
3016     }
3017
3018     if(s->adaptive_quant){
3019         switch(s->codec_id){
3020         case CODEC_ID_MPEG4:
3021             if (CONFIG_MPEG4_ENCODER)
3022                 ff_clean_mpeg4_qscales(s);
3023             break;
3024         case CODEC_ID_H263:
3025         case CODEC_ID_H263P:
3026         case CODEC_ID_FLV1:
3027             if (CONFIG_H263_ENCODER)
3028                 ff_clean_h263_qscales(s);
3029             break;
3030         default:
3031             ff_init_qscale_tab(s);
3032         }
3033
3034         s->lambda = s->lambda_table[0];
3035         //FIXME broken
3036     } else
3037         s->lambda = s->current_picture.f.quality;
3038 //printf("%d %d\n", s->avctx->global_quality, s->current_picture.quality);
3039     update_qscale(s);
3040     return 0;
3041 }
3042
3043 /* must be called before writing the header */
3044 static void set_frame_distances(MpegEncContext * s){
3045     assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3046     s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3047
3048     if(s->pict_type==AV_PICTURE_TYPE_B){
3049         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3050         assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3051     }else{
3052         s->pp_time= s->time - s->last_non_b_time;
3053         s->last_non_b_time= s->time;
3054         assert(s->picture_number==0 || s->pp_time > 0);
3055     }
3056 }
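/* Illustration (values invented): with reference frames at times 0 and 3 and a
 * B-frame at time 1 coded in between, the P-frame sets pp_time = 3 - 0 = 3, and
 * the B-frame then gets pb_time = pp_time - (last_non_b_time - time)
 * = 3 - (3 - 1) = 1, the forward distance later used for direct-mode scaling. */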
3057
3058 static int encode_picture(MpegEncContext *s, int picture_number)
3059 {
3060     int i;
3061     int bits;
3062     int context_count = s->avctx->thread_count;
3063
3064     s->picture_number = picture_number;
3065
3066     /* Reset the MB variance accumulators */
3067     s->me.mb_var_sum_temp    =
3068     s->me.mc_mb_var_sum_temp = 0;
3069
3070     /* we need to initialize some time vars before we can encode b-frames */
3071     // RAL: Condition added for MPEG1VIDEO
3072     if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3073         set_frame_distances(s);
3074     if(CONFIG_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4)
3075         ff_set_mpeg4_time(s);
3076
3077     s->me.scene_change_score=0;
3078
3079 //    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3080
3081     if (s->pict_type == AV_PICTURE_TYPE_I) {
3082         if (s->msmpeg4_version >= 3) s->no_rounding = 1;
3083         else                         s->no_rounding = 0;
3084     } else if (s->pict_type != AV_PICTURE_TYPE_B) {
3085         if (s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
3086             s->no_rounding ^= 1;
3087     }
3088
3089     if(s->flags & CODEC_FLAG_PASS2){
3090         if (estimate_qp(s,1) < 0)
3091             return -1;
3092         ff_get_2pass_fcode(s);
3093     }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3094         if(s->pict_type==AV_PICTURE_TYPE_B)
3095             s->lambda= s->last_lambda_for[s->pict_type];
3096         else
3097             s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3098         update_qscale(s);
3099     }
3100
3101     s->mb_intra=0; //for the rate distortion & bit compare functions
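    /* Each slice thread works on its own copy of the context; the copies are
     * refreshed from the main context here and their statistics are merged back
     * after motion estimation and after encoding (see merge_context_after_*). */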
3102     for(i=1; i<context_count; i++){
3103         ff_update_duplicate_context(s->thread_context[i], s);
3104     }
3105
3106     if(ff_init_me(s)<0)
3107         return -1;
3108
3109     /* Estimate motion for every MB */
3110     if(s->pict_type != AV_PICTURE_TYPE_I){
3111         s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3112         s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3113         if(s->pict_type != AV_PICTURE_TYPE_B && s->avctx->me_threshold==0){
3114             if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3115                 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3116             }
3117         }
3118
3119         s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3120     }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3121         /* I-Frame */
3122         for(i=0; i<s->mb_stride*s->mb_height; i++)
3123             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3124
3125         if(!s->fixed_qscale){
3126             /* finding spatial complexity for I-frame rate control */
3127             s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3128         }
3129     }
3130     for(i=1; i<context_count; i++){
3131         merge_context_after_me(s, s->thread_context[i]);
3132     }
3133     s->current_picture.mc_mb_var_sum = s->current_picture_ptr->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
3134     s->current_picture.mb_var_sum    = s->current_picture_ptr->mb_var_sum    = s->me.mb_var_sum_temp;
3135     emms_c();
3136
3137     if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3138         s->pict_type= AV_PICTURE_TYPE_I;
3139         for(i=0; i<s->mb_stride*s->mb_height; i++)
3140             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3141 //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3142     }
3143
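    /* Choose the MV range codes: f_code/b_code are picked as the smallest codes
     * that still cover the largest forward/backward vectors found by motion
     * estimation, and the ff_fix_long_*mvs() calls deal with vectors that do not
     * fit (clipping them or falling back to another MB type).  H.263+ unrestricted
     * MV mode (umvplus) skips this. */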
3144     if(!s->umvplus){
3145         if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3146             s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3147
3148             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3149                 int a,b;
3150                 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3151                 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3152                 s->f_code= FFMAX3(s->f_code, a, b);
3153             }
3154
3155             ff_fix_long_p_mvs(s);
3156             ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3157             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3158                 int j;
3159                 for(i=0; i<2; i++){
3160                     for(j=0; j<2; j++)
3161                         ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3162                                         s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
3163                 }
3164             }
3165         }
3166
3167         if(s->pict_type==AV_PICTURE_TYPE_B){
3168             int a, b;
3169
3170             a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3171             b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3172             s->f_code = FFMAX(a, b);
3173
3174             a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3175             b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3176             s->b_code = FFMAX(a, b);
3177
3178             ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3179             ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3180             ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3181             ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3182             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3183                 int dir, j;
3184                 for(dir=0; dir<2; dir++){
3185                     for(i=0; i<2; i++){
3186                         for(j=0; j<2; j++){
3187                             int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3188                                           : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3189                             ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3190                                             s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3191                         }
3192                     }
3193                 }
3194             }
3195         }
3196     }
3197
3198     if (estimate_qp(s, 0) < 0)
3199         return -1;
3200
3201     if (s->qscale < 3 && s->max_qcoeff <= 128 && s->pict_type == AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3202         s->qscale = 3; // reduce clipping problems
3203
3204     if (s->out_format == FMT_MJPEG) {
3205         /* for mjpeg, we do include qscale in the matrix */
3206         for(i=1;i<64;i++){
3207             int j= s->dsp.idct_permutation[i];
3208
3209             s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3210         }
3211         s->y_dc_scale_table=
3212         s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3213         s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3214         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3215                           s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3216         s->qscale= 8;
3217     }
3218
3219     //FIXME var duplication
3220     s->current_picture_ptr->f.key_frame =
3221     s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3222     s->current_picture_ptr->f.pict_type =
3223     s->current_picture.f.pict_type = s->pict_type;
3224
3225     if (s->current_picture.f.key_frame)
3226         s->picture_in_gop_number=0;
3227
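    /* Write the picture header for the selected output format; header_bits is
     * measured as the difference in put_bits_count() around the switch below. */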
3228     s->last_bits= put_bits_count(&s->pb);
3229     switch(s->out_format) {
3230     case FMT_MJPEG:
3231         if (CONFIG_MJPEG_ENCODER)
3232             ff_mjpeg_encode_picture_header(s);
3233         break;
3234     case FMT_H261:
3235         if (CONFIG_H261_ENCODER)
3236             ff_h261_encode_picture_header(s, picture_number);
3237         break;
3238     case FMT_H263:
3239         if (CONFIG_WMV2_ENCODER && s->codec_id == CODEC_ID_WMV2)
3240             ff_wmv2_encode_picture_header(s, picture_number);
3241         else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3242             msmpeg4_encode_picture_header(s, picture_number);
3243         else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3244             mpeg4_encode_picture_header(s, picture_number);
3245         else if (CONFIG_RV10_ENCODER && s->codec_id == CODEC_ID_RV10)
3246             rv10_encode_picture_header(s, picture_number);
3247         else if (CONFIG_RV20_ENCODER && s->codec_id == CODEC_ID_RV20)
3248             rv20_encode_picture_header(s, picture_number);
3249         else if (CONFIG_FLV_ENCODER && s->codec_id == CODEC_ID_FLV1)
3250             ff_flv_encode_picture_header(s, picture_number);
3251         else if (CONFIG_H263_ENCODER)
3252             h263_encode_picture_header(s, picture_number);
3253         break;
3254     case FMT_MPEG1:
3255         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3256             mpeg1_encode_picture_header(s, picture_number);
3257         break;
3258     case FMT_H264:
3259         break;
3260     default:
3261         assert(0);
3262     }
3263     bits= put_bits_count(&s->pb);
3264     s->header_bits= bits - s->last_bits;
3265
3266     for(i=1; i<context_count; i++){
3267         update_duplicate_context_after_me(s->thread_context[i], s);
3268     }
3269     s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3270     for(i=1; i<context_count; i++){
3271         merge_context_after_encode(s, s->thread_context[i]);
3272     }
3273     emms_c();
3274     return 0;
3275 }
3276
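/* denoise_dct_c() implements the noise_reduction option: it accumulates the
 * magnitude of each DCT coefficient position in dct_error_sum and shrinks every
 * coefficient towards zero by the running dct_offset for that position, e.g. a
 * coefficient of +3 with an offset of 5 becomes 0, one of +9 becomes 4. */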
3277 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){
3278     const int intra= s->mb_intra;
3279     int i;
3280
3281     s->dct_count[intra]++;
3282
3283     for(i=0; i<64; i++){
3284         int level= block[i];
3285
3286         if(level){
3287             if(level>0){
3288                 s->dct_error_sum[intra][i] += level;
3289                 level -= s->dct_offset[intra][i];
3290                 if(level<0) level=0;
3291             }else{
3292                 s->dct_error_sum[intra][i] -= level;
3293                 level += s->dct_offset[intra][i];
3294                 if(level>0) level=0;
3295             }
3296             block[i]= level;
3297         }
3298     }