mpegvideo_enc: drop support for reusing the input motion vectors.
[ffmpeg.git] / libavcodec / mpegvideo_enc.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29
30 #include "libavutil/internal.h"
31 #include "libavutil/intmath.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/opt.h"
35 #include "avcodec.h"
36 #include "dct.h"
37 #include "dsputil.h"
38 #include "mpegvideo.h"
39 #include "h263.h"
40 #include "mathops.h"
41 #include "mjpegenc.h"
42 #include "msmpeg4.h"
43 #include "faandct.h"
44 #include "thread.h"
45 #include "aandcttab.h"
46 #include "flv.h"
47 #include "mpeg4video.h"
48 #include "internal.h"
49 #include "bytestream.h"
50 #include <limits.h>
51
52 //#undef NDEBUG
53 //#include <assert.h>
54
55 static int encode_picture(MpegEncContext *s, int picture_number);
56 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
57 static int sse_mb(MpegEncContext *s);
58 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
59 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
60
61 /* enable all paranoid tests for rounding, overflows, etc... */
62 //#define PARANOID
63
64 //#define DEBUG
65
66 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
67 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
68
69 const AVOption ff_mpv_generic_options[] = {
70     FF_MPV_COMMON_OPTS
71     { NULL },
72 };
73
74 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
75                        uint16_t (*qmat16)[2][64],
76                        const uint16_t *quant_matrix,
77                        int bias, int qmin, int qmax, int intra)
78 {
79     int qscale;
80     int shift = 0;
81
82     for (qscale = qmin; qscale <= qmax; qscale++) {
83         int i;
84         if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
85             dsp->fdct == ff_jpeg_fdct_islow_10 ||
86             dsp->fdct == ff_faandct) {
87             for (i = 0; i < 64; i++) {
88                 const int j = dsp->idct_permutation[i];
89                 /* 16 <= qscale * quant_matrix[i] <= 7905
90                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
91                  *             19952 <=              x  <= 249205026
92                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
93                  *           3444240 >= (1 << 36) / (x) >= 275 */
94
95                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
96                                         (qscale * quant_matrix[j]));
97             }
98         } else if (dsp->fdct == ff_fdct_ifast) {
99             for (i = 0; i < 64; i++) {
100                 const int j = dsp->idct_permutation[i];
101                 /* 16 <= qscale * quant_matrix[i] <= 7905
102                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
103                  *             19952 <=              x  <= 249205026
104                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
105                  *           3444240 >= (1 << 36) / (x) >= 275 */
106
107                 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
108                                         (ff_aanscales[i] * qscale *
109                                          quant_matrix[j]));
110             }
111         } else {
112             for (i = 0; i < 64; i++) {
113                 const int j = dsp->idct_permutation[i];
114                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
115                  * Assume x = qscale * quant_matrix[i]
116                  * So             16 <=              x  <= 7905
117                  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
118                  * so          32768 >= (1 << 19) / (x) >= 67 */
119                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
120                                         (qscale * quant_matrix[j]));
121                 //qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) /
122                 //                    (qscale * quant_matrix[i]);
123                 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
124                                        (qscale * quant_matrix[j]);
125
126                 if (qmat16[qscale][0][i] == 0 ||
127                     qmat16[qscale][0][i] == 128 * 256)
128                     qmat16[qscale][0][i] = 128 * 256 - 1;
129                 qmat16[qscale][1][i] =
130                     ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
131                                 qmat16[qscale][0][i]);
132             }
133         }
134
135         for (i = intra; i < 64; i++) {
136             int64_t max = 8191;
137             if (dsp->fdct == ff_fdct_ifast) {
138                 max = (8191LL * ff_aanscales[i]) >> 14;
139             }
140             while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
141                 shift++;
142             }
143         }
144     }
145     if (shift) {
146         av_log(NULL, AV_LOG_INFO,
147                "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
148                QMAT_SHIFT - shift);
149     }
150 }
151
152 static inline void update_qscale(MpegEncContext *s)
153 {
154     s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
155                 (FF_LAMBDA_SHIFT + 7);
156     s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
157
158     s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
159                  FF_LAMBDA_SHIFT;
160 }
161
162 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
163 {
164     int i;
165
166     if (matrix) {
167         put_bits(pb, 1, 1);
168         for (i = 0; i < 64; i++) {
169             put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
170         }
171     } else
172         put_bits(pb, 1, 0);
173 }
174
175 /**
176  * init s->current_picture.qscale_table from s->lambda_table
177  */
178 void ff_init_qscale_tab(MpegEncContext *s)
179 {
180     int8_t * const qscale_table = s->current_picture.f.qscale_table;
181     int i;
182
183     for (i = 0; i < s->mb_num; i++) {
184         unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
185         int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
186         qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
187                                                   s->avctx->qmax);
188     }
189 }
190
191 static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst,
192                                     const AVFrame *src)
193 {
194     dst->pict_type              = src->pict_type;
195     dst->quality                = src->quality;
196     dst->coded_picture_number   = src->coded_picture_number;
197     dst->display_picture_number = src->display_picture_number;
198     //dst->reference              = src->reference;
199     dst->pts                    = src->pts;
200     dst->interlaced_frame       = src->interlaced_frame;
201     dst->top_field_first        = src->top_field_first;
202 }
203
204 static void update_duplicate_context_after_me(MpegEncContext *dst,
205                                               MpegEncContext *src)
206 {
207 #define COPY(a) dst->a= src->a
208     COPY(pict_type);
209     COPY(current_picture);
210     COPY(f_code);
211     COPY(b_code);
212     COPY(qscale);
213     COPY(lambda);
214     COPY(lambda2);
215     COPY(picture_in_gop_number);
216     COPY(gop_picture_number);
217     COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
218     COPY(progressive_frame);    // FIXME don't set in encode_header
219     COPY(partitioned_frame);    // FIXME don't set in encode_header
220 #undef COPY
221 }
222
223 /**
224  * Set the given MpegEncContext to defaults for encoding.
225  * the changed fields will not depend upon the prior state of the MpegEncContext.
226  */
227 static void MPV_encode_defaults(MpegEncContext *s)
228 {
229     int i;
230     ff_MPV_common_defaults(s);
231
232     for (i = -16; i < 16; i++) {
233         default_fcode_tab[i + MAX_MV] = 1;
234     }
235     s->me.mv_penalty = default_mv_penalty;
236     s->fcode_tab     = default_fcode_tab;
237 }
238
239 /* init video encoder */
240 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
241 {
242     MpegEncContext *s = avctx->priv_data;
243     int i;
244     int chroma_h_shift, chroma_v_shift;
245
246     MPV_encode_defaults(s);
247
248     switch (avctx->codec_id) {
249     case AV_CODEC_ID_MPEG2VIDEO:
250         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
251             avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
252             av_log(avctx, AV_LOG_ERROR,
253                    "only YUV420 and YUV422 are supported\n");
254             return -1;
255         }
256         break;
257     case AV_CODEC_ID_LJPEG:
258         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
259             avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
260             avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
261             avctx->pix_fmt != AV_PIX_FMT_BGRA     &&
262             ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
263               avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
264               avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
265              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
266             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
267             return -1;
268         }
269         break;
270     case AV_CODEC_ID_MJPEG:
271         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
272             avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
273             ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
274               avctx->pix_fmt != AV_PIX_FMT_YUV422P) ||
275              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
276             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
277             return -1;
278         }
279         break;
280     default:
281         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
282             av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
283             return -1;
284         }
285     }
286
287     switch (avctx->pix_fmt) {
288     case AV_PIX_FMT_YUVJ422P:
289     case AV_PIX_FMT_YUV422P:
290         s->chroma_format = CHROMA_422;
291         break;
292     case AV_PIX_FMT_YUVJ420P:
293     case AV_PIX_FMT_YUV420P:
294     default:
295         s->chroma_format = CHROMA_420;
296         break;
297     }
298
299     s->bit_rate = avctx->bit_rate;
300     s->width    = avctx->width;
301     s->height   = avctx->height;
302     if (avctx->gop_size > 600 &&
303         avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
304         av_log(avctx, AV_LOG_ERROR,
305                "Warning keyframe interval too large! reducing it ...\n");
306         avctx->gop_size = 600;
307     }
308     s->gop_size     = avctx->gop_size;
309     s->avctx        = avctx;
310     s->flags        = avctx->flags;
311     s->flags2       = avctx->flags2;
312     s->max_b_frames = avctx->max_b_frames;
313     s->codec_id     = avctx->codec->id;
314 #if FF_API_MPV_GLOBAL_OPTS
315     if (avctx->luma_elim_threshold)
316         s->luma_elim_threshold   = avctx->luma_elim_threshold;
317     if (avctx->chroma_elim_threshold)
318         s->chroma_elim_threshold = avctx->chroma_elim_threshold;
319 #endif
320     s->strict_std_compliance = avctx->strict_std_compliance;
321     s->quarter_sample     = (avctx->flags & CODEC_FLAG_QPEL) != 0;
322     s->mpeg_quant         = avctx->mpeg_quant;
323     s->rtp_mode           = !!avctx->rtp_payload_size;
324     s->intra_dc_precision = avctx->intra_dc_precision;
325     s->user_specified_pts = AV_NOPTS_VALUE;
326
327     if (s->gop_size <= 1) {
328         s->intra_only = 1;
329         s->gop_size   = 12;
330     } else {
331         s->intra_only = 0;
332     }
333
334     s->me_method = avctx->me_method;
335
336     /* Fixed QSCALE */
337     s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
338
339 #if FF_API_MPV_GLOBAL_OPTS
340     if (s->flags & CODEC_FLAG_QP_RD)
341         s->mpv_flags |= FF_MPV_FLAG_QP_RD;
342 #endif
343
344     s->adaptive_quant = (s->avctx->lumi_masking ||
345                          s->avctx->dark_masking ||
346                          s->avctx->temporal_cplx_masking ||
347                          s->avctx->spatial_cplx_masking  ||
348                          s->avctx->p_masking      ||
349                          s->avctx->border_masking ||
350                          (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
351                         !s->fixed_qscale;
352
353     s->loop_filter      = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
354
355     if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
356         av_log(avctx, AV_LOG_ERROR,
357                "a vbv buffer size is needed, "
358                "for encoding with a maximum bitrate\n");
359         return -1;
360     }
361
362     if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
363         av_log(avctx, AV_LOG_INFO,
364                "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
365     }
366
367     if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
368         av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
369         return -1;
370     }
371
372     if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
373         av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
374         return -1;
375     }
376
377     if (avctx->rc_max_rate &&
378         avctx->rc_max_rate == avctx->bit_rate &&
379         avctx->rc_max_rate != avctx->rc_min_rate) {
380         av_log(avctx, AV_LOG_INFO,
381                "impossible bitrate constraints, this will fail\n");
382     }
383
384     if (avctx->rc_buffer_size &&
385         avctx->bit_rate * (int64_t)avctx->time_base.num >
386             avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
387         av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
388         return -1;
389     }
390
391     if (!s->fixed_qscale &&
392         avctx->bit_rate * av_q2d(avctx->time_base) >
393             avctx->bit_rate_tolerance) {
394         av_log(avctx, AV_LOG_ERROR,
395                "bitrate tolerance too small for bitrate\n");
396         return -1;
397     }
398
399     if (s->avctx->rc_max_rate &&
400         s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
401         (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
402          s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
403         90000LL * (avctx->rc_buffer_size - 1) >
404             s->avctx->rc_max_rate * 0xFFFFLL) {
405         av_log(avctx, AV_LOG_INFO,
406                "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
407                "specified vbv buffer is too large for the given bitrate!\n");
408     }
409
410     if ((s->flags & CODEC_FLAG_4MV)  && s->codec_id != AV_CODEC_ID_MPEG4 &&
411         s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
412         s->codec_id != AV_CODEC_ID_FLV1) {
413         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
414         return -1;
415     }
416
417     if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
418         av_log(avctx, AV_LOG_ERROR,
419                "OBMC is only supported with simple mb decision\n");
420         return -1;
421     }
422
423     if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
424         av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
425         return -1;
426     }
427
428     if (s->max_b_frames                    &&
429         s->codec_id != AV_CODEC_ID_MPEG4      &&
430         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
431         s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
432         av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
433         return -1;
434     }
435
436     if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
437          s->codec_id == AV_CODEC_ID_H263  ||
438          s->codec_id == AV_CODEC_ID_H263P) &&
439         (avctx->sample_aspect_ratio.num > 255 ||
440          avctx->sample_aspect_ratio.den > 255)) {
441         av_log(avctx, AV_LOG_ERROR,
442                "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
443                avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
444         return -1;
445     }
446
447     if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
448         s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
449         av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
450         return -1;
451     }
452
453     // FIXME mpeg2 uses that too
454     if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
455         av_log(avctx, AV_LOG_ERROR,
456                "mpeg2 style quantization not supported by codec\n");
457         return -1;
458     }
459
460 #if FF_API_MPV_GLOBAL_OPTS
461     if (s->flags & CODEC_FLAG_CBP_RD)
462         s->mpv_flags |= FF_MPV_FLAG_CBP_RD;
463 #endif
464
465     if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
466         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
467         return -1;
468     }
469
470     if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
471         s->avctx->mb_decision != FF_MB_DECISION_RD) {
472         av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
473         return -1;
474     }
475
476     if (s->avctx->scenechange_threshold < 1000000000 &&
477         (s->flags & CODEC_FLAG_CLOSED_GOP)) {
478         av_log(avctx, AV_LOG_ERROR,
479                "closed gop with scene change detection are not supported yet, "
480                "set threshold to 1000000000\n");
481         return -1;
482     }
483
484     if (s->flags & CODEC_FLAG_LOW_DELAY) {
485         if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
486             av_log(avctx, AV_LOG_ERROR,
487                   "low delay forcing is only available for mpeg2\n");
488             return -1;
489         }
490         if (s->max_b_frames != 0) {
491             av_log(avctx, AV_LOG_ERROR,
492                    "b frames cannot be used with low delay\n");
493             return -1;
494         }
495     }
496
497     if (s->q_scale_type == 1) {
498         if (avctx->qmax > 12) {
499             av_log(avctx, AV_LOG_ERROR,
500                    "non linear quant only supports qmax <= 12 currently\n");
501             return -1;
502         }
503     }
504
505     if (s->avctx->thread_count > 1         &&
506         s->codec_id != AV_CODEC_ID_MPEG4      &&
507         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
508         s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
509         (s->codec_id != AV_CODEC_ID_H263P)) {
510         av_log(avctx, AV_LOG_ERROR,
511                "multi threaded encoding not supported by codec\n");
512         return -1;
513     }
514
515     if (s->avctx->thread_count < 1) {
516         av_log(avctx, AV_LOG_ERROR,
517                "automatic thread number detection not supported by codec,"
518                "patch welcome\n");
519         return -1;
520     }
521
522     if (s->avctx->thread_count > 1)
523         s->rtp_mode = 1;
524
525     if (!avctx->time_base.den || !avctx->time_base.num) {
526         av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
527         return -1;
528     }
529
530     i = (INT_MAX / 2 + 128) >> 8;
531     if (avctx->mb_threshold >= i) {
532         av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
533                i - 1);
534         return -1;
535     }
536
537     if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
538         av_log(avctx, AV_LOG_INFO,
539                "notice: b_frame_strategy only affects the first pass\n");
540         avctx->b_frame_strategy = 0;
541     }
542
543     i = av_gcd(avctx->time_base.den, avctx->time_base.num);
544     if (i > 1) {
545         av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
546         avctx->time_base.den /= i;
547         avctx->time_base.num /= i;
548         //return -1;
549     }
550
551     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
552         s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
553         // (a + x * 3 / 8) / x
554         s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
555         s->inter_quant_bias = 0;
556     } else {
557         s->intra_quant_bias = 0;
558         // (a - x / 4) / x
559         s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
560     }
561
562     if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
563         s->intra_quant_bias = avctx->intra_quant_bias;
564     if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
565         s->inter_quant_bias = avctx->inter_quant_bias;
566
567     av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
568                                      &chroma_v_shift);
569
570     if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
571         s->avctx->time_base.den > (1 << 16) - 1) {
572         av_log(avctx, AV_LOG_ERROR,
573                "timebase %d/%d not supported by MPEG 4 standard, "
574                "the maximum admitted value for the timebase denominator "
575                "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
576                (1 << 16) - 1);
577         return -1;
578     }
579     s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
580
581 #if FF_API_MPV_GLOBAL_OPTS
582     if (avctx->flags2 & CODEC_FLAG2_SKIP_RD)
583         s->mpv_flags |= FF_MPV_FLAG_SKIP_RD;
584     if (avctx->flags2 & CODEC_FLAG2_STRICT_GOP)
585         s->mpv_flags |= FF_MPV_FLAG_STRICT_GOP;
586     if (avctx->quantizer_noise_shaping)
587         s->quantizer_noise_shaping = avctx->quantizer_noise_shaping;
588 #endif
589
590     switch (avctx->codec->id) {
591     case AV_CODEC_ID_MPEG1VIDEO:
592         s->out_format = FMT_MPEG1;
593         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
594         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
595         break;
596     case AV_CODEC_ID_MPEG2VIDEO:
597         s->out_format = FMT_MPEG1;
598         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
599         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
600         s->rtp_mode   = 1;
601         break;
602     case AV_CODEC_ID_LJPEG:
603     case AV_CODEC_ID_MJPEG:
604         s->out_format = FMT_MJPEG;
605         s->intra_only = 1; /* force intra only for jpeg */
606         if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
607             avctx->pix_fmt   == AV_PIX_FMT_BGRA) {
608             s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
609             s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
610             s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
611         } else {
612             s->mjpeg_vsample[0] = 2;
613             s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
614             s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
615             s->mjpeg_hsample[0] = 2;
616             s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
617             s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
618         }
619         if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
620             ff_mjpeg_encode_init(s) < 0)
621             return -1;
622         avctx->delay = 0;
623         s->low_delay = 1;
624         break;
625     case AV_CODEC_ID_H261:
626         if (!CONFIG_H261_ENCODER)
627             return -1;
628         if (ff_h261_get_picture_format(s->width, s->height) < 0) {
629             av_log(avctx, AV_LOG_ERROR,
630                    "The specified picture size of %dx%d is not valid for the "
631                    "H.261 codec.\nValid sizes are 176x144, 352x288\n",
632                     s->width, s->height);
633             return -1;
634         }
635         s->out_format = FMT_H261;
636         avctx->delay  = 0;
637         s->low_delay  = 1;
638         break;
639     case AV_CODEC_ID_H263:
640         if (!CONFIG_H263_ENCODER)
641         return -1;
642         if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
643                              s->width, s->height) == 8) {
644             av_log(avctx, AV_LOG_INFO,
645                    "The specified picture size of %dx%d is not valid for "
646                    "the H.263 codec.\nValid sizes are 128x96, 176x144, "
647                    "352x288, 704x576, and 1408x1152."
648                    "Try H.263+.\n", s->width, s->height);
649             return -1;
650         }
651         s->out_format = FMT_H263;
652         avctx->delay  = 0;
653         s->low_delay  = 1;
654         break;
655     case AV_CODEC_ID_H263P:
656         s->out_format = FMT_H263;
657         s->h263_plus  = 1;
658         /* Fx */
659         s->h263_aic        = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
660         s->modified_quant  = s->h263_aic;
661         s->loop_filter     = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
662         s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
663
664         /* /Fx */
665         /* These are just to be sure */
666         avctx->delay = 0;
667         s->low_delay = 1;
668         break;
669     case AV_CODEC_ID_FLV1:
670         s->out_format      = FMT_H263;
671         s->h263_flv        = 2; /* format = 1; 11-bit codes */
672         s->unrestricted_mv = 1;
673         s->rtp_mode  = 0; /* don't allow GOB */
674         avctx->delay = 0;
675         s->low_delay = 1;
676         break;
677     case AV_CODEC_ID_RV10:
678         s->out_format = FMT_H263;
679         avctx->delay  = 0;
680         s->low_delay  = 1;
681         break;
682     case AV_CODEC_ID_RV20:
683         s->out_format      = FMT_H263;
684         avctx->delay       = 0;
685         s->low_delay       = 1;
686         s->modified_quant  = 1;
687         s->h263_aic        = 1;
688         s->h263_plus       = 1;
689         s->loop_filter     = 1;
690         s->unrestricted_mv = 0;
691         break;
692     case AV_CODEC_ID_MPEG4:
693         s->out_format      = FMT_H263;
694         s->h263_pred       = 1;
695         s->unrestricted_mv = 1;
696         s->low_delay       = s->max_b_frames ? 0 : 1;
697         avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
698         break;
699     case AV_CODEC_ID_MSMPEG4V2:
700         s->out_format      = FMT_H263;
701         s->h263_pred       = 1;
702         s->unrestricted_mv = 1;
703         s->msmpeg4_version = 2;
704         avctx->delay       = 0;
705         s->low_delay       = 1;
706         break;
707     case AV_CODEC_ID_MSMPEG4V3:
708         s->out_format        = FMT_H263;
709         s->h263_pred         = 1;
710         s->unrestricted_mv   = 1;
711         s->msmpeg4_version   = 3;
712         s->flipflop_rounding = 1;
713         avctx->delay         = 0;
714         s->low_delay         = 1;
715         break;
716     case AV_CODEC_ID_WMV1:
717         s->out_format        = FMT_H263;
718         s->h263_pred         = 1;
719         s->unrestricted_mv   = 1;
720         s->msmpeg4_version   = 4;
721         s->flipflop_rounding = 1;
722         avctx->delay         = 0;
723         s->low_delay         = 1;
724         break;
725     case AV_CODEC_ID_WMV2:
726         s->out_format        = FMT_H263;
727         s->h263_pred         = 1;
728         s->unrestricted_mv   = 1;
729         s->msmpeg4_version   = 5;
730         s->flipflop_rounding = 1;
731         avctx->delay         = 0;
732         s->low_delay         = 1;
733         break;
734     default:
735         return -1;
736     }
737
738     avctx->has_b_frames = !s->low_delay;
739
740     s->encoding = 1;
741
742     s->progressive_frame    =
743     s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
744                                                 CODEC_FLAG_INTERLACED_ME) ||
745                                 s->alternate_scan);
746
747     /* init */
748     if (ff_MPV_common_init(s) < 0)
749         return -1;
750
751     if (ARCH_X86)
752         ff_MPV_encode_init_x86(s);
753
754     if (!s->dct_quantize)
755         s->dct_quantize = ff_dct_quantize_c;
756     if (!s->denoise_dct)
757         s->denoise_dct  = denoise_dct_c;
758     s->fast_dct_quantize = s->dct_quantize;
759     if (avctx->trellis)
760         s->dct_quantize  = dct_quantize_trellis_c;
761
762     if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
763         s->chroma_qscale_table = ff_h263_chroma_qscale_table;
764
765     s->quant_precision = 5;
766
767     ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
768     ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
769
770     if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
771         ff_h261_encode_init(s);
772     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
773         ff_h263_encode_init(s);
774     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
775         ff_msmpeg4_encode_init(s);
776     if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
777         && s->out_format == FMT_MPEG1)
778         ff_mpeg1_encode_init(s);
779
780     /* init q matrix */
781     for (i = 0; i < 64; i++) {
782         int j = s->dsp.idct_permutation[i];
783         if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
784             s->mpeg_quant) {
785             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
786             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
787         } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
788             s->intra_matrix[j] =
789             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
790         } else {
791             /* mpeg1/2 */
792             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
793             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
794         }
795         if (s->avctx->intra_matrix)
796             s->intra_matrix[j] = s->avctx->intra_matrix[i];
797         if (s->avctx->inter_matrix)
798             s->inter_matrix[j] = s->avctx->inter_matrix[i];
799     }
800
801     /* precompute matrix */
802     /* for mjpeg, we do include qscale in the matrix */
803     if (s->out_format != FMT_MJPEG) {
804         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
805                           s->intra_matrix, s->intra_quant_bias, avctx->qmin,
806                           31, 1);
807         ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
808                           s->inter_matrix, s->inter_quant_bias, avctx->qmin,
809                           31, 0);
810     }
811
812     if (ff_rate_control_init(s) < 0)
813         return -1;
814
815     return 0;
816 }
817
818 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
819 {
820     MpegEncContext *s = avctx->priv_data;
821
822     ff_rate_control_uninit(s);
823
824     ff_MPV_common_end(s);
825     if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
826         s->out_format == FMT_MJPEG)
827         ff_mjpeg_encode_close(s);
828
829     av_freep(&avctx->extradata);
830
831     return 0;
832 }
833
834 static int get_sae(uint8_t *src, int ref, int stride)
835 {
836     int x,y;
837     int acc = 0;
838
839     for (y = 0; y < 16; y++) {
840         for (x = 0; x < 16; x++) {
841             acc += FFABS(src[x + y * stride] - ref);
842         }
843     }
844
845     return acc;
846 }
847
848 static int get_intra_count(MpegEncContext *s, uint8_t *src,
849                            uint8_t *ref, int stride)
850 {
851     int x, y, w, h;
852     int acc = 0;
853
854     w = s->width  & ~15;
855     h = s->height & ~15;
856
857     for (y = 0; y < h; y += 16) {
858         for (x = 0; x < w; x += 16) {
859             int offset = x + y * stride;
860             int sad  = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
861                                      16);
862             int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
863             int sae  = get_sae(src + offset, mean, stride);
864
865             acc += sae + 500 < sad;
866         }
867     }
868     return acc;
869 }
870
871
872 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
873 {
874     AVFrame *pic = NULL;
875     int64_t pts;
876     int i, display_picture_number = 0;
877     const int encoding_delay = s->max_b_frames ? s->max_b_frames :
878                                                  (s->low_delay ? 0 : 1);
879     int direct = 1;
880
881     if (pic_arg) {
882         pts = pic_arg->pts;
883         display_picture_number = s->input_picture_number++;
884
885         if (pts != AV_NOPTS_VALUE) {
886             if (s->user_specified_pts != AV_NOPTS_VALUE) {
887                 int64_t time = pts;
888                 int64_t last = s->user_specified_pts;
889
890                 if (time <= last) {
891                     av_log(s->avctx, AV_LOG_ERROR,
892                            "Error, Invalid timestamp=%"PRId64", "
893                            "last=%"PRId64"\n", pts, s->user_specified_pts);
894                     return -1;
895                 }
896
897                 if (!s->low_delay && display_picture_number == 1)
898                     s->dts_delta = time - last;
899             }
900             s->user_specified_pts = pts;
901         } else {
902             if (s->user_specified_pts != AV_NOPTS_VALUE) {
903                 s->user_specified_pts =
904                 pts = s->user_specified_pts + 1;
905                 av_log(s->avctx, AV_LOG_INFO,
906                        "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
907                        pts);
908             } else {
909                 pts = display_picture_number;
910             }
911         }
912     }
913
914     if (pic_arg) {
915         if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
916             direct = 0;
917         if (pic_arg->linesize[0] != s->linesize)
918             direct = 0;
919         if (pic_arg->linesize[1] != s->uvlinesize)
920             direct = 0;
921         if (pic_arg->linesize[2] != s->uvlinesize)
922             direct = 0;
923
924         av_dlog(s->avctx, "%d %d %d %d\n", pic_arg->linesize[0],
925                 pic_arg->linesize[1], s->linesize, s->uvlinesize);
926
927         if (direct) {
928             i = ff_find_unused_picture(s, 1);
929             if (i < 0)
930                 return i;
931
932             pic = &s->picture[i].f;
933             pic->reference = 3;
934
935             for (i = 0; i < 4; i++) {
936                 pic->data[i]     = pic_arg->data[i];
937                 pic->linesize[i] = pic_arg->linesize[i];
938             }
939             if (ff_alloc_picture(s, (Picture *) pic, 1) < 0) {
940                 return -1;
941             }
942         } else {
943             i = ff_find_unused_picture(s, 0);
944             if (i < 0)
945                 return i;
946
947             pic = &s->picture[i].f;
948             pic->reference = 3;
949
950             if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
951                 return -1;
952             }
953
954             if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
955                 pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
956                 pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
957                 // empty
958             } else {
959                 int h_chroma_shift, v_chroma_shift;
960                 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
961                                                  &h_chroma_shift,
962                                                  &v_chroma_shift);
963
964                 for (i = 0; i < 3; i++) {
965                     int src_stride = pic_arg->linesize[i];
966                     int dst_stride = i ? s->uvlinesize : s->linesize;
967                     int h_shift = i ? h_chroma_shift : 0;
968                     int v_shift = i ? v_chroma_shift : 0;
969                     int w = s->width  >> h_shift;
970                     int h = s->height >> v_shift;
971                     uint8_t *src = pic_arg->data[i];
972                     uint8_t *dst = pic->data[i];
973
974                     if (!s->avctx->rc_buffer_size)
975                         dst += INPLACE_OFFSET;
976
977                     if (src_stride == dst_stride)
978                         memcpy(dst, src, src_stride * h);
979                     else {
980                         while (h--) {
981                             memcpy(dst, src, w);
982                             dst += dst_stride;
983                             src += src_stride;
984                         }
985                     }
986                 }
987             }
988         }
989         copy_picture_attributes(s, pic, pic_arg);
990         pic->display_picture_number = display_picture_number;
991         pic->pts = pts; // we set this here to avoid modifiying pic_arg
992     }
993
994     /* shift buffer entries */
995     for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
996         s->input_picture[i - 1] = s->input_picture[i];
997
998     s->input_picture[encoding_delay] = (Picture*) pic;
999
1000     return 0;
1001 }
1002
1003 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1004 {
1005     int x, y, plane;
1006     int score = 0;
1007     int64_t score64 = 0;
1008
1009     for (plane = 0; plane < 3; plane++) {
1010         const int stride = p->f.linesize[plane];
1011         const int bw = plane ? 1 : 2;
1012         for (y = 0; y < s->mb_height * bw; y++) {
1013             for (x = 0; x < s->mb_width * bw; x++) {
1014                 int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
1015                 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
1016                 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
1017                 int v   = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1018
1019                 switch (s->avctx->frame_skip_exp) {
1020                 case 0: score    =  FFMAX(score, v);          break;
1021                 case 1: score   += FFABS(v);                  break;
1022                 case 2: score   += v * v;                     break;
1023                 case 3: score64 += FFABS(v * v * (int64_t)v); break;
1024                 case 4: score64 += v * v * (int64_t)(v * v);  break;
1025                 }
1026             }
1027         }
1028     }
1029
1030     if (score)
1031         score64 = score;
1032
1033     if (score64 < s->avctx->frame_skip_threshold)
1034         return 1;
1035     if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
1036         return 1;
1037     return 0;
1038 }
1039
1040 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1041 {
1042     AVPacket pkt = { 0 };
1043     int ret, got_output;
1044
1045     av_init_packet(&pkt);
1046     ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1047     if (ret < 0)
1048         return ret;
1049
1050     ret = pkt.size;
1051     av_free_packet(&pkt);
1052     return ret;
1053 }
1054
1055 static int estimate_best_b_count(MpegEncContext *s)
1056 {
1057     AVCodec *codec    = avcodec_find_encoder(s->avctx->codec_id);
1058     AVCodecContext *c = avcodec_alloc_context3(NULL);
1059     AVFrame input[FF_MAX_B_FRAMES + 2];
1060     const int scale = s->avctx->brd_scale;
1061     int i, j, out_size, p_lambda, b_lambda, lambda2;
1062     int64_t best_rd  = INT64_MAX;
1063     int best_b_count = -1;
1064
1065     assert(scale >= 0 && scale <= 3);
1066
1067     //emms_c();
1068     //s->next_picture_ptr->quality;
1069     p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1070     //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1071     b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1072     if (!b_lambda) // FIXME we should do this somewhere else
1073         b_lambda = p_lambda;
1074     lambda2  = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1075                FF_LAMBDA_SHIFT;
1076
1077     c->width        = s->width  >> scale;
1078     c->height       = s->height >> scale;
1079     c->flags        = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1080                       CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1081     c->flags       |= s->avctx->flags & CODEC_FLAG_QPEL;
1082     c->mb_decision  = s->avctx->mb_decision;
1083     c->me_cmp       = s->avctx->me_cmp;
1084     c->mb_cmp       = s->avctx->mb_cmp;
1085     c->me_sub_cmp   = s->avctx->me_sub_cmp;
1086     c->pix_fmt      = AV_PIX_FMT_YUV420P;
1087     c->time_base    = s->avctx->time_base;
1088     c->max_b_frames = s->max_b_frames;
1089
1090     if (avcodec_open2(c, codec, NULL) < 0)
1091         return -1;
1092
1093     for (i = 0; i < s->max_b_frames + 2; i++) {
1094         int ysize = c->width * c->height;
1095         int csize = (c->width / 2) * (c->height / 2);
1096         Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1097                                                 s->next_picture_ptr;
1098
1099         avcodec_get_frame_defaults(&input[i]);
1100         input[i].data[0]     = av_malloc(ysize + 2 * csize);
1101         input[i].data[1]     = input[i].data[0] + ysize;
1102         input[i].data[2]     = input[i].data[1] + csize;
1103         input[i].linesize[0] = c->width;
1104         input[i].linesize[1] =
1105         input[i].linesize[2] = c->width / 2;
1106
1107         if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1108             pre_input = *pre_input_ptr;
1109
1110             if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
1111                 pre_input.f.data[0] += INPLACE_OFFSET;
1112                 pre_input.f.data[1] += INPLACE_OFFSET;
1113                 pre_input.f.data[2] += INPLACE_OFFSET;
1114             }
1115
1116             s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
1117                                  pre_input.f.data[0], pre_input.f.linesize[0],
1118                                  c->width,      c->height);
1119             s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
1120                                  pre_input.f.data[1], pre_input.f.linesize[1],
1121                                  c->width >> 1, c->height >> 1);
1122             s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
1123                                  pre_input.f.data[2], pre_input.f.linesize[2],
1124                                  c->width >> 1, c->height >> 1);
1125         }
1126     }
1127
1128     for (j = 0; j < s->max_b_frames + 1; j++) {
1129         int64_t rd = 0;
1130
1131         if (!s->input_picture[j])
1132             break;
1133
1134         c->error[0] = c->error[1] = c->error[2] = 0;
1135
1136         input[0].pict_type = AV_PICTURE_TYPE_I;
1137         input[0].quality   = 1 * FF_QP2LAMBDA;
1138
1139         out_size = encode_frame(c, &input[0]);
1140
1141         //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1142
1143         for (i = 0; i < s->max_b_frames + 1; i++) {
1144             int is_p = i % (j + 1) == j || i == s->max_b_frames;
1145
1146             input[i + 1].pict_type = is_p ?
1147                                      AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1148             input[i + 1].quality   = is_p ? p_lambda : b_lambda;
1149
1150             out_size = encode_frame(c, &input[i + 1]);
1151
1152             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1153         }
1154
1155         /* get the delayed frames */
1156         while (out_size) {
1157             out_size = encode_frame(c, NULL);
1158             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1159         }
1160
1161         rd += c->error[0] + c->error[1] + c->error[2];
1162
1163         if (rd < best_rd) {
1164             best_rd = rd;
1165             best_b_count = j;
1166         }
1167     }
1168
1169     avcodec_close(c);
1170     av_freep(&c);
1171
1172     for (i = 0; i < s->max_b_frames + 2; i++) {
1173         av_freep(&input[i].data[0]);
1174     }
1175
1176     return best_b_count;
1177 }
1178
1179 static int select_input_picture(MpegEncContext *s)
1180 {
1181     int i;
1182
1183     for (i = 1; i < MAX_PICTURE_COUNT; i++)
1184         s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1185     s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1186
1187     /* set next picture type & ordering */
1188     if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
1189         if (/*s->picture_in_gop_number >= s->gop_size ||*/
1190             s->next_picture_ptr == NULL || s->intra_only) {
1191             s->reordered_input_picture[0] = s->input_picture[0];
1192             s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1193             s->reordered_input_picture[0]->f.coded_picture_number =
1194                 s->coded_picture_number++;
1195         } else {
1196             int b_frames;
1197
1198             if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1199                 if (s->picture_in_gop_number < s->gop_size &&
1200                     skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1201                     // FIXME check that te gop check above is +-1 correct
1202                     if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
1203                         for (i = 0; i < 4; i++)
1204                             s->input_picture[0]->f.data[i] = NULL;
1205                         s->input_picture[0]->f.type = 0;
1206                     } else {
1207                         assert(s->input_picture[0]->f.type == FF_BUFFER_TYPE_USER ||
1208                                s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);
1209
1210                         s->avctx->release_buffer(s->avctx,
1211                                                  &s->input_picture[0]->f);
1212                     }
1213
1214                     emms_c();
1215                     ff_vbv_update(s, 0);
1216
1217                     goto no_output_pic;
1218                 }
1219             }
1220
1221             if (s->flags & CODEC_FLAG_PASS2) {
1222                 for (i = 0; i < s->max_b_frames + 1; i++) {
1223                     int pict_num = s->input_picture[0]->f.display_picture_number + i;
1224
1225                     if (pict_num >= s->rc_context.num_entries)
1226                         break;
1227                     if (!s->input_picture[i]) {
1228                         s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1229                         break;
1230                     }
1231
1232                     s->input_picture[i]->f.pict_type =
1233                         s->rc_context.entry[pict_num].new_pict_type;
1234                 }
1235             }
1236
1237             if (s->avctx->b_frame_strategy == 0) {
1238                 b_frames = s->max_b_frames;
1239                 while (b_frames && !s->input_picture[b_frames])
1240                     b_frames--;
1241             } else if (s->avctx->b_frame_strategy == 1) {
1242                 for (i = 1; i < s->max_b_frames + 1; i++) {
1243                     if (s->input_picture[i] &&
1244                         s->input_picture[i]->b_frame_score == 0) {
1245                         s->input_picture[i]->b_frame_score =
1246                             get_intra_count(s,
1247                                             s->input_picture[i    ]->f.data[0],
1248                                             s->input_picture[i - 1]->f.data[0],
1249                                             s->linesize) + 1;
1250                     }
1251                 }
1252                 for (i = 0; i < s->max_b_frames + 1; i++) {
1253                     if (s->input_picture[i] == NULL ||
1254                         s->input_picture[i]->b_frame_score - 1 >
1255                             s->mb_num / s->avctx->b_sensitivity)
1256                         break;
1257                 }
1258
1259                 b_frames = FFMAX(0, i - 1);
1260
1261                 /* reset scores */
1262                 for (i = 0; i < b_frames + 1; i++) {
1263                     s->input_picture[i]->b_frame_score = 0;
1264                 }
1265             } else if (s->avctx->b_frame_strategy == 2) {
1266                 b_frames = estimate_best_b_count(s);
1267             } else {
1268                 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1269                 b_frames = 0;
1270             }
1271
1272             emms_c();
1273
1274             for (i = b_frames - 1; i >= 0; i--) {
1275                 int type = s->input_picture[i]->f.pict_type;
1276                 if (type && type != AV_PICTURE_TYPE_B)
1277                     b_frames = i;
1278             }
1279             if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1280                 b_frames == s->max_b_frames) {
1281                 av_log(s->avctx, AV_LOG_ERROR,
1282                        "warning, too many b frames in a row\n");
1283             }
1284
1285             if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1286                 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1287                     s->gop_size > s->picture_in_gop_number) {
1288                     b_frames = s->gop_size - s->picture_in_gop_number - 1;
1289                 } else {
1290                     if (s->flags & CODEC_FLAG_CLOSED_GOP)
1291                         b_frames = 0;
1292                     s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1293                 }
1294             }
1295
1296             if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1297                 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
1298                 b_frames--;
1299
1300             s->reordered_input_picture[0] = s->input_picture[b_frames];
1301             if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1302                 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1303             s->reordered_input_picture[0]->f.coded_picture_number =
1304                 s->coded_picture_number++;
1305             for (i = 0; i < b_frames; i++) {
1306                 s->reordered_input_picture[i + 1] = s->input_picture[i];
1307                 s->reordered_input_picture[i + 1]->f.pict_type =
1308                     AV_PICTURE_TYPE_B;
1309                 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1310                     s->coded_picture_number++;
1311             }
1312         }
1313     }
1314 no_output_pic:
1315     if (s->reordered_input_picture[0]) {
1316         s->reordered_input_picture[0]->f.reference =
1317            s->reordered_input_picture[0]->f.pict_type !=
1318                AV_PICTURE_TYPE_B ? 3 : 0;
1319
1320         ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
1321
1322         if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED ||
1323             s->avctx->rc_buffer_size) {
1324             // input is a shared pix, so we can't modifiy it -> alloc a new
1325             // one & ensure that the shared one is reuseable
1326
1327             Picture *pic;
1328             int i = ff_find_unused_picture(s, 0);
1329             if (i < 0)
1330                 return i;
1331             pic = &s->picture[i];
1332
1333             pic->f.reference = s->reordered_input_picture[0]->f.reference;
1334             if (ff_alloc_picture(s, pic, 0) < 0) {
1335                 return -1;
1336             }
1337
1338             /* mark us unused / free shared pic */
1339             if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
1340                 s->avctx->release_buffer(s->avctx,
1341                                          &s->reordered_input_picture[0]->f);
1342             for (i = 0; i < 4; i++)
1343                 s->reordered_input_picture[0]->f.data[i] = NULL;
1344             s->reordered_input_picture[0]->f.type = 0;
1345
1346             copy_picture_attributes(s, &pic->f,
1347                                     &s->reordered_input_picture[0]->f);
1348
1349             s->current_picture_ptr = pic;
1350         } else {
1351             // input is not a shared pix -> reuse buffer for current_pix
1352
1353             assert(s->reordered_input_picture[0]->f.type ==
1354                        FF_BUFFER_TYPE_USER ||
1355                    s->reordered_input_picture[0]->f.type ==
1356                        FF_BUFFER_TYPE_INTERNAL);
1357
1358             s->current_picture_ptr = s->reordered_input_picture[0];
1359             for (i = 0; i < 4; i++) {
1360                 s->new_picture.f.data[i] += INPLACE_OFFSET;
1361             }
1362         }
1363         ff_copy_picture(&s->current_picture, s->current_picture_ptr);
1364
1365         s->picture_number = s->new_picture.f.display_picture_number;
1366     } else {
1367         memset(&s->new_picture, 0, sizeof(Picture));
1368     }
1369     return 0;
1370 }
1371
1372 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1373                           const AVFrame *pic_arg, int *got_packet)
1374 {
1375     MpegEncContext *s = avctx->priv_data;
1376     int i, stuffing_count, ret;
1377     int context_count = s->slice_context_count;
1378
1379     s->picture_in_gop_number++;
1380
1381     if (load_input_picture(s, pic_arg) < 0)
1382         return -1;
1383
1384     if (select_input_picture(s) < 0) {
1385         return -1;
1386     }
1387
1388     /* output? */
1389     if (s->new_picture.f.data[0]) {
1390         if (!pkt->data &&
1391             (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
1392             return ret;
1393         if (s->mb_info) {
1394             s->mb_info_ptr = av_packet_new_side_data(pkt,
1395                                  AV_PKT_DATA_H263_MB_INFO,
1396                                  s->mb_width*s->mb_height*12);
1397             s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1398         }
1399
1400         for (i = 0; i < context_count; i++) {
1401             int start_y = s->thread_context[i]->start_mb_y;
1402             int   end_y = s->thread_context[i]->  end_mb_y;
1403             int h       = s->mb_height;
1404             uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1405             uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) *   end_y / h);
1406
1407             init_put_bits(&s->thread_context[i]->pb, start, end - start);
1408         }
1409
1410         s->pict_type = s->new_picture.f.pict_type;
1411         //emms_c();
1412         ff_MPV_frame_start(s, avctx);
1413 vbv_retry:
1414         if (encode_picture(s, s->picture_number) < 0)
1415             return -1;
1416
1417         avctx->header_bits = s->header_bits;
1418         avctx->mv_bits     = s->mv_bits;
1419         avctx->misc_bits   = s->misc_bits;
1420         avctx->i_tex_bits  = s->i_tex_bits;
1421         avctx->p_tex_bits  = s->p_tex_bits;
1422         avctx->i_count     = s->i_count;
1423         // FIXME f/b_count in avctx
1424         avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
1425         avctx->skip_count  = s->skip_count;
1426
1427         ff_MPV_frame_end(s);
1428
1429         if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1430             ff_mjpeg_encode_picture_trailer(s);
1431
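        /* VBV / rate-control overshoot handling: if the coded frame exceeds the
         * allowed share of the VBV buffer and lambda may still be raised, bump
         * lambda (and the per-MB lambda table), undo the per-frame state changes
         * made during encoding and re-encode the frame via vbv_retry. */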
1432         if (avctx->rc_buffer_size) {
1433             RateControlContext *rcc = &s->rc_context;
1434             int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1435
1436             if (put_bits_count(&s->pb) > max_size &&
1437                 s->lambda < s->avctx->lmax) {
1438                 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1439                                        (s->qscale + 1) / s->qscale);
1440                 if (s->adaptive_quant) {
1441                     int i;
1442                     for (i = 0; i < s->mb_height * s->mb_stride; i++)
1443                         s->lambda_table[i] =
1444                             FFMAX(s->lambda_table[i] + 1,
1445                                   s->lambda_table[i] * (s->qscale + 1) /
1446                                   s->qscale);
1447                 }
1448                 s->mb_skipped = 0;        // done in MPV_frame_start()
1449                 // these were done in encode_picture(), so we must undo them before retrying
1450                 if (s->pict_type == AV_PICTURE_TYPE_P) {
1451                     if (s->flipflop_rounding          ||
1452                         s->codec_id == AV_CODEC_ID_H263P ||
1453                         s->codec_id == AV_CODEC_ID_MPEG4)
1454                         s->no_rounding ^= 1;
1455                 }
1456                 if (s->pict_type != AV_PICTURE_TYPE_B) {
1457                     s->time_base       = s->last_time_base;
1458                     s->last_non_b_time = s->time - s->pp_time;
1459                 }
1460                 for (i = 0; i < context_count; i++) {
1461                     PutBitContext *pb = &s->thread_context[i]->pb;
1462                     init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1463                 }
1464                 goto vbv_retry;
1465             }
1466
1467             assert(s->avctx->rc_max_rate);
1468         }
1469
1470         if (s->flags & CODEC_FLAG_PASS1)
1471             ff_write_pass1_stats(s);
1472
1473         for (i = 0; i < 4; i++) {
1474             s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1475             avctx->error[i] += s->current_picture_ptr->f.error[i];
1476         }
1477
1478         if (s->flags & CODEC_FLAG_PASS1)
1479             assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1480                    avctx->i_tex_bits + avctx->p_tex_bits ==
1481                        put_bits_count(&s->pb));
1482         flush_put_bits(&s->pb);
1483         s->frame_bits  = put_bits_count(&s->pb);
1484
1485         stuffing_count = ff_vbv_update(s, s->frame_bits);
1486         if (stuffing_count) {
1487             if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1488                     stuffing_count + 50) {
1489                 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1490                 return -1;
1491             }
1492
1493             switch (s->codec_id) {
1494             case AV_CODEC_ID_MPEG1VIDEO:
1495             case AV_CODEC_ID_MPEG2VIDEO:
1496                 while (stuffing_count--) {
1497                     put_bits(&s->pb, 8, 0);
1498                 }
1499             break;
1500             case AV_CODEC_ID_MPEG4:
1501                 put_bits(&s->pb, 16, 0);
1502                 put_bits(&s->pb, 16, 0x1C3);
1503                 stuffing_count -= 4;
1504                 while (stuffing_count--) {
1505                     put_bits(&s->pb, 8, 0xFF);
1506                 }
1507             break;
1508             default:
1509                 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1510             }
1511             flush_put_bits(&s->pb);
1512             s->frame_bits  = put_bits_count(&s->pb);
1513         }
1514
1515         /* For CBR MPEG-1/2, patch the 16-bit vbv_delay field (in 90 kHz units) into the already-written picture header */
1516         if (s->avctx->rc_max_rate                          &&
1517             s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1518             s->out_format == FMT_MPEG1                     &&
1519             90000LL * (avctx->rc_buffer_size - 1) <=
1520                 s->avctx->rc_max_rate * 0xFFFFLL) {
1521             int vbv_delay, min_delay;
1522             double inbits  = s->avctx->rc_max_rate *
1523                              av_q2d(s->avctx->time_base);
1524             int    minbits = s->frame_bits - 8 *
1525                              (s->vbv_delay_ptr - s->pb.buf - 1);
1526             double bits    = s->rc_context.buffer_index + minbits - inbits;
1527
1528             if (bits < 0)
1529                 av_log(s->avctx, AV_LOG_ERROR,
1530                        "Internal error, negative bits\n");
1531
1532             assert(s->repeat_first_field == 0);
1533
1534             vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1535             min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1536                         s->avctx->rc_max_rate;
1537
1538             vbv_delay = FFMAX(vbv_delay, min_delay);
1539
1540             assert(vbv_delay < 0xFFFF);
1541
1542             s->vbv_delay_ptr[0] &= 0xF8;
1543             s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1544             s->vbv_delay_ptr[1]  = vbv_delay >> 5;
1545             s->vbv_delay_ptr[2] &= 0x07;
1546             s->vbv_delay_ptr[2] |= vbv_delay << 3;
1547             avctx->vbv_delay     = vbv_delay * 300;
1548         }
1549         s->total_bits     += s->frame_bits;
1550         avctx->frame_bits  = s->frame_bits;
1551
1552         pkt->pts = s->current_picture.f.pts;
1553         if (!s->low_delay) {
1554             if (!s->current_picture.f.coded_picture_number)
1555                 pkt->dts = pkt->pts - s->dts_delta;
1556             else
1557                 pkt->dts = s->reordered_pts;
1558             s->reordered_pts = s->input_picture[0]->f.pts;
1559         } else
1560             pkt->dts = pkt->pts;
1561         if (s->current_picture.f.key_frame)
1562             pkt->flags |= AV_PKT_FLAG_KEY;
1563         if (s->mb_info)
1564             av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1565     } else {
1566         s->frame_bits = 0;
1567     }
1568     assert((s->frame_bits & 7) == 0);
1569
1570     pkt->size = s->frame_bits / 8;
1571     *got_packet = !!pkt->size;
1572     return 0;
1573 }
1574
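/* Small-coefficient elimination: if every nonzero coefficient of the block is
 * +-1, score each one by the zero run preceding it (tab[]) and clear the whole
 * block when the total score stays below the threshold.  A negative threshold
 * additionally allows the DC coefficient to be cleared. */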
1575 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1576                                                 int n, int threshold)
1577 {
1578     static const char tab[64] = {
1579         3, 2, 2, 1, 1, 1, 1, 1,
1580         1, 1, 1, 1, 1, 1, 1, 1,
1581         1, 1, 1, 1, 1, 1, 1, 1,
1582         0, 0, 0, 0, 0, 0, 0, 0,
1583         0, 0, 0, 0, 0, 0, 0, 0,
1584         0, 0, 0, 0, 0, 0, 0, 0,
1585         0, 0, 0, 0, 0, 0, 0, 0,
1586         0, 0, 0, 0, 0, 0, 0, 0
1587     };
1588     int score = 0;
1589     int run = 0;
1590     int i;
1591     int16_t *block = s->block[n];
1592     const int last_index = s->block_last_index[n];
1593     int skip_dc;
1594
1595     if (threshold < 0) {
1596         skip_dc = 0;
1597         threshold = -threshold;
1598     } else
1599         skip_dc = 1;
1600
1601     /* Are all the coefficients we could set to zero already zero? */
1602     if (last_index <= skip_dc - 1)
1603         return;
1604
1605     for (i = 0; i <= last_index; i++) {
1606         const int j = s->intra_scantable.permutated[i];
1607         const int level = FFABS(block[j]);
1608         if (level == 1) {
1609             if (skip_dc && i == 0)
1610                 continue;
1611             score += tab[run];
1612             run = 0;
1613         } else if (level > 1) {
1614             return;
1615         } else {
1616             run++;
1617         }
1618     }
1619     if (score >= threshold)
1620         return;
1621     for (i = skip_dc; i <= last_index; i++) {
1622         const int j = s->intra_scantable.permutated[i];
1623         block[j] = 0;
1624     }
1625     if (block[0])
1626         s->block_last_index[n] = 0;
1627     else
1628         s->block_last_index[n] = -1;
1629 }
1630
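/* Clamp the quantized coefficients to the codec's representable range
 * [min_qcoeff, max_qcoeff]; the intra DC coefficient is never clipped.
 * A warning is printed if clipping occurs with simple macroblock decision. */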
1631 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1632                                int last_index)
1633 {
1634     int i;
1635     const int maxlevel = s->max_qcoeff;
1636     const int minlevel = s->min_qcoeff;
1637     int overflow = 0;
1638
1639     if (s->mb_intra) {
1640         i = 1; // skip clipping of intra dc
1641     } else
1642         i = 0;
1643
1644     for (; i <= last_index; i++) {
1645         const int j = s->intra_scantable.permutated[i];
1646         int level = block[j];
1647
1648         if (level > maxlevel) {
1649             level = maxlevel;
1650             overflow++;
1651         } else if (level < minlevel) {
1652             level = minlevel;
1653             overflow++;
1654         }
1655
1656         block[j] = level;
1657     }
1658
1659     if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1660         av_log(s->avctx, AV_LOG_INFO,
1661                "warning, clipping %d dct coefficients to %d..%d\n",
1662                overflow, minlevel, maxlevel);
1663 }
1664
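/* Compute per-pixel weights for an 8x8 block from the variance of each pixel's
 * 3x3 neighbourhood; the weights are fed to dct_quantize_refine() when
 * quantizer noise shaping is enabled. */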
1665 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1666 {
1667     int x, y;
1668     // FIXME optimize
1669     for (y = 0; y < 8; y++) {
1670         for (x = 0; x < 8; x++) {
1671             int x2, y2;
1672             int sum = 0;
1673             int sqr = 0;
1674             int count = 0;
1675
1676             for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1677                 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1678                     int v = ptr[x2 + y2 * stride];
1679                     sum += v;
1680                     sqr += v * v;
1681                     count++;
1682                 }
1683             }
1684             weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
1685         }
1686     }
1687 }
1688
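/* Encode one macroblock: fetch the source pixels (intra) or the motion-
 * compensated prediction error (inter), pick frame vs. field DCT, run the DCT
 * and quantization with the optional skip / coefficient-elimination
 * heuristics, and emit the block data with the codec-specific writer. */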
1689 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1690                                                 int motion_x, int motion_y,
1691                                                 int mb_block_height,
1692                                                 int mb_block_count)
1693 {
1694     int16_t weight[8][64];
1695     int16_t orig[8][64];
1696     const int mb_x = s->mb_x;
1697     const int mb_y = s->mb_y;
1698     int i;
1699     int skip_dct[8];
1700     int dct_offset = s->linesize * 8; // default for progressive frames
1701     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1702     int wrap_y, wrap_c;
1703
1704     for (i = 0; i < mb_block_count; i++)
1705         skip_dct[i] = s->skipdct;
1706
1707     if (s->adaptive_quant) {
1708         const int last_qp = s->qscale;
1709         const int mb_xy = mb_x + mb_y * s->mb_stride;
1710
1711         s->lambda = s->lambda_table[mb_xy];
1712         update_qscale(s);
1713
1714         if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1715             s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
1716             s->dquant = s->qscale - last_qp;
1717
1718             if (s->out_format == FMT_H263) {
1719                 s->dquant = av_clip(s->dquant, -2, 2);
1720
1721                 if (s->codec_id == AV_CODEC_ID_MPEG4) {
1722                     if (!s->mb_intra) {
1723                         if (s->pict_type == AV_PICTURE_TYPE_B) {
1724                             if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1725                                 s->dquant = 0;
1726                         }
1727                         if (s->mv_type == MV_TYPE_8X8)
1728                             s->dquant = 0;
1729                     }
1730                 }
1731             }
1732         }
1733         ff_set_qscale(s, last_qp + s->dquant);
1734     } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1735         ff_set_qscale(s, s->qscale + s->dquant);
1736
1737     wrap_y = s->linesize;
1738     wrap_c = s->uvlinesize;
1739     ptr_y  = s->new_picture.f.data[0] +
1740              (mb_y * 16 * wrap_y)              + mb_x * 16;
1741     ptr_cb = s->new_picture.f.data[1] +
1742              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1743     ptr_cr = s->new_picture.f.data[2] +
1744              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1745
1746     if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1747         uint8_t *ebuf = s->edge_emu_buffer + 32;
1748         s->vdsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
1749                                  mb_y * 16, s->width, s->height);
1750         ptr_y = ebuf;
1751         s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
1752                                  mb_block_height, mb_x * 8, mb_y * 8,
1753                                  s->width >> 1, s->height >> 1);
1754         ptr_cb = ebuf + 18 * wrap_y;
1755         s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
1756                                  mb_block_height, mb_x * 8, mb_y * 8,
1757                                  s->width >> 1, s->height >> 1);
1758         ptr_cr = ebuf + 18 * wrap_y + 8;
1759     }
1760
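    /* Intra macroblocks take the source pixels directly; inter macroblocks
     * first run motion compensation into s->dest[] and then transform the
     * residual.  In both cases an interlaced (field) DCT is selected when it
     * scores better than the progressive one. */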
1761     if (s->mb_intra) {
1762         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1763             int progressive_score, interlaced_score;
1764
1765             s->interlaced_dct = 0;
1766             progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1767                                                     NULL, wrap_y, 8) +
1768                                 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1769                                                     NULL, wrap_y, 8) - 400;
1770
1771             if (progressive_score > 0) {
1772                 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1773                                                        NULL, wrap_y * 2, 8) +
1774                                    s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1775                                                        NULL, wrap_y * 2, 8);
1776                 if (progressive_score > interlaced_score) {
1777                     s->interlaced_dct = 1;
1778
1779                     dct_offset = wrap_y;
1780                     wrap_y <<= 1;
1781                     if (s->chroma_format == CHROMA_422)
1782                         wrap_c <<= 1;
1783                 }
1784             }
1785         }
1786
1787         s->dsp.get_pixels(s->block[0], ptr_y                  , wrap_y);
1788         s->dsp.get_pixels(s->block[1], ptr_y              + 8 , wrap_y);
1789         s->dsp.get_pixels(s->block[2], ptr_y + dct_offset     , wrap_y);
1790         s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1791
1792         if (s->flags & CODEC_FLAG_GRAY) {
1793             skip_dct[4] = 1;
1794             skip_dct[5] = 1;
1795         } else {
1796             s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1797             s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1798             if (!s->chroma_y_shift) { /* 422 */
1799                 s->dsp.get_pixels(s->block[6],
1800                                   ptr_cb + (dct_offset >> 1), wrap_c);
1801                 s->dsp.get_pixels(s->block[7],
1802                                   ptr_cr + (dct_offset >> 1), wrap_c);
1803             }
1804         }
1805     } else {
1806         op_pixels_func (*op_pix)[4];
1807         qpel_mc_func (*op_qpix)[16];
1808         uint8_t *dest_y, *dest_cb, *dest_cr;
1809
1810         dest_y  = s->dest[0];
1811         dest_cb = s->dest[1];
1812         dest_cr = s->dest[2];
1813
1814         if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1815             op_pix  = s->dsp.put_pixels_tab;
1816             op_qpix = s->dsp.put_qpel_pixels_tab;
1817         } else {
1818             op_pix  = s->dsp.put_no_rnd_pixels_tab;
1819             op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1820         }
1821
1822         if (s->mv_dir & MV_DIR_FORWARD) {
1823             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1824                           s->last_picture.f.data,
1825                           op_pix, op_qpix);
1826             op_pix  = s->dsp.avg_pixels_tab;
1827             op_qpix = s->dsp.avg_qpel_pixels_tab;
1828         }
1829         if (s->mv_dir & MV_DIR_BACKWARD) {
1830             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1831                           s->next_picture.f.data,
1832                           op_pix, op_qpix);
1833         }
1834
1835         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1836             int progressive_score, interlaced_score;
1837
1838             s->interlaced_dct = 0;
1839             progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1840                                                     ptr_y,              wrap_y,
1841                                                     8) +
1842                                 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1843                                                     ptr_y + wrap_y * 8, wrap_y,
1844                                                     8) - 400;
1845
1846             if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1847                 progressive_score -= 400;
1848
1849             if (progressive_score > 0) {
1850                 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1851                                                        ptr_y,
1852                                                        wrap_y * 2, 8) +
1853                                    s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1854                                                        ptr_y + wrap_y,
1855                                                        wrap_y * 2, 8);
1856
1857                 if (progressive_score > interlaced_score) {
1858                     s->interlaced_dct = 1;
1859
1860                     dct_offset = wrap_y;
1861                     wrap_y <<= 1;
1862                     if (s->chroma_format == CHROMA_422)
1863                         wrap_c <<= 1;
1864                 }
1865             }
1866         }
1867
1868         s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1869         s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1870         s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1871                            dest_y + dct_offset, wrap_y);
1872         s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1873                            dest_y + dct_offset + 8, wrap_y);
1874
1875         if (s->flags & CODEC_FLAG_GRAY) {
1876             skip_dct[4] = 1;
1877             skip_dct[5] = 1;
1878         } else {
1879             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1880             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1881             if (!s->chroma_y_shift) { /* 422 */
1882                 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1883                                    dest_cb + (dct_offset >> 1), wrap_c);
1884                 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1885                                    dest_cr + (dct_offset >> 1), wrap_c);
1886             }
1887         }
1888         /* pre-quantization: skip the DCT for blocks whose prediction error is negligible */
1889         if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1890                 2 * s->qscale * s->qscale) {
1891             // FIXME optimize
1892             if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1893                               wrap_y, 8) < 20 * s->qscale)
1894                 skip_dct[0] = 1;
1895             if (s->dsp.sad[1](NULL, ptr_y + 8,
1896                               dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1897                 skip_dct[1] = 1;
1898             if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1899                               dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1900                 skip_dct[2] = 1;
1901             if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1902                               dest_y + dct_offset + 8,
1903                               wrap_y, 8) < 20 * s->qscale)
1904                 skip_dct[3] = 1;
1905             if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1906                               wrap_c, 8) < 20 * s->qscale)
1907                 skip_dct[4] = 1;
1908             if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1909                               wrap_c, 8) < 20 * s->qscale)
1910                 skip_dct[5] = 1;
1911             if (!s->chroma_y_shift) { /* 422 */
1912                 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1913                                   dest_cb + (dct_offset >> 1),
1914                                   wrap_c, 8) < 20 * s->qscale)
1915                     skip_dct[6] = 1;
1916                 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1917                                   dest_cr + (dct_offset >> 1),
1918                                   wrap_c, 8) < 20 * s->qscale)
1919                     skip_dct[7] = 1;
1920             }
1921         }
1922     }
1923
1924     if (s->quantizer_noise_shaping) {
1925         if (!skip_dct[0])
1926             get_visual_weight(weight[0], ptr_y                 , wrap_y);
1927         if (!skip_dct[1])
1928             get_visual_weight(weight[1], ptr_y              + 8, wrap_y);
1929         if (!skip_dct[2])
1930             get_visual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
1931         if (!skip_dct[3])
1932             get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1933         if (!skip_dct[4])
1934             get_visual_weight(weight[4], ptr_cb                , wrap_c);
1935         if (!skip_dct[5])
1936             get_visual_weight(weight[5], ptr_cr                , wrap_c);
1937         if (!s->chroma_y_shift) { /* 422 */
1938             if (!skip_dct[6])
1939                 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1940                                   wrap_c);
1941             if (!skip_dct[7])
1942                 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
1943                                   wrap_c);
1944         }
1945         memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
1946     }
1947
1948     /* DCT & quantize */
1949     assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1950     {
1951         for (i = 0; i < mb_block_count; i++) {
1952             if (!skip_dct[i]) {
1953                 int overflow;
1954                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1955                 // FIXME: we could change the quantizer instead of
1956                 //        clipping
1957                 // JS: I don't think that would be a good idea; it could lower
1958                 //     quality instead of improving it. Only INTRADC clipping
1959                 //     deserves a change of the quantizer.
1960                 if (overflow)
1961                     clip_coeffs(s, s->block[i], s->block_last_index[i]);
1962             } else
1963                 s->block_last_index[i] = -1;
1964         }
1965         if (s->quantizer_noise_shaping) {
1966             for (i = 0; i < mb_block_count; i++) {
1967                 if (!skip_dct[i]) {
1968                     s->block_last_index[i] =
1969                         dct_quantize_refine(s, s->block[i], weight[i],
1970                                             orig[i], i, s->qscale);
1971                 }
1972             }
1973         }
1974
1975         if (s->luma_elim_threshold && !s->mb_intra)
1976             for (i = 0; i < 4; i++)
1977                 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
1978         if (s->chroma_elim_threshold && !s->mb_intra)
1979             for (i = 4; i < mb_block_count; i++)
1980                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
1981
1982         if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
1983             for (i = 0; i < mb_block_count; i++) {
1984                 if (s->block_last_index[i] == -1)
1985                     s->coded_score[i] = INT_MAX / 256;
1986             }
1987         }
1988     }
1989
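    /* In grayscale mode, force the chroma blocks of intra macroblocks to a
     * flat mid-grey DC value instead of coding them from the input. */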
1990     if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
1991         s->block_last_index[4] =
1992         s->block_last_index[5] = 0;
1993         s->block[4][0] =
1994         s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
1995     }
1996
1997     // FIXME: the non-C quantize code returns an incorrect block_last_index
1998     if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
1999         for (i = 0; i < mb_block_count; i++) {
2000             int j;
2001             if (s->block_last_index[i] > 0) {
2002                 for (j = 63; j > 0; j--) {
2003                     if (s->block[i][s->intra_scantable.permutated[j]])
2004                         break;
2005                 }
2006                 s->block_last_index[i] = j;
2007             }
2008         }
2009     }
2010
2011     /* huffman encode */
2012     switch(s->codec_id){ //FIXME a function pointer could be slightly faster
2013     case AV_CODEC_ID_MPEG1VIDEO:
2014     case AV_CODEC_ID_MPEG2VIDEO:
2015         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2016             ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2017         break;
2018     case AV_CODEC_ID_MPEG4:
2019         if (CONFIG_MPEG4_ENCODER)
2020             ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2021         break;
2022     case AV_CODEC_ID_MSMPEG4V2:
2023     case AV_CODEC_ID_MSMPEG4V3:
2024     case AV_CODEC_ID_WMV1:
2025         if (CONFIG_MSMPEG4_ENCODER)
2026             ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2027         break;
2028     case AV_CODEC_ID_WMV2:
2029         if (CONFIG_WMV2_ENCODER)
2030             ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2031         break;
2032     case AV_CODEC_ID_H261:
2033         if (CONFIG_H261_ENCODER)
2034             ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2035         break;
2036     case AV_CODEC_ID_H263:
2037     case AV_CODEC_ID_H263P:
2038     case AV_CODEC_ID_FLV1:
2039     case AV_CODEC_ID_RV10:
2040     case AV_CODEC_ID_RV20:
2041         if (CONFIG_H263_ENCODER)
2042             ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2043         break;
2044     case AV_CODEC_ID_MJPEG:
2045         if (CONFIG_MJPEG_ENCODER)
2046             ff_mjpeg_encode_mb(s, s->block);
2047         break;
2048     default:
2049         assert(0);
2050     }
2051 }
2052
2053 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2054 {
2055     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y,  8, 6);
2056     else                                encode_mb_internal(s, motion_x, motion_y, 16, 8);
2057 }
2058
2059 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2060     int i;
2061
2062     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2063
2064     /* mpeg1 */
2065     d->mb_skip_run= s->mb_skip_run;
2066     for(i=0; i<3; i++)
2067         d->last_dc[i] = s->last_dc[i];
2068
2069     /* statistics */
2070     d->mv_bits= s->mv_bits;
2071     d->i_tex_bits= s->i_tex_bits;
2072     d->p_tex_bits= s->p_tex_bits;
2073     d->i_count= s->i_count;
2074     d->f_count= s->f_count;
2075     d->b_count= s->b_count;
2076     d->skip_count= s->skip_count;
2077     d->misc_bits= s->misc_bits;
2078     d->last_bits= 0;
2079
2080     d->mb_skipped= 0;
2081     d->qscale= s->qscale;
2082     d->dquant= s->dquant;
2083
2084     d->esc3_level_length= s->esc3_level_length;
2085 }
2086
2087 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2088     int i;
2089
2090     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2091     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2092
2093     /* mpeg1 */
2094     d->mb_skip_run= s->mb_skip_run;
2095     for(i=0; i<3; i++)
2096         d->last_dc[i] = s->last_dc[i];
2097
2098     /* statistics */
2099     d->mv_bits= s->mv_bits;
2100     d->i_tex_bits= s->i_tex_bits;
2101     d->p_tex_bits= s->p_tex_bits;
2102     d->i_count= s->i_count;
2103     d->f_count= s->f_count;
2104     d->b_count= s->b_count;
2105     d->skip_count= s->skip_count;
2106     d->misc_bits= s->misc_bits;
2107
2108     d->mb_intra= s->mb_intra;
2109     d->mb_skipped= s->mb_skipped;
2110     d->mv_type= s->mv_type;
2111     d->mv_dir= s->mv_dir;
2112     d->pb= s->pb;
2113     if(s->data_partitioning){
2114         d->pb2= s->pb2;
2115         d->tex_pb= s->tex_pb;
2116     }
2117     d->block= s->block;
2118     for(i=0; i<8; i++)
2119         d->block_last_index[i]= s->block_last_index[i];
2120     d->interlaced_dct= s->interlaced_dct;
2121     d->qscale= s->qscale;
2122
2123     d->esc3_level_length= s->esc3_level_length;
2124 }
2125
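/* Try one candidate coding mode for the current macroblock: encode it into one
 * of two scratch bitstreams and, for rate-distortion macroblock decision, also
 * reconstruct it and add the SSE distortion scaled by lambda2.  If the score
 * beats *dmin, this candidate becomes the new best and the buffers are
 * swapped. */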
2126 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2127                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2128                            int *dmin, int *next_block, int motion_x, int motion_y)
2129 {
2130     int score;
2131     uint8_t *dest_backup[3];
2132
2133     copy_context_before_encode(s, backup, type);
2134
2135     s->block= s->blocks[*next_block];
2136     s->pb= pb[*next_block];
2137     if(s->data_partitioning){
2138         s->pb2   = pb2   [*next_block];
2139         s->tex_pb= tex_pb[*next_block];
2140     }
2141
2142     if(*next_block){
2143         memcpy(dest_backup, s->dest, sizeof(s->dest));
2144         s->dest[0] = s->rd_scratchpad;
2145         s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2146         s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2147         assert(s->linesize >= 32); //FIXME
2148     }
2149
2150     encode_mb(s, motion_x, motion_y);
2151
2152     score= put_bits_count(&s->pb);
2153     if(s->data_partitioning){
2154         score+= put_bits_count(&s->pb2);
2155         score+= put_bits_count(&s->tex_pb);
2156     }
2157
2158     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2159         ff_MPV_decode_mb(s, s->block);
2160
2161         score *= s->lambda2;
2162         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2163     }
2164
2165     if(*next_block){
2166         memcpy(s->dest, dest_backup, sizeof(s->dest));
2167     }
2168
2169     if(score<*dmin){
2170         *dmin= score;
2171         *next_block^=1;
2172
2173         copy_context_after_encode(best, s, type);
2174     }
2175 }
2176
2177 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2178     uint32_t *sq = ff_squareTbl + 256;
2179     int acc=0;
2180     int x,y;
2181
2182     if(w==16 && h==16)
2183         return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2184     else if(w==8 && h==8)
2185         return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2186
2187     for(y=0; y<h; y++){
2188         for(x=0; x<w; x++){
2189             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2190         }
2191     }
2192
2193     assert(acc>=0);
2194
2195     return acc;
2196 }
2197
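/* Sum of squared errors (or NSSE) between the source macroblock and its
 * reconstruction in s->dest[], with the block size clipped at the right and
 * bottom picture borders. */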
2198 static int sse_mb(MpegEncContext *s){
2199     int w= 16;
2200     int h= 16;
2201
2202     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2203     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2204
2205     if(w==16 && h==16)
2206       if(s->avctx->mb_cmp == FF_CMP_NSSE){
2207         return  s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2208                +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2209                +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2210       }else{
2211         return  s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2212                +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2213                +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2214       }
2215     else
2216         return  sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2217                +sse(s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2218                +sse(s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2219 }
2220
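/* Motion-estimation pre-pass for one slice context, scanning the macroblocks
 * in reverse order with the pre_dia_size search diamond. */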
2221 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2222     MpegEncContext *s= *(void**)arg;
2223
2224
2225     s->me.pre_pass=1;
2226     s->me.dia_size= s->avctx->pre_dia_size;
2227     s->first_slice_line=1;
2228     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2229         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2230             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2231         }
2232         s->first_slice_line=0;
2233     }
2234
2235     s->me.pre_pass=0;
2236
2237     return 0;
2238 }
2239
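/* Main motion-estimation pass for one slice context: estimate P- or B-frame
 * motion for every macroblock and store the vectors and candidate macroblock
 * types in the context tables. */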
2240 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2241     MpegEncContext *s= *(void**)arg;
2242
2243     ff_check_alignment();
2244
2245     s->me.dia_size= s->avctx->dia_size;
2246     s->first_slice_line=1;
2247     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2248         s->mb_x=0; //for block init below
2249         ff_init_block_index(s);
2250         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2251             s->block_index[0]+=2;
2252             s->block_index[1]+=2;
2253             s->block_index[2]+=2;
2254             s->block_index[3]+=2;
2255
2256             /* compute motion vector & mb_type and store in context */
2257             if(s->pict_type==AV_PICTURE_TYPE_B)
2258                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2259             else
2260                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2261         }
2262         s->first_slice_line=0;
2263     }
2264     return 0;
2265 }
2266
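/* Compute the spatial variance and mean of every 16x16 luma block in the
 * slice; the results are used later for adaptive quantization and rate
 * control. */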
2267 static int mb_var_thread(AVCodecContext *c, void *arg){
2268     MpegEncContext *s= *(void**)arg;
2269     int mb_x, mb_y;
2270
2271     ff_check_alignment();
2272
2273     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2274         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2275             int xx = mb_x * 16;
2276             int yy = mb_y * 16;
2277             uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2278             int varc;
2279             int sum = s->dsp.pix_sum(pix, s->linesize);
2280
2281             varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2282
2283             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2284             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2285             s->me.mb_var_sum_temp    += varc;
2286         }
2287     }
2288     return 0;
2289 }
2290
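/* Terminate the current slice: merge MPEG-4 data partitions if present, write
 * codec-specific stuffing and byte-align the bitstream. */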
2291 static void write_slice_end(MpegEncContext *s){
2292     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2293         if(s->partitioned_frame){
2294             ff_mpeg4_merge_partitions(s);
2295         }
2296
2297         ff_mpeg4_stuffing(&s->pb);
2298     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2299         ff_mjpeg_encode_stuffing(&s->pb);
2300     }
2301
2302     avpriv_align_put_bits(&s->pb);
2303     flush_put_bits(&s->pb);
2304
2305     if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2306         s->misc_bits+= get_bits_diff(s);
2307 }
2308
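/* Append one 12-byte H.263 macroblock-info record (bit offset, qscale, GOB
 * number, macroblock address and predicted motion vector) to the side data
 * buffer allocated in ff_MPV_encode_picture(). */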
2309 static void write_mb_info(MpegEncContext *s)
2310 {
2311     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2312     int offset = put_bits_count(&s->pb);
2313     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2314     int gobn = s->mb_y / s->gob_index;
2315     int pred_x, pred_y;
2316     if (CONFIG_H263_ENCODER)
2317         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2318     bytestream_put_le32(&ptr, offset);
2319     bytestream_put_byte(&ptr, s->qscale);
2320     bytestream_put_byte(&ptr, gobn);
2321     bytestream_put_le16(&ptr, mba);
2322     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2323     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2324     /* 4MV not implemented */
2325     bytestream_put_byte(&ptr, 0); /* hmv2 */
2326     bytestream_put_byte(&ptr, 0); /* vmv2 */
2327 }
2328
2329 static void update_mb_info(MpegEncContext *s, int startcode)
2330 {
2331     if (!s->mb_info)
2332         return;
2333     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2334         s->mb_info_size += 12;
2335         s->prev_mb_info = s->last_mb_info;
2336     }
2337     if (startcode) {
2338         s->prev_mb_info = put_bits_count(&s->pb)/8;
2339         /* This might have incremented mb_info_size above, and we return without
2340          * actually writing any info into that slot yet. But in that case,
2341          * this function will be called again right after the start code has been
2342          * written, and the mb info will be written then. */
2343         return;
2344     }
2345
2346     s->last_mb_info = put_bits_count(&s->pb)/8;
2347     if (!s->mb_info_size)
2348         s->mb_info_size += 12;
2349     write_mb_info(s);
2350 }
2351
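/* Encode the macroblock rows assigned to one slice context: slice/GOB and
 * video-packet headers, rate-distortion macroblock mode decision and the
 * per-macroblock bitstream output. */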
2352 static int encode_thread(AVCodecContext *c, void *arg){
2353     MpegEncContext *s= *(void**)arg;
2354     int mb_x, mb_y, pdif = 0;
2355     int chr_h= 16>>s->chroma_y_shift;
2356     int i, j;
2357     MpegEncContext best_s, backup_s;
2358     uint8_t bit_buf[2][MAX_MB_BYTES];
2359     uint8_t bit_buf2[2][MAX_MB_BYTES];
2360     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2361     PutBitContext pb[2], pb2[2], tex_pb[2];
2362
2363     ff_check_alignment();
2364
2365     for(i=0; i<2; i++){
2366         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2367         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2368         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2369     }
2370
2371     s->last_bits= put_bits_count(&s->pb);
2372     s->mv_bits=0;
2373     s->misc_bits=0;
2374     s->i_tex_bits=0;
2375     s->p_tex_bits=0;
2376     s->i_count=0;
2377     s->f_count=0;
2378     s->b_count=0;
2379     s->skip_count=0;
2380
2381     for(i=0; i<3; i++){
2382         /* init last dc values */
2383         /* note: quant matrix value (8) is implied here */
2384         s->last_dc[i] = 128 << s->intra_dc_precision;
2385
2386         s->current_picture.f.error[i] = 0;
2387     }
2388     s->mb_skip_run = 0;
2389     memset(s->last_mv, 0, sizeof(s->last_mv));
2390
2391     s->last_mv_dir = 0;
2392
2393     switch(s->codec_id){
2394     case AV_CODEC_ID_H263:
2395     case AV_CODEC_ID_H263P:
2396     case AV_CODEC_ID_FLV1:
2397         if (CONFIG_H263_ENCODER)
2398             s->gob_index = ff_h263_get_gob_height(s);
2399         break;
2400     case AV_CODEC_ID_MPEG4:
2401         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2402             ff_mpeg4_init_partitions(s);
2403         break;
2404     }
2405
2406     s->resync_mb_x=0;
2407     s->resync_mb_y=0;
2408     s->first_slice_line = 1;
2409     s->ptr_lastgob = s->pb.buf;
2410     for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2411         s->mb_x=0;
2412         s->mb_y= mb_y;
2413
2414         ff_set_qscale(s, s->qscale);
2415         ff_init_block_index(s);
2416
2417         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2418             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2419             int mb_type= s->mb_type[xy];
2420 //            int d;
2421             int dmin= INT_MAX;
2422             int dir;
2423
2424             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2425                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2426                 return -1;
2427             }
2428             if(s->data_partitioning){
2429                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
2430                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2431                     av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2432                     return -1;
2433                 }
2434             }
2435
2436             s->mb_x = mb_x;
2437             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
2438             ff_update_block_index(s);
2439
2440             if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2441                 ff_h261_reorder_mb_index(s);
2442                 xy= s->mb_y*s->mb_stride + s->mb_x;
2443                 mb_type= s->mb_type[xy];
2444             }
2445
2446             /* write gob / video packet header  */
2447             if(s->rtp_mode){
2448                 int current_packet_size, is_gob_start;
2449
2450                 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2451
2452                 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2453
2454                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2455
2456                 switch(s->codec_id){
2457                 case AV_CODEC_ID_H263:
2458                 case AV_CODEC_ID_H263P:
2459                     if(!s->h263_slice_structured)
2460                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2461                     break;
2462                 case AV_CODEC_ID_MPEG2VIDEO:
2463                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
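                    /* fall through */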
2464                 case AV_CODEC_ID_MPEG1VIDEO:
2465                     if(s->mb_skip_run) is_gob_start=0;
2466                     break;
2467                 }
2468
2469                 if(is_gob_start){
2470                     if(s->start_mb_y != mb_y || mb_x!=0){
2471                         write_slice_end(s);
2472
2473                         if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2474                             ff_mpeg4_init_partitions(s);
2475                         }
2476                     }
2477
2478                     assert((put_bits_count(&s->pb)&7) == 0);
2479                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2480
2481                     if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2482                         int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2483                         int d= 100 / s->avctx->error_rate;
2484                         if(r % d == 0){
2485                             current_packet_size=0;
2486                             s->pb.buf_ptr= s->ptr_lastgob;
2487                             assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2488                         }
2489                     }
2490
2491                     if (s->avctx->rtp_callback){
2492                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2493                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2494                     }
2495                     update_mb_info(s, 1);
2496
2497                     switch(s->codec_id){
2498                     case AV_CODEC_ID_MPEG4:
2499                         if (CONFIG_MPEG4_ENCODER) {
2500                             ff_mpeg4_encode_video_packet_header(s);
2501                             ff_mpeg4_clean_buffers(s);
2502                         }
2503                     break;
2504                     case AV_CODEC_ID_MPEG1VIDEO:
2505                     case AV_CODEC_ID_MPEG2VIDEO:
2506                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2507                             ff_mpeg1_encode_slice_header(s);
2508                             ff_mpeg1_clean_buffers(s);
2509                         }
2510                     break;
2511                     case AV_CODEC_ID_H263:
2512                     case AV_CODEC_ID_H263P:
2513                         if (CONFIG_H263_ENCODER)
2514                             ff_h263_encode_gob_header(s, mb_y);
2515                     break;
2516                     }
2517
2518                     if(s->flags&CODEC_FLAG_PASS1){
2519                         int bits= put_bits_count(&s->pb);
2520                         s->misc_bits+= bits - s->last_bits;
2521                         s->last_bits= bits;
2522                     }
2523
2524                     s->ptr_lastgob += current_packet_size;
2525                     s->first_slice_line=1;
2526                     s->resync_mb_x=mb_x;
2527                     s->resync_mb_y=mb_y;
2528                 }
2529             }
2530
2531             if(  (s->resync_mb_x   == s->mb_x)
2532                && s->resync_mb_y+1 == s->mb_y){
2533                 s->first_slice_line=0;
2534             }
2535
2536             s->mb_skipped=0;
2537             s->dquant=0; //only for QP_RD
2538
2539             update_mb_info(s, 0);
2540
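            /* Several candidate coding modes (or QP trellis) are possible for
             * this macroblock: encode each candidate with encode_mb_hq() into
             * alternating scratch bitstreams and keep the cheapest one. */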
2541             if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2542                 int next_block=0;
2543                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2544
2545                 copy_context_before_encode(&backup_s, s, -1);
2546                 backup_s.pb= s->pb;
2547                 best_s.data_partitioning= s->data_partitioning;
2548                 best_s.partitioned_frame= s->partitioned_frame;
2549                 if(s->data_partitioning){
2550                     backup_s.pb2= s->pb2;
2551                     backup_s.tex_pb= s->tex_pb;
2552                 }
2553
2554                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2555                     s->mv_dir = MV_DIR_FORWARD;
2556                     s->mv_type = MV_TYPE_16X16;
2557                     s->mb_intra= 0;
2558                     s->mv[0][0][0] = s->p_mv_table[xy][0];
2559                     s->mv[0][0][1] = s->p_mv_table[xy][1];
2560                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2561                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2562                 }
2563                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2564                     s->mv_dir = MV_DIR_FORWARD;
2565                     s->mv_type = MV_TYPE_FIELD;
2566                     s->mb_intra= 0;
2567                     for(i=0; i<2; i++){
2568                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2569                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2570                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2571                     }
2572                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2573                                  &dmin, &next_block, 0, 0);
2574                 }
2575                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2576                     s->mv_dir = MV_DIR_FORWARD;
2577                     s->mv_type = MV_TYPE_16X16;
2578                     s->mb_intra= 0;
2579                     s->mv[0][0][0] = 0;
2580                     s->mv[0][0][1] = 0;
2581                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2582                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2583                 }
2584                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2585                     s->mv_dir = MV_DIR_FORWARD;
2586                     s->mv_type = MV_TYPE_8X8;
2587                     s->mb_intra= 0;
2588                     for(i=0; i<4; i++){
2589                         s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2590                         s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2591                     }
2592                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2593                                  &dmin, &next_block, 0, 0);
2594                 }
2595                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2596                     s->mv_dir = MV_DIR_FORWARD;
2597                     s->mv_type = MV_TYPE_16X16;
2598                     s->mb_intra= 0;
2599                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2600                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2601                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2602                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2603                 }
2604                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2605                     s->mv_dir = MV_DIR_BACKWARD;
2606                     s->mv_type = MV_TYPE_16X16;
2607                     s->mb_intra= 0;
2608                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2609                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2610                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2611                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2612                 }
2613                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2614                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2615                     s->mv_type = MV_TYPE_16X16;
2616                     s->mb_intra= 0;
2617                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2618                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2619                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2620                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2621                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2622                                  &dmin, &next_block, 0, 0);
2623                 }
2624                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2625                     s->mv_dir = MV_DIR_FORWARD;
2626                     s->mv_type = MV_TYPE_FIELD;
2627                     s->mb_intra= 0;
2628                     for(i=0; i<2; i++){
2629                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2630                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2631                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2632                     }
2633                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2634                                  &dmin, &next_block, 0, 0);
2635                 }
2636                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2637                     s->mv_dir = MV_DIR_BACKWARD;
2638                     s->mv_type = MV_TYPE_FIELD;
2639                     s->mb_intra= 0;
2640                     for(i=0; i<2; i++){
2641                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2642                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2643                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2644                     }
2645                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2646                                  &dmin, &next_block, 0, 0);
2647                 }
2648                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2649                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2650                     s->mv_type = MV_TYPE_FIELD;
2651                     s->mb_intra= 0;
2652                     for(dir=0; dir<2; dir++){
2653                         for(i=0; i<2; i++){
2654                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2655                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2656                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2657                         }
2658                     }
2659                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2660                                  &dmin, &next_block, 0, 0);
2661                 }
2662                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2663                     s->mv_dir = 0;
2664                     s->mv_type = MV_TYPE_16X16;
2665                     s->mb_intra= 1;
2666                     s->mv[0][0][0] = 0;
2667                     s->mv[0][0][1] = 0;
2668                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2669                                  &dmin, &next_block, 0, 0);
2670                     if(s->h263_pred || s->h263_aic){
2671                         if(best_s.mb_intra)
2672                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2673                         else
2674                             ff_clean_intra_table_entries(s); //old mode?
2675                     }
2676                 }
2677
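                /* QP_RD: around the best 16x16 mode, additionally try small
                 * quantizer deltas (dquant_tab) and keep whichever gives the
                 * lowest rate-distortion score. */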
2678                 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2679                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2680                         const int last_qp= backup_s.qscale;
2681                         int qpi, qp, dc[6];
2682                         int16_t ac[6][16];
2683                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2684                         static const int dquant_tab[4]={-1,1,-2,2};
2685
2686                         assert(backup_s.dquant == 0);
2687
2688                         //FIXME intra
2689                         s->mv_dir= best_s.mv_dir;
2690                         s->mv_type = MV_TYPE_16X16;
2691                         s->mb_intra= best_s.mb_intra;
2692                         s->mv[0][0][0] = best_s.mv[0][0][0];
2693                         s->mv[0][0][1] = best_s.mv[0][0][1];
2694                         s->mv[1][0][0] = best_s.mv[1][0][0];
2695                         s->mv[1][0][1] = best_s.mv[1][0][1];
2696
2697                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2698                         for(; qpi<4; qpi++){
2699                             int dquant= dquant_tab[qpi];
2700                             qp= last_qp + dquant;
2701                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2702                                 continue;
2703                             backup_s.dquant= dquant;
2704                             if(s->mb_intra && s->dc_val[0]){
2705                                 for(i=0; i<6; i++){
2706                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
2707                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2708                                 }
2709                             }
2710
2711                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2712                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2713                             if(best_s.qscale != qp){
2714                                 if(s->mb_intra && s->dc_val[0]){
2715                                     for(i=0; i<6; i++){
2716                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
2717                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2718                                     }
2719                                 }
2720                             }
2721                         }
2722                     }
2723                 }
2724                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2725                     int mx= s->b_direct_mv_table[xy][0];
2726                     int my= s->b_direct_mv_table[xy][1];
2727
2728                     backup_s.dquant = 0;
2729                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2730                     s->mb_intra= 0;
2731                     ff_mpeg4_set_direct_mv(s, mx, my);
2732                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2733                                  &dmin, &next_block, mx, my);
2734                 }
2735                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2736                     backup_s.dquant = 0;
2737                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2738                     s->mb_intra= 0;
2739                     ff_mpeg4_set_direct_mv(s, 0, 0);
2740                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2741                                  &dmin, &next_block, 0, 0);
2742                 }
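                     /* Skip rate-distortion: if the best candidate is inter coded and has coded
                      * coefficients, also try it with the residual skipped entirely (skipdct=1)
                      * in case signalling "no texture" is cheaper. */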
2743                 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2744                     int coded=0;
2745                     for(i=0; i<6; i++)
2746                         coded |= s->block_last_index[i];
2747                     if(coded){
2748                         int mx,my;
2749                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
2750                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2751                             mx=my=0; //FIXME find the one we actually used
2752                             ff_mpeg4_set_direct_mv(s, mx, my);
2753                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2754                             mx= s->mv[1][0][0];
2755                             my= s->mv[1][0][1];
2756                         }else{
2757                             mx= s->mv[0][0][0];
2758                             my= s->mv[0][0][1];
2759                         }
2760
2761                         s->mv_dir= best_s.mv_dir;
2762                         s->mv_type = best_s.mv_type;
2763                         s->mb_intra= 0;
2764 /*                        s->mv[0][0][0] = best_s.mv[0][0][0];
2765                         s->mv[0][0][1] = best_s.mv[0][0][1];
2766                         s->mv[1][0][0] = best_s.mv[1][0][0];
2767                         s->mv[1][0][1] = best_s.mv[1][0][1];*/
2768                         backup_s.dquant= 0;
2769                         s->skipdct=1;
2770                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2771                                         &dmin, &next_block, mx, my);
2772                         s->skipdct=0;
2773                     }
2774                 }
2775
2776                 s->current_picture.f.qscale_table[xy] = best_s.qscale;
2777
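                     /* The winner is known: restore its coding context and copy its bits from the
                      * scratch bit buffer back into the real bitstream writer(s). */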
2778                 copy_context_after_encode(s, &best_s, -1);
2779
2780                 pb_bits_count= put_bits_count(&s->pb);
2781                 flush_put_bits(&s->pb);
2782                 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2783                 s->pb= backup_s.pb;
2784
2785                 if(s->data_partitioning){
2786                     pb2_bits_count= put_bits_count(&s->pb2);
2787                     flush_put_bits(&s->pb2);
2788                     avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2789                     s->pb2= backup_s.pb2;
2790
2791                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
2792                     flush_put_bits(&s->tex_pb);
2793                     avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2794                     s->tex_pb= backup_s.tex_pb;
2795                 }
2796                 s->last_bits= put_bits_count(&s->pb);
2797
2798                 if (CONFIG_H263_ENCODER &&
2799                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2800                     ff_h263_update_motion_val(s);
2801
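                     /* If the best candidate was reconstructed into the RD scratchpad rather than
                      * the frame buffer, copy the pixels into place. */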
2802                 if(next_block==0){ //FIXME 16 vs linesize16
2803                     s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad                     , s->linesize  ,16);
2804                     s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
2805                     s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2806                 }
2807
2808                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2809                     ff_MPV_decode_mb(s, s->block);
2810             } else {
2811                 int motion_x = 0, motion_y = 0;
2812                 s->mv_type=MV_TYPE_16X16;
2813                 // only one MB-Type possible
2814
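                     /* Set the prediction direction, MV type and motion vectors for the single
                      * candidate type, taken from the corresponding motion-estimation tables. */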
2815                 switch(mb_type){
2816                 case CANDIDATE_MB_TYPE_INTRA:
2817                     s->mv_dir = 0;
2818                     s->mb_intra= 1;
2819                     motion_x= s->mv[0][0][0] = 0;
2820                     motion_y= s->mv[0][0][1] = 0;
2821                     break;
2822                 case CANDIDATE_MB_TYPE_INTER:
2823                     s->mv_dir = MV_DIR_FORWARD;
2824                     s->mb_intra= 0;
2825                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2826                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2827                     break;
2828                 case CANDIDATE_MB_TYPE_INTER_I:
2829                     s->mv_dir = MV_DIR_FORWARD;
2830                     s->mv_type = MV_TYPE_FIELD;
2831                     s->mb_intra= 0;
2832                     for(i=0; i<2; i++){
2833                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2834                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2835                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2836                     }
2837                     break;
2838                 case CANDIDATE_MB_TYPE_INTER4V:
2839                     s->mv_dir = MV_DIR_FORWARD;
2840                     s->mv_type = MV_TYPE_8X8;
2841                     s->mb_intra= 0;
2842                     for(i=0; i<4; i++){
2843                         s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2844                         s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2845                     }
2846                     break;
2847                 case CANDIDATE_MB_TYPE_DIRECT:
2848                     if (CONFIG_MPEG4_ENCODER) {
2849                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2850                         s->mb_intra= 0;
2851                         motion_x=s->b_direct_mv_table[xy][0];
2852                         motion_y=s->b_direct_mv_table[xy][1];
2853                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2854                     }
2855                     break;
2856                 case CANDIDATE_MB_TYPE_DIRECT0:
2857                     if (CONFIG_MPEG4_ENCODER) {
2858                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2859                         s->mb_intra= 0;
2860                         ff_mpeg4_set_direct_mv(s, 0, 0);
2861                     }
2862                     break;
2863                 case CANDIDATE_MB_TYPE_BIDIR:
2864                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2865                     s->mb_intra= 0;
2866                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2867                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2868                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2869                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2870                     break;
2871                 case CANDIDATE_MB_TYPE_BACKWARD:
2872                     s->mv_dir = MV_DIR_BACKWARD;
2873                     s->mb_intra= 0;
2874                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2875                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2876                     break;
2877                 case CANDIDATE_MB_TYPE_FORWARD:
2878                     s->mv_dir = MV_DIR_FORWARD;
2879                     s->mb_intra= 0;
2880                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2881                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2882                     break;
2883                 case CANDIDATE_MB_TYPE_FORWARD_I:
2884                     s->mv_dir = MV_DIR_FORWARD;
2885                     s->mv_type = MV_TYPE_FIELD;
2886                     s->mb_intra= 0;
2887                     for(i=0; i<2; i++){
2888                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2889                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2890                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2891                     }
2892                     break;
2893                 case CANDIDATE_MB_TYPE_BACKWARD_I:
2894                     s->mv_dir = MV_DIR_BACKWARD;
2895                     s->mv_type = MV_TYPE_FIELD;
2896                     s->mb_intra= 0;
2897                     for(i=0; i<2; i++){
2898                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2899                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2900                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2901                     }
2902                     break;
2903                 case CANDIDATE_MB_TYPE_BIDIR_I:
2904                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2905                     s->mv_type = MV_TYPE_FIELD;
2906                     s->mb_intra= 0;
2907                     for(dir=0; dir<2; dir++){
2908                         for(i=0; i<2; i++){
2909                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2910                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2911                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2912                         }
2913                     }
2914                     break;
2915                 default:
2916                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2917                 }
2918
2919                 encode_mb(s, motion_x, motion_y);
2920
2921                 // RAL: Update last macroblock type
2922                 s->last_mv_dir = s->mv_dir;
2923
2924                 if (CONFIG_H263_ENCODER &&
2925                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2926                     ff_h263_update_motion_val(s);
2927
2928                 ff_MPV_decode_mb(s, s->block);
2929             }
2930
2931             /* clean the MV table in IPS frames for direct mode in B frames */
2932             /* Zero the P MV table entry for intra MBs (in I/P/S frames), so that direct mode in later B-frames sees a zero co-located vector. */
2933                 s->p_mv_table[xy][0]=0;
2934                 s->p_mv_table[xy][1]=0;
2935             }
2936
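                 /* Accumulate the per-plane squared error against the source frame; this feeds
                  * the encoder's PSNR reporting. */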
2937             if(s->flags&CODEC_FLAG_PSNR){
2938                 int w= 16;
2939                 int h= 16;
2940
2941                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2942                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2943
2944                 s->current_picture.f.error[0] += sse(
2945                     s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2946                     s->dest[0], w, h, s->linesize);
2947                 s->current_picture.f.error[1] += sse(
2948                     s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
2949                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2950                 s->current_picture.f.error[2] += sse(
2951                     s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
2952                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2953             }
2954             if(s->loop_filter){
2955                 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2956                     ff_h263_loop_filter(s);
2957             }
2958             av_dlog(s->avctx, "MB %d %d bits\n",
2959                     s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
2960         }
2961     }
2962
2963     // Not pretty, but this must be written before write_slice_end() below, so it has to be here.
2964     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
2965         ff_msmpeg4_encode_ext_header(s);
2966
2967     write_slice_end(s);
2968
2969     /* Send the last GOB if RTP */
2970     if (s->avctx->rtp_callback) {
2971         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
2972         pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
2973         /* Call the RTP callback to send the last GOB */
2974         emms_c();
2975         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
2976     }
2977
2978     return 0;
2979 }
2980
2981 #define MERGE(field) dst->field += src->field; src->field=0
2982 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
2983     MERGE(me.scene_change_score);
2984     MERGE(me.mc_mb_var_sum_temp);
2985     MERGE(me.mb_var_sum_temp);
2986 }
2987
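     /* Merge the per-slice-thread statistics into the main context and append each
      * thread's bitstream to the main one. */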
2988 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
2989     int i;
2990
2991     MERGE(dct_count[0]); //note, the other dct vars are not part of the context
2992     MERGE(dct_count[1]);
2993     MERGE(mv_bits);
2994     MERGE(i_tex_bits);
2995     MERGE(p_tex_bits);
2996     MERGE(i_count);
2997     MERGE(f_count);
2998     MERGE(b_count);
2999     MERGE(skip_count);
3000     MERGE(misc_bits);
3001     MERGE(er.error_count);
3002     MERGE(padding_bug_score);
3003     MERGE(current_picture.f.error[0]);
3004     MERGE(current_picture.f.error[1]);
3005     MERGE(current_picture.f.error[2]);
3006
3007     if(dst->avctx->noise_reduction){
3008         for(i=0; i<64; i++){
3009             MERGE(dct_error_sum[0][i]);
3010             MERGE(dct_error_sum[1][i]);
3011         }
3012     }
3013
3014     assert(put_bits_count(&src->pb) % 8 ==0);
3015     assert(put_bits_count(&dst->pb) % 8 ==0);
3016     avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3017     flush_put_bits(&dst->pb);
3018 }
3019
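     /* Pick the frame-level quality: either the deferred next_lambda or the rate-control
      * estimate; with adaptive quantization the per-MB qscale table is cleaned up too. */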
3020 static int estimate_qp(MpegEncContext *s, int dry_run){
3021     if (s->next_lambda){
3022         s->current_picture_ptr->f.quality =
3023         s->current_picture.f.quality = s->next_lambda;
3024         if(!dry_run) s->next_lambda= 0;
3025     } else if (!s->fixed_qscale) {
3026         s->current_picture_ptr->f.quality =
3027         s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
3028         if (s->current_picture.f.quality < 0)
3029             return -1;
3030     }
3031
3032     if(s->adaptive_quant){
3033         switch(s->codec_id){
3034         case AV_CODEC_ID_MPEG4:
3035             if (CONFIG_MPEG4_ENCODER)
3036                 ff_clean_mpeg4_qscales(s);
3037             break;
3038         case AV_CODEC_ID_H263:
3039         case AV_CODEC_ID_H263P:
3040         case AV_CODEC_ID_FLV1:
3041             if (CONFIG_H263_ENCODER)
3042                 ff_clean_h263_qscales(s);
3043             break;
3044         default:
3045             ff_init_qscale_tab(s);
3046         }
3047
3048         s->lambda= s->lambda_table[0];
3049         //FIXME broken
3050     }else
3051         s->lambda = s->current_picture.f.quality;
3052     update_qscale(s);
3053     return 0;
3054 }
3055
3056 /* Compute pp_time (distance between the two surrounding reference frames) and pb_time (distance from the past reference to the current B-frame); must be called before writing the picture header. */
3057 static void set_frame_distances(MpegEncContext * s){
3058     assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3059     s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3060
3061     if(s->pict_type==AV_PICTURE_TYPE_B){
3062         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3063         assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3064     }else{
3065         s->pp_time= s->time - s->last_non_b_time;
3066         s->last_non_b_time= s->time;
3067         assert(s->picture_number==0 || s->pp_time > 0);
3068     }
3069 }
3070
3071 static int encode_picture(MpegEncContext *s, int picture_number)
3072 {
3073     int i, ret;
3074     int bits;
3075     int context_count = s->slice_context_count;
3076
3077     s->picture_number = picture_number;
3078
3079     /* Reset the average MB variance */
3080     s->me.mb_var_sum_temp    =
3081     s->me.mc_mb_var_sum_temp = 0;
3082
3083     /* we need to initialize some time vars before we can encode b-frames */
3084     // RAL: Condition added for MPEG1VIDEO
3085     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3086         set_frame_distances(s);
3087     if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3088         ff_set_mpeg4_time(s);
3089
3090     s->me.scene_change_score=0;
3091
3092 //    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3093
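         /* Alternate the motion-compensation rounding between reference frames so that the
          * halfpel-averaging rounding bias does not accumulate (flipflop rounding). */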
3094     if(s->pict_type==AV_PICTURE_TYPE_I){
3095         if(s->msmpeg4_version >= 3) s->no_rounding=1;
3096         else                        s->no_rounding=0;
3097     }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3098         if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3099             s->no_rounding ^= 1;
3100     }
3101
3102     if(s->flags & CODEC_FLAG_PASS2){
3103         if (estimate_qp(s,1) < 0)
3104             return -1;
3105         ff_get_2pass_fcode(s);
3106     }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3107         if(s->pict_type==AV_PICTURE_TYPE_B)
3108             s->lambda= s->last_lambda_for[s->pict_type];
3109         else
3110             s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3111         update_qscale(s);
3112     }
3113
3114     s->mb_intra=0; //for the rate distortion & bit compare functions
3115     for(i=1; i<context_count; i++){
3116         ret = ff_update_duplicate_context(s->thread_context[i], s);
3117         if (ret < 0)
3118             return ret;
3119     }
3120
3121     if(ff_init_me(s)<0)
3122         return -1;
3123
3124     /* Estimate motion for every MB */
3125     if(s->pict_type != AV_PICTURE_TYPE_I){
3126         s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3127         s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3128         if (s->pict_type != AV_PICTURE_TYPE_B) {
3129             if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3130                 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3131             }
3132         }
3133
3134         s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3135     }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3136         /* I-Frame */
3137         for(i=0; i<s->mb_stride*s->mb_height; i++)
3138             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3139
3140         if(!s->fixed_qscale){
3141             /* estimate spatial complexity for I-frame rate control */
3142             s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3143         }
3144     }
3145     for(i=1; i<context_count; i++){
3146         merge_context_after_me(s, s->thread_context[i]);
3147     }
3148     s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3149     s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
3150     emms_c();
3151
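         /* If motion estimation flagged a scene change, encode this frame as intra instead. */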
3152     if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3153         s->pict_type= AV_PICTURE_TYPE_I;
3154         for(i=0; i<s->mb_stride*s->mb_height; i++)
3155             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3156         av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3157                 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3158     }
3159
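         /* Unless unrestricted MVs are in use, pick the smallest f_code/b_code that covers the
          * estimated motion vectors and clamp or re-type vectors that still exceed the range. */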
3160     if(!s->umvplus){
3161         if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3162             s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3163
3164             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3165                 int a,b;
3166                 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3167                 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3168                 s->f_code= FFMAX3(s->f_code, a, b);
3169             }
3170
3171             ff_fix_long_p_mvs(s);
3172             ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3173             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3174                 int j;
3175                 for(i=0; i<2; i++){
3176                     for(j=0; j<2; j++)
3177                         ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3178                                         s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
3179                 }
3180             }
3181         }
3182
3183         if(s->pict_type==AV_PICTURE_TYPE_B){
3184             int a, b;
3185
3186             a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3187             b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3188             s->f_code = FFMAX(a, b);
3189
3190             a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3191             b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3192             s->b_code = FFMAX(a, b);
3193
3194             ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3195             ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3196             ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3197             ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3198             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3199                 int dir, j;
3200                 for(dir=0; dir<2; dir++){
3201                     for(i=0; i<2; i++){
3202                         for(j=0; j<2; j++){
3203                             int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3204                                           : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3205                             ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3206                                             s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3207                         }
3208                     }
3209                 }
3210             }
3211         }
3212     }
3213
3214     if (estimate_qp(s, 0) < 0)
3215         return -1;
3216
3217     if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3218         s->qscale= 3; //reduce clipping problems
3219
3220     if (s->out_format == FMT_MJPEG) {
3221         /* For MJPEG there is no per-macroblock qscale, so the qscale is folded into the quantization matrix itself; the nominal qscale is then fixed to 8. */
3222         for(i=1;i<64;i++){
3223             int j= s->dsp.idct_permutation[i];
3224
3225             s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3226         }
3227         s->y_dc_scale_table=
3228         s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3229         s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3230         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3231                        s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3232         s->qscale= 8;
3233     }
3234
3235     //FIXME var duplication
3236     s->current_picture_ptr->f.key_frame =
3237     s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3238     s->current_picture_ptr->f.pict_type =
3239     s->current_picture.f.pict_type = s->pict_type;
3240
3241     if (s->current_picture.f.key_frame)
3242         s->picture_in_gop_number=0;
3243
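         /* Write the codec-specific picture header; the bit positions around it are recorded so
          * that header_bits can be reported. */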
3244     s->last_bits= put_bits_count(&s->pb);
3245     switch(s->out_format) {
3246     case FMT_MJPEG:
3247         if (CONFIG_MJPEG_ENCODER)
3248             ff_mjpeg_encode_picture_header(s);
3249         break;
3250     case FMT_H261:
3251         if (CONFIG_H261_ENCODER)
3252             ff_h261_encode_picture_header(s, picture_number);
3253         break;
3254     case FMT_H263:
3255         if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3256             ff_wmv2_encode_picture_header(s, picture_number);
3257         else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3258             ff_msmpeg4_encode_picture_header(s, picture_number);
3259         else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3260             ff_mpeg4_encode_picture_header(s, picture_number);
3261         else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3262             ff_rv10_encode_picture_header(s, picture_number);
3263         else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3264             ff_rv20_encode_picture_header(s, picture_number);
3265         else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3266             ff_flv_encode_picture_header(s, picture_number);
3267         else if (CONFIG_H263_ENCODER)
3268             ff_h263_encode_picture_header(s, picture_number);
3269         break;
3270     case FMT_MPEG1:
3271         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3272             ff_mpeg1_encode_picture_header(s, picture_number);
3273         break;
3274     case FMT_H264:
3275         break;
3276     default:
3277         assert(0);
3278     }
3279     bits= put_bits_count(&s->pb);
3280     s->header_bits= bits - s->last_bits;
3281
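         /* Propagate the ME results to the slice-thread contexts, encode all slices in parallel,
          * then merge the per-thread statistics and bitstreams back into the main context. */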
3282     for(i=1; i<context_count; i++){
3283         update_duplicate_context_after_me(s->thread_context[i], s);
3284     }
3285     s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3286     for(i=1; i<context_count; i++){
3287         merge_context_after_encode(s, s->thread_context[i]);
3288     }
3289     emms_c();
3290     return 0;
3291 }
3292
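     /* Noise reduction: track the average magnitude of each DCT coefficient (dct_error_sum,
      * dct_count) and shrink coefficients towards zero by the per-coefficient dct_offset
      * derived from those statistics. */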
3293 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3294     const int intra= s->mb_intra;
3295     int i;
3296
3297     s->dct_count[intra]++;
3298
3299     for(i=0; i<64; i++){
3300         int level= block[i];
3301
3302         if(level){