1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard.
4  *
5  * This library is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU Lesser General Public
7  * License as published by the Free Software Foundation; either
8  * version 2 of the License, or (at your option) any later version.
9  *
10  * This library is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * Lesser General Public License for more details.
14  *
15  * You should have received a copy of the GNU Lesser General Public
16  * License along with this library; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
18  *
19  * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
20  */
21  
22 #include <ctype.h>
23 #include "avcodec.h"
24 #include "dsputil.h"
25 #include "mpegvideo.h"
26 #include "simple_idct.h"
27
28 #ifdef USE_FASTMEMCPY
29 #include "fastmemcpy.h"
30 #endif
31
32 //#undef NDEBUG
33 //#include <assert.h>
34
35 static void encode_picture(MpegEncContext *s, int picture_number);
36 static void dct_unquantize_mpeg1_c(MpegEncContext *s, 
37                                    DCTELEM *block, int n, int qscale);
38 static void dct_unquantize_mpeg2_c(MpegEncContext *s,
39                                    DCTELEM *block, int n, int qscale);
40 static void dct_unquantize_h263_c(MpegEncContext *s, 
41                                   DCTELEM *block, int n, int qscale);
42 static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w);
43 static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
44 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
45
46 void (*draw_edges)(UINT8 *buf, int wrap, int width, int height, int w)= draw_edges_c;
47
48
49 /* enable all paranoid tests for rounding, overflows, etc... */
50 //#define PARANOID
51
52 //#define DEBUG
53
54
55 /* for jpeg fast DCT */
56 #define CONST_BITS 14
57
58 static const uint16_t aanscales[64] = {
59     /* precomputed values scaled up by 14 bits */
60     16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
61     22725, 31521, 29692, 26722, 22725, 17855, 12299,  6270,
62     21407, 29692, 27969, 25172, 21407, 16819, 11585,  5906,
63     19266, 26722, 25172, 22654, 19266, 15137, 10426,  5315,
64     16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
65     12873, 17855, 16819, 15137, 12873, 10114,  6967,  3552,
66     8867 , 12299, 11585, 10426,  8867,  6967,  4799,  2446,
67     4520 ,  6270,  5906,  5315,  4520,  3552,  2446,  1247
68 };
69
70 /* Input permutation for the simple_idct_mmx */
71 static const uint8_t simple_mmx_permutation[64]={
72         0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D, 
73         0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D, 
74         0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D, 
75         0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F, 
76         0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F, 
77         0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D, 
78         0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F, 
79         0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
80 };
81
82 static const uint8_t h263_chroma_roundtab[16] = {
83 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
84     0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
85 };
86
87 static UINT16 (*default_mv_penalty)[MAX_MV*2+1]=NULL;
88 static UINT8 default_fcode_tab[MAX_MV*2+1];
89
90 enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
91
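/* Build the per-qscale quantizer multiplier tables (qmat, qmat16 and
   qmat16_bias) for one quantization matrix. The scaling depends on the
   forward DCT in use: the AAN (ifast) DCT needs the aanscales factor folded
   in, while the accurate DCTs only need 1/(qscale * quant_matrix). */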
92 static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[64], uint16_t (*qmat16_bias)[64],
93                            const UINT16 *quant_matrix, int bias, int qmin, int qmax)
94 {
95     int qscale;
96
97     for(qscale=qmin; qscale<=qmax; qscale++){
98         int i;
99         if (s->fdct == ff_jpeg_fdct_islow) {
100             for(i=0;i<64;i++) {
101                 const int j= s->idct_permutation[i];
102                 /* 16 <= qscale * quant_matrix[i] <= 7905 */
103                 /* 19952         <= aanscales[i] * qscale * quant_matrix[i]           <= 249205026 */
104                 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
105                 /* 3444240       >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
106                 
107                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / 
108                                 (qscale * quant_matrix[j]));
109             }
110         } else if (s->fdct == fdct_ifast) {
111             for(i=0;i<64;i++) {
112                 const int j= s->idct_permutation[i];
113                 /* 16 <= qscale * quant_matrix[i] <= 7905 */
114                 /* 19952         <= aanscales[i] * qscale * quant_matrix[i]           <= 249205026 */
115                 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
116                 /* 3444240       >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
117                 
118                 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) / 
119                                 (aanscales[i] * qscale * quant_matrix[j]));
120             }
121         } else {
122             for(i=0;i<64;i++) {
123                 const int j= s->idct_permutation[i];
124                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
125                    So 16           <= qscale * quant_matrix[i]             <= 7905
126                    so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
127                    so 32768        >= (1<<19) / (qscale * quant_matrix[i]) >= 67
128                 */
129                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
130 //                qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
131                 qmat16[qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);
132
133                 if(qmat16[qscale][i]==0 || qmat16[qscale][i]==128*256) qmat16[qscale][i]=128*256-1;
134                 qmat16_bias[qscale][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][i]);
135             }
136         }
137     }
138 }
139 // move into common.c perhaps 
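/* Allocate and zero 'size' bytes into p; on failure print an error and jump
   to the local 'fail' label of the surrounding function. */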
140 #define CHECKED_ALLOCZ(p, size)\
141 {\
142     p= av_mallocz(size);\
143     if(p==NULL){\
144         perror("malloc");\
145         goto fail;\
146     }\
147 }
148
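/* Initialize a ScanTable: store the raw scan order, build the
   IDCT-permutated variant, and record in raster_end[] the highest permutated
   index reached up to each scan position (plus the inverse table on PPC). */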
149 void ff_init_scantable(MpegEncContext *s, ScanTable *st, const UINT8 *src_scantable){
150     int i;
151     int end;
152     
153     st->scantable= src_scantable;
154
155     for(i=0; i<64; i++){
156         int j;
157         j = src_scantable[i];
158         st->permutated[i] = s->idct_permutation[j];
159 #ifdef ARCH_POWERPC
160         st->inverse[j] = i;
161 #endif
162     }
163     
164     end=-1;
165     for(i=0; i<64; i++){
166         int j;
167         j = st->permutated[i];
168         if(j>end) end=j;
169         st->raster_end[i]= end;
170     }
171 }
172
173 /* XXX: these functions should be removed ASAP when all IDCTs are
174  converted */
175 // *FIXME* this is an ugly hack using local statics
176 static void (*ff_put_pixels_clamped)(const DCTELEM *block, UINT8 *pixels, int line_size);
177 static void (*ff_add_pixels_clamped)(const DCTELEM *block, UINT8 *pixels, int line_size);
178 static void ff_jref_idct_put(UINT8 *dest, int line_size, DCTELEM *block)
179 {
180     j_rev_dct (block);
181     ff_put_pixels_clamped(block, dest, line_size);
182 }
183 static void ff_jref_idct_add(UINT8 *dest, int line_size, DCTELEM *block)
184 {
185     j_rev_dct (block);
186     ff_add_pixels_clamped(block, dest, line_size);
187 }
188
189 /* init common dct for both encoder and decoder */
190 int DCT_common_init(MpegEncContext *s)
191 {
192     int i;
193
194     ff_put_pixels_clamped = s->dsp.put_pixels_clamped;
195     ff_add_pixels_clamped = s->dsp.add_pixels_clamped;
196
197     s->dct_unquantize_h263 = dct_unquantize_h263_c;
198     s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_c;
199     s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_c;
200     s->dct_quantize= dct_quantize_c;
201
202     if(s->avctx->dct_algo==FF_DCT_FASTINT)
203         s->fdct = fdct_ifast;
204     else
205         s->fdct = ff_jpeg_fdct_islow; //slow/accurate/default
206
207     if(s->avctx->idct_algo==FF_IDCT_INT){
208         s->idct_put= ff_jref_idct_put;
209         s->idct_add= ff_jref_idct_add;
210         s->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
211     }else{ //accurate/default
212         s->idct_put= simple_idct_put;
213         s->idct_add= simple_idct_add;
214         s->idct_permutation_type= FF_NO_IDCT_PERM;
215     }
216         
217 #ifdef HAVE_MMX
218     MPV_common_init_mmx(s);
219 #endif
220 #ifdef ARCH_ALPHA
221     MPV_common_init_axp(s);
222 #endif
223 #ifdef HAVE_MLIB
224     MPV_common_init_mlib(s);
225 #endif
226 #ifdef HAVE_MMI
227     MPV_common_init_mmi(s);
228 #endif
229 #ifdef ARCH_ARMV4L
230     MPV_common_init_armv4l(s);
231 #endif
232 #ifdef ARCH_POWERPC
233     MPV_common_init_ppc(s);
234 #endif
235
236     s->fast_dct_quantize= s->dct_quantize;
237
238     if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
239         s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
240     }
241
242     switch(s->idct_permutation_type){
243     case FF_NO_IDCT_PERM:
244         for(i=0; i<64; i++)
245             s->idct_permutation[i]= i;
246         break;
247     case FF_LIBMPEG2_IDCT_PERM:
248         for(i=0; i<64; i++)
249             s->idct_permutation[i]= (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2);
250         break;
251     case FF_SIMPLE_IDCT_PERM:
252         for(i=0; i<64; i++)
253             s->idct_permutation[i]= simple_mmx_permutation[i];
254         break;
255     case FF_TRANSPOSE_IDCT_PERM:
256         for(i=0; i<64; i++)
257             s->idct_permutation[i]= ((i&7)<<3) | (i>>3);
258         break;
259     default:
260         fprintf(stderr, "Internal error, IDCT permutation not set\n");
261         return -1;
262     }
263
264
265     /* load & permute scantables
266        note: only WMV uses different ones
267     */
268     ff_init_scantable(s, &s->inter_scantable  , ff_zigzag_direct);
269     ff_init_scantable(s, &s->intra_scantable  , ff_zigzag_direct);
270     ff_init_scantable(s, &s->intra_h_scantable, ff_alternate_horizontal_scan);
271     ff_init_scantable(s, &s->intra_v_scantable, ff_alternate_vertical_scan);
272
273     return 0;
274 }
275
276 /**
277  * allocates a Picture
278  * The pixels are allocated/set by calling get_buffer() if shared=0
279  */
280 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
281     
282     if(shared){
283         assert(pic->data[0]);
284         assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
285         pic->type= FF_BUFFER_TYPE_SHARED;
286     }else{
287         int r;
288         
289         assert(!pic->data[0]);
290         
291         r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
292         
293         if(r<0 || !pic->age || !pic->type || !pic->data[0]){
294             fprintf(stderr, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
295             return -1;
296         }
297
298         if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
299             fprintf(stderr, "get_buffer() failed (stride changed)\n");
300             return -1;
301         }
302
303         if(pic->linesize[1] != pic->linesize[2]){
304             fprintf(stderr, "get_buffer() failed (uv stride mismatch)\n");
305             return -1;
306         }
307
308         s->linesize  = pic->linesize[0];
309         s->uvlinesize= pic->linesize[1];
310     }
311     
312     if(pic->qscale_table==NULL){
313         if (s->encoding) {        
314             CHECKED_ALLOCZ(pic->mb_var   , s->mb_num * sizeof(INT16))
315             CHECKED_ALLOCZ(pic->mc_mb_var, s->mb_num * sizeof(INT16))
316             CHECKED_ALLOCZ(pic->mb_mean  , s->mb_num * sizeof(INT8))
317             CHECKED_ALLOCZ(pic->mb_cmp_score, s->mb_num * sizeof(int32_t))
318         }
319
320         CHECKED_ALLOCZ(pic->mbskip_table , s->mb_num * sizeof(UINT8)+1) //the +1 is for the slice end check
321         CHECKED_ALLOCZ(pic->qscale_table , s->mb_num * sizeof(UINT8))
322         pic->qstride= s->mb_width;
323     }
324     
325     return 0;
326 fail: //for the CHECKED_ALLOCZ macro
327     return -1;
328 }
329
330 /**
331  * deallocates a picture
332  */
333 static void free_picture(MpegEncContext *s, Picture *pic){
334     int i;
335
336     if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
337         s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
338     }
339
340     av_freep(&pic->mb_var);
341     av_freep(&pic->mc_mb_var);
342     av_freep(&pic->mb_mean);
343     av_freep(&pic->mb_cmp_score);
344     av_freep(&pic->mbskip_table);
345     av_freep(&pic->qscale_table);
346     
347     if(pic->type == FF_BUFFER_TYPE_INTERNAL){
348         for(i=0; i<4; i++){
349             av_freep(&pic->base[i]);
350             pic->data[i]= NULL;
351         }
352         av_freep(&pic->opaque);
353         pic->type= 0;
354     }else if(pic->type == FF_BUFFER_TYPE_SHARED){
355         for(i=0; i<4; i++){
356             pic->base[i]=
357             pic->data[i]= NULL;
358         }
359         pic->type= 0;        
360     }
361 }
362
363 /* init common structure for both encoder and decoder */
364 int MPV_common_init(MpegEncContext *s)
365 {
366     int y_size, c_size, yc_size, i;
367
368     dsputil_init(&s->dsp, s->avctx->dsp_mask);
369     DCT_common_init(s);
370
371     s->flags= s->avctx->flags;
372
373     s->mb_width  = (s->width  + 15) / 16;
374     s->mb_height = (s->height + 15) / 16;
375
376     /* set default edge pos, will be overridden in decode_header if needed */
377     s->h_edge_pos= s->mb_width*16;
378     s->v_edge_pos= s->mb_height*16;
379
380     s->mb_num = s->mb_width * s->mb_height;
381
382     y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
383     c_size = (s->mb_width + 2) * (s->mb_height + 2);
384     yc_size = y_size + 2 * c_size;
385
386     /* convert fourcc to upper case */
387     s->avctx->fourcc=   toupper( s->avctx->fourcc     &0xFF)          
388                      + (toupper((s->avctx->fourcc>>8 )&0xFF)<<8 )
389                      + (toupper((s->avctx->fourcc>>16)&0xFF)<<16) 
390                      + (toupper((s->avctx->fourcc>>24)&0xFF)<<24);
391
392     CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
393     s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17;
394
395     s->avctx->coded_frame= (AVFrame*)&s->current_picture;
396
397     if (s->encoding) {
398         int mv_table_size= (s->mb_width+2)*(s->mb_height+2);
399
400         /* Allocate MV tables */
401         CHECKED_ALLOCZ(s->p_mv_table            , mv_table_size * 2 * sizeof(INT16))
402         CHECKED_ALLOCZ(s->b_forw_mv_table       , mv_table_size * 2 * sizeof(INT16))
403         CHECKED_ALLOCZ(s->b_back_mv_table       , mv_table_size * 2 * sizeof(INT16))
404         CHECKED_ALLOCZ(s->b_bidir_forw_mv_table , mv_table_size * 2 * sizeof(INT16))
405         CHECKED_ALLOCZ(s->b_bidir_back_mv_table , mv_table_size * 2 * sizeof(INT16))
406         CHECKED_ALLOCZ(s->b_direct_mv_table     , mv_table_size * 2 * sizeof(INT16))
407
408         //FIXME should be linesize instead of s->width*2 but that isn't known before get_buffer()
409         CHECKED_ALLOCZ(s->me.scratchpad,  s->width*2*16*3*sizeof(uint8_t)) 
410         
411         CHECKED_ALLOCZ(s->me.map      , ME_MAP_SIZE*sizeof(uint32_t))
412         CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
413
414         if(s->codec_id==CODEC_ID_MPEG4){
415             CHECKED_ALLOCZ(s->tex_pb_buffer, PB_BUFFER_SIZE);
416             CHECKED_ALLOCZ(   s->pb2_buffer, PB_BUFFER_SIZE);
417         }
418         
419         if(s->msmpeg4_version){
420             CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
421         }
422         CHECKED_ALLOCZ(s->avctx->stats_out, 256);
423     }
424         
425     CHECKED_ALLOCZ(s->error_status_table, s->mb_num*sizeof(UINT8))
426     
427     if (s->out_format == FMT_H263 || s->encoding) {
428         int size;
429         /* Allocate MB type table */
430         CHECKED_ALLOCZ(s->mb_type  , s->mb_num * sizeof(UINT8))
431
432         /* MV prediction */
433         size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
434         CHECKED_ALLOCZ(s->motion_val, size * 2 * sizeof(INT16));
435     }
436
437     if(s->codec_id==CODEC_ID_MPEG4){
438         /* interlaced direct mode decoding tables */
439         CHECKED_ALLOCZ(s->field_mv_table, s->mb_num*2*2 * sizeof(INT16))
440         CHECKED_ALLOCZ(s->field_select_table, s->mb_num*2* sizeof(INT8))
441     }
442     /* 4mv b frame decoding table */
443     //note this is needed for h263 without b frames too (segfault on damaged streams otherwise)
444     CHECKED_ALLOCZ(s->co_located_type_table, s->mb_num * sizeof(UINT8))
445     if (s->out_format == FMT_H263) {
446         /* ac values */
447         CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(INT16) * 16);
448         s->ac_val[1] = s->ac_val[0] + y_size;
449         s->ac_val[2] = s->ac_val[1] + c_size;
450         
451         /* cbp values */
452         CHECKED_ALLOCZ(s->coded_block, y_size);
453         
454         /* divx501 bitstream reorder buffer */
455         CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
456
457         /* cbp, ac_pred, pred_dir */
458         CHECKED_ALLOCZ(s->cbp_table  , s->mb_num * sizeof(UINT8))
459         CHECKED_ALLOCZ(s->pred_dir_table, s->mb_num * sizeof(UINT8))
460     }
461     
462     if (s->h263_pred || s->h263_plus || !s->encoding) {
463         /* dc values */
464         //MN: we need these for error resilience of intra-frames
465         CHECKED_ALLOCZ(s->dc_val[0], yc_size * sizeof(INT16));
466         s->dc_val[1] = s->dc_val[0] + y_size;
467         s->dc_val[2] = s->dc_val[1] + c_size;
468         for(i=0;i<yc_size;i++)
469             s->dc_val[0][i] = 1024;
470     }
471
472     /* which MBs are intra blocks */
473     CHECKED_ALLOCZ(s->mbintra_table, s->mb_num);
474     memset(s->mbintra_table, 1, s->mb_num);
475     
476     /* default structure is frame */
477     s->picture_structure = PICT_FRAME;
478     
479     /* init macroblock skip table */
480     CHECKED_ALLOCZ(s->mbskip_table, s->mb_num+1);
481     //Note the +1 is for a quicker mpeg4 slice_end detection
482     
483     s->block= s->blocks[0];
484
485     s->parse_context.state= -1;
486
487     s->context_initialized = 1;
488     return 0;
489  fail:
490     MPV_common_end(s);
491     return -1;
492 }
493
494
495 //extern int sads;
496
497 /* init common structure for both encoder and decoder */
498 void MPV_common_end(MpegEncContext *s)
499 {
500     int i;
501
502     av_freep(&s->mb_type);
503     av_freep(&s->p_mv_table);
504     av_freep(&s->b_forw_mv_table);
505     av_freep(&s->b_back_mv_table);
506     av_freep(&s->b_bidir_forw_mv_table);
507     av_freep(&s->b_bidir_back_mv_table);
508     av_freep(&s->b_direct_mv_table);
509     av_freep(&s->motion_val);
510     av_freep(&s->dc_val[0]);
511     av_freep(&s->ac_val[0]);
512     av_freep(&s->coded_block);
513     av_freep(&s->mbintra_table);
514     av_freep(&s->cbp_table);
515     av_freep(&s->pred_dir_table);
516     av_freep(&s->me.scratchpad);
517     av_freep(&s->me.map);
518     av_freep(&s->me.score_map);
519     
520     av_freep(&s->mbskip_table);
521     av_freep(&s->bitstream_buffer);
522     av_freep(&s->tex_pb_buffer);
523     av_freep(&s->pb2_buffer);
524     av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
525     av_freep(&s->co_located_type_table);
526     av_freep(&s->field_mv_table);
527     av_freep(&s->field_select_table);
528     av_freep(&s->avctx->stats_out);
529     av_freep(&s->ac_stats);
530     av_freep(&s->error_status_table);
531
532     for(i=0; i<MAX_PICTURE_COUNT; i++){
533         free_picture(s, &s->picture[i]);
534     }
535     s->context_initialized = 0;
536 }
537
538 /* init video encoder */
539 int MPV_encode_init(AVCodecContext *avctx)
540 {
541     MpegEncContext *s = avctx->priv_data;
542     int i;
543
544     avctx->pix_fmt = PIX_FMT_YUV420P;
545
546     s->bit_rate = avctx->bit_rate;
547     s->bit_rate_tolerance = avctx->bit_rate_tolerance;
548     s->frame_rate = avctx->frame_rate;
549     s->width = avctx->width;
550     s->height = avctx->height;
551     if(avctx->gop_size > 600){
552         fprintf(stderr, "Warning: keyframe interval too large! reducing it ...\n");
553         avctx->gop_size=600;
554     }
555     s->gop_size = avctx->gop_size;
556     s->rtp_mode = avctx->rtp_mode;
557     s->rtp_payload_size = avctx->rtp_payload_size;
558     if (avctx->rtp_callback)
559         s->rtp_callback = avctx->rtp_callback;
560     s->qmin= avctx->qmin;
561     s->qmax= avctx->qmax;
562     s->max_qdiff= avctx->max_qdiff;
563     s->qcompress= avctx->qcompress;
564     s->qblur= avctx->qblur;
565     s->avctx = avctx;
566     s->flags= avctx->flags;
567     s->max_b_frames= avctx->max_b_frames;
568     s->b_frame_strategy= avctx->b_frame_strategy;
569     s->codec_id= avctx->codec->id;
570     s->luma_elim_threshold  = avctx->luma_elim_threshold;
571     s->chroma_elim_threshold= avctx->chroma_elim_threshold;
572     s->strict_std_compliance= avctx->strict_std_compliance;
573     s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
574     s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
575     s->mpeg_quant= avctx->mpeg_quant;
576
577     if (s->gop_size <= 1) {
578         s->intra_only = 1;
579         s->gop_size = 12;
580     } else {
581         s->intra_only = 0;
582     }
583
584     s->me_method = avctx->me_method;
585
586     /* Fixed QSCALE */
587     s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE);
588     
589     s->adaptive_quant= (   s->avctx->lumi_masking
590                         || s->avctx->dark_masking
591                         || s->avctx->temporal_cplx_masking 
592                         || s->avctx->spatial_cplx_masking
593                         || s->avctx->p_masking)
594                        && !s->fixed_qscale;
595     
596     s->progressive_sequence= !(avctx->flags & CODEC_FLAG_INTERLACED_DCT);
597
598     switch(avctx->codec->id) {
599     case CODEC_ID_MPEG1VIDEO:
600         s->out_format = FMT_MPEG1;
601         avctx->delay=0; //FIXME not sure, should check the spec
602         break;
603     case CODEC_ID_MJPEG:
604         s->out_format = FMT_MJPEG;
605         s->intra_only = 1; /* force intra only for jpeg */
606         s->mjpeg_write_tables = 1; /* write all tables */
607         s->mjpeg_data_only_frames = 0; /* write all the needed headers */
608         s->mjpeg_vsample[0] = 2; /* set up default sampling factors */
609         s->mjpeg_vsample[1] = 1; /* the only currently supported values */
610         s->mjpeg_vsample[2] = 1; 
611         s->mjpeg_hsample[0] = 2;
612         s->mjpeg_hsample[1] = 1; 
613         s->mjpeg_hsample[2] = 1; 
614         if (mjpeg_init(s) < 0)
615             return -1;
616         avctx->delay=0;
617         s->low_delay=1;
618         break;
619     case CODEC_ID_H263:
620         if (h263_get_picture_format(s->width, s->height) == 7) {
621             printf("Input picture size isn't suitable for h263 codec! try h263+\n");
622             return -1;
623         }
624         s->out_format = FMT_H263;
625         avctx->delay=0;
626         s->low_delay=1;
627         break;
628     case CODEC_ID_H263P:
629         s->out_format = FMT_H263;
630         s->h263_plus = 1;
631         s->unrestricted_mv = 1;
632         s->h263_aic = 1;
633         
634         /* These are just to be sure */
635         s->umvplus = 0;
636         s->umvplus_dec = 0;
637         avctx->delay=0;
638         s->low_delay=1;
639         break;
640     case CODEC_ID_RV10:
641         s->out_format = FMT_H263;
642         s->h263_rv10 = 1;
643         avctx->delay=0;
644         s->low_delay=1;
645         break;
646     case CODEC_ID_MPEG4:
647         s->out_format = FMT_H263;
648         s->h263_pred = 1;
649         s->unrestricted_mv = 1;
650         s->low_delay= s->max_b_frames ? 0 : 1;
651         avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
652         break;
653     case CODEC_ID_MSMPEG4V1:
654         s->out_format = FMT_H263;
655         s->h263_msmpeg4 = 1;
656         s->h263_pred = 1;
657         s->unrestricted_mv = 1;
658         s->msmpeg4_version= 1;
659         avctx->delay=0;
660         s->low_delay=1;
661         break;
662     case CODEC_ID_MSMPEG4V2:
663         s->out_format = FMT_H263;
664         s->h263_msmpeg4 = 1;
665         s->h263_pred = 1;
666         s->unrestricted_mv = 1;
667         s->msmpeg4_version= 2;
668         avctx->delay=0;
669         s->low_delay=1;
670         break;
671     case CODEC_ID_MSMPEG4V3:
672         s->out_format = FMT_H263;
673         s->h263_msmpeg4 = 1;
674         s->h263_pred = 1;
675         s->unrestricted_mv = 1;
676         s->msmpeg4_version= 3;
677         avctx->delay=0;
678         s->low_delay=1;
679         break;
680     case CODEC_ID_WMV1:
681         s->out_format = FMT_H263;
682         s->h263_msmpeg4 = 1;
683         s->h263_pred = 1;
684         s->unrestricted_mv = 1;
685         s->msmpeg4_version= 4;
686         avctx->delay=0;
687         s->low_delay=1;
688         break;
689     case CODEC_ID_WMV2:
690         s->out_format = FMT_H263;
691         s->h263_msmpeg4 = 1;
692         s->h263_pred = 1;
693         s->unrestricted_mv = 1;
694         s->msmpeg4_version= 5;
695         avctx->delay=0;
696         s->low_delay=1;
697         break;
698     default:
699         return -1;
700     }
701     
702     { /* set up some sane defaults, some codecs might override them later */
703         static int done=0;
704         if(!done){
705             int i;
706             done=1;
707
708             default_mv_penalty= av_mallocz( sizeof(UINT16)*(MAX_FCODE+1)*(2*MAX_MV+1) );
709             memset(default_mv_penalty, 0, sizeof(UINT16)*(MAX_FCODE+1)*(2*MAX_MV+1));
710             memset(default_fcode_tab , 0, sizeof(UINT8)*(2*MAX_MV+1));
711
712             for(i=-16; i<16; i++){
713                 default_fcode_tab[i + MAX_MV]= 1;
714             }
715         }
716     }
717     s->me.mv_penalty= default_mv_penalty;
718     s->fcode_tab= default_fcode_tab;
719     s->y_dc_scale_table=
720     s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
721  
722     /* don't use the mv_penalty table for the crude MV estimators as it would be confused */
723     //FIXME remove after fixing / removing old ME
724     if (s->me_method < ME_EPZS) s->me.mv_penalty = default_mv_penalty;
725
726     s->encoding = 1;
727
728     /* init */
729     if (MPV_common_init(s) < 0)
730         return -1;
731     
732     ff_init_me(s);
733
734 #ifdef CONFIG_ENCODERS
735     if (s->out_format == FMT_H263)
736         h263_encode_init(s);
737     else if (s->out_format == FMT_MPEG1)
738         ff_mpeg1_encode_init(s);
739     if(s->msmpeg4_version)
740         ff_msmpeg4_encode_init(s);
741 #endif
742
743     /* init default q matrix */
744     for(i=0;i<64;i++) {
745         int j= s->idct_permutation[i];
746         if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
747             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
748             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
749         }else if(s->out_format == FMT_H263){
750             s->intra_matrix[j] =
751             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
752         }else{ /* mpeg1 */
753             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
754             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
755         }
756     }
757
758     /* precompute matrix */
759     /* for mjpeg, we do include qscale in the matrix */
760     if (s->out_format != FMT_MJPEG) {
761         convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, s->q_intra_matrix16_bias, 
762                        s->intra_matrix, s->intra_quant_bias, 1, 31);
763         convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16, s->q_inter_matrix16_bias, 
764                        s->inter_matrix, s->inter_quant_bias, 1, 31);
765     }
766
767     if(ff_rate_control_init(s) < 0)
768         return -1;
769
770     s->picture_number = 0;
771     s->picture_in_gop_number = 0;
772     s->fake_picture_number = 0;
773     /* motion detector init */
774     s->f_code = 1;
775     s->b_code = 1;
776
777     return 0;
778 }
779
780 int MPV_encode_end(AVCodecContext *avctx)
781 {
782     MpegEncContext *s = avctx->priv_data;
783
784 #ifdef STATS
785     print_stats();
786 #endif
787
788     ff_rate_control_uninit(s);
789
790     MPV_common_end(s);
791     if (s->out_format == FMT_MJPEG)
792         mjpeg_close(s);
793       
794     return 0;
795 }
796
797 /* draw the edges of width 'w' of an image of size width, height */
798 //FIXME check that this is ok for mpeg4 interlaced
799 static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w)
800 {
801     UINT8 *ptr, *last_line;
802     int i;
803
804     last_line = buf + (height - 1) * wrap;
805     for(i=0;i<w;i++) {
806         /* top and bottom */
807         memcpy(buf - (i + 1) * wrap, buf, width);
808         memcpy(last_line + (i + 1) * wrap, last_line, width);
809     }
810     /* left and right */
811     ptr = buf;
812     for(i=0;i<height;i++) {
813         memset(ptr - w, ptr[0], w);
814         memset(ptr + width, ptr[width-1], w);
815         ptr += wrap;
816     }
817     /* corners */
818     for(i=0;i<w;i++) {
819         memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
820         memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
821         memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
822         memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
823     }
824 }
825
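/* Return the index of a free slot in s->picture[]. Shared pictures only take
   completely unused entries; otherwise slots that already carry allocated
   side data (type!=0) but no image are preferred for reuse. */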
826 static int find_unused_picture(MpegEncContext *s, int shared){
827     int i;
828     
829     if(shared){
830         for(i=0; i<MAX_PICTURE_COUNT; i++){
831             if(s->picture[i].data[0]==NULL && s->picture[i].type==0) break;
832         }
833     }else{
834         for(i=0; i<MAX_PICTURE_COUNT; i++){
835             if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) break;
836         }
837         for(i=0; i<MAX_PICTURE_COUNT; i++){
838             if(s->picture[i].data[0]==NULL) break;
839         }
840     }
841
842     assert(i<MAX_PICTURE_COUNT);
843     return i;
844 }
845
846 /* generic function for encode/decode called before a frame is coded/decoded */
847 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
848 {
849     int i;
850     AVFrame *pic;
851
852     s->mb_skiped = 0;
853     
854     /* mark&release old frames */
855     if (s->pict_type != B_TYPE && s->last_picture.data[0]) {
856         for(i=0; i<MAX_PICTURE_COUNT; i++){
857 //printf("%8X %d %d %X %X\n", s->picture[i].data[0], s->picture[i].type, i, s->next_picture.data[0], s->last_picture.data[0]);
858             if(s->picture[i].data[0] == s->last_picture.data[0]){
859 //                s->picture[i].reference=0;
860                 avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
861                 break;
862             }    
863         }
864         assert(i<MAX_PICTURE_COUNT);
865
866         /* release forgotten pictures */
867         /* if(mpeg124/h263) */
868         if(!s->encoding){
869             for(i=0; i<MAX_PICTURE_COUNT; i++){
870                 if(s->picture[i].data[0] && s->picture[i].data[0] != s->next_picture.data[0] && s->picture[i].reference){
871                     fprintf(stderr, "releasing zombie picture\n");
872                     avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);                
873                 }
874             }
875         }
876     }
877 alloc:
878     if(!s->encoding){
879         i= find_unused_picture(s, 0);
880     
881         pic= (AVFrame*)&s->picture[i];
882         pic->reference= s->pict_type != B_TYPE;
883         pic->coded_picture_number= s->current_picture.coded_picture_number+1;
884         
885         alloc_picture(s, (Picture*)pic, 0);
886
887         s->current_picture= s->picture[i];
888     }
889
890     if (s->pict_type != B_TYPE) {
891         s->last_picture= s->next_picture;
892         s->next_picture= s->current_picture;
893     }
894     
895     if(s->pict_type != I_TYPE && s->last_picture.data[0]==NULL){
896         fprintf(stderr, "warning: first frame is not a keyframe\n");
897         assert(s->pict_type != B_TYPE); //these should have been dropped if we don't have a reference
898         goto alloc;
899     }
900    
901     s->hurry_up= s->avctx->hurry_up;
902     s->error_resilience= avctx->error_resilience;
903
904     /* set the dequantizer; we can't do it during init as it might change for mpeg4
905        and we can't do it in the header decode as init isn't called for mpeg4 there yet */
906     if(s->out_format == FMT_H263){
907         if(s->mpeg_quant)
908             s->dct_unquantize = s->dct_unquantize_mpeg2;
909         else
910             s->dct_unquantize = s->dct_unquantize_h263;
911     }else 
912         s->dct_unquantize = s->dct_unquantize_mpeg1;
913
914     return 0;
915 }
916
917 /* generic function for encode/decode called after a frame has been coded/decoded */
918 void MPV_frame_end(MpegEncContext *s)
919 {
920     int i;
921     /* draw edge for correct motion prediction if outside */
922     if(s->codec_id!=CODEC_ID_SVQ1){
923         if (s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
924             draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
925             draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
926             draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
927         }
928     }
929     emms_c();
930     
931     s->last_pict_type    = s->pict_type;
932     if(s->pict_type!=B_TYPE){
933         s->last_non_b_pict_type= s->pict_type;
934     }
935     
936     s->current_picture.quality= s->qscale; //FIXME get average of qscale_table
937     s->current_picture.pict_type= s->pict_type;
938     s->current_picture.key_frame= s->pict_type == I_TYPE;
939     
940     /* copy back current_picture variables */
941     for(i=0; i<MAX_PICTURE_COUNT; i++){
942         if(s->picture[i].data[0] == s->current_picture.data[0]){
943             s->picture[i]= s->current_picture;
944             break;
945         }    
946     }
947     assert(i<MAX_PICTURE_COUNT);
948
949     /* release non-reference frames */
950     for(i=0; i<MAX_PICTURE_COUNT; i++){
951         if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/)
952             s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
953     }
954     if(s->avctx->debug&FF_DEBUG_SKIP){
955         int x,y;        
956         for(y=0; y<s->mb_height; y++){
957             for(x=0; x<s->mb_width; x++){
958                 int count= s->mbskip_table[x + y*s->mb_width];
959                 if(count>9) count=9;
960                 printf(" %1d", count);
961             }
962             printf("\n");
963         }
964         printf("pict type: %d\n", s->pict_type);
965     }
966 }
967
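/* sum of absolute errors of a 16x16 block against a constant reference value */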
968 static int get_sae(uint8_t *src, int ref, int stride){
969     int x,y;
970     int acc=0;
971     
972     for(y=0; y<16; y++){
973         for(x=0; x<16; x++){
974             acc+= ABS(src[x+y*stride] - ref);
975         }
976     }
977     
978     return acc;
979 }
980
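/* Count the 16x16 blocks of src that look cheaper to code as intra than as
   inter against ref: a block counts if its SAE around its own mean (plus a
   fixed margin of 500) stays below the inter SAD. Used by b_frame_strategy 1
   to decide how many B frames to insert. */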
981 static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
982     int x, y, w, h;
983     int acc=0;
984     
985     w= s->width &~15;
986     h= s->height&~15;
987     
988     for(y=0; y<h; y+=16){
989         for(x=0; x<w; x+=16){
990             int offset= x + y*stride;
991             int sad = s->dsp.pix_abs16x16(src + offset, ref + offset, stride);
992             int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
993             int sae = get_sae(src + offset, mean, stride);
994             
995             acc+= sae + 500 < sad;
996         }
997     }
998     return acc;
999 }
1000
1001
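/* Queue one user-supplied frame for encoding. If the input buffers can be
   referenced directly (matching strides, and either no B-frame delay or
   CODEC_FLAG_INPUT_PRESERVED) a shared picture is used; otherwise the frame
   data is copied into an internally allocated picture. The result is
   appended to s->input_picture[]. */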
1002 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
1003     AVFrame *pic;
1004     int i;
1005     const int encoding_delay= s->max_b_frames;
1006     int direct=1;
1007
1008     if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
1009     if(pic_arg->linesize[0] != s->linesize) direct=0;
1010     if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
1011     if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
1012   
1013 //    printf("%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
1014     
1015     if(direct){
1016         i= find_unused_picture(s, 1);
1017
1018         pic= (AVFrame*)&s->picture[i];
1019         pic->reference= 1;
1020     
1021         for(i=0; i<4; i++){
1022             pic->data[i]= pic_arg->data[i];
1023             pic->linesize[i]= pic_arg->linesize[i];
1024         }
1025         alloc_picture(s, (Picture*)pic, 1);
1026     }else{
1027         i= find_unused_picture(s, 0);
1028
1029         pic= (AVFrame*)&s->picture[i];
1030         pic->reference= 1;
1031
1032         alloc_picture(s, (Picture*)pic, 0);
1033
1034         if(   pic->data[0] == pic_arg->data[0] 
1035            && pic->data[1] == pic_arg->data[1]
1036            && pic->data[2] == pic_arg->data[2]){
1037        // empty
1038         }else{
1039             int h_chroma_shift, v_chroma_shift;
1040         
1041             avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1042         
1043             for(i=0; i<3; i++){
1044                 int src_stride= pic_arg->linesize[i];
1045                 int dst_stride= i ? s->uvlinesize : s->linesize;
1046                 int h_shift= i ? h_chroma_shift : 0;
1047                 int v_shift= i ? v_chroma_shift : 0;
1048                 int w= s->width >>h_shift;
1049                 int h= s->height>>v_shift;
1050                 uint8_t *src= pic_arg->data[i];
1051                 uint8_t *dst= pic->data[i];
1052             
1053                 if(src_stride==dst_stride)
1054                     memcpy(dst, src, src_stride*h);
1055                 else{
1056                     while(h--){
1057                         memcpy(dst, src, w);
1058                         dst += dst_stride;
1059                         src += src_stride;
1060                     }
1061                 }
1062             }
1063         }
1064     }
1065     pic->quality= pic_arg->quality;
1066     pic->pict_type= pic_arg->pict_type;
1067     pic->pts = pic_arg->pts;
1068     
1069     if(s->input_picture[encoding_delay])
1070         pic->display_picture_number= s->input_picture[encoding_delay]->display_picture_number + 1;
1071
1072     /* shift buffer entries */
1073     for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
1074         s->input_picture[i-1]= s->input_picture[i];
1075         
1076     s->input_picture[encoding_delay]= (Picture*)pic;
1077
1078     return 0;
1079 }
1080
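/* Pick the next picture to encode: decide its type (I/P/B) from the GOP
   position, any user/second-pass selection or the b_frame_strategy
   heuristic, reorder pending input pictures into coding order and set up
   s->new_picture / s->current_picture accordingly. */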
1081 static void select_input_picture(MpegEncContext *s){
1082     int i;
1083     const int encoding_delay= s->max_b_frames;
1084     int coded_pic_num=0;    
1085
1086     if(s->reordered_input_picture[0])
1087         coded_pic_num= s->reordered_input_picture[0]->coded_picture_number + 1;
1088
1089     for(i=1; i<MAX_PICTURE_COUNT; i++)
1090         s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
1091     s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
1092
1093     /* set next picture types & ordering */
1094     if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
1095         if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture.data[0]==NULL || s->intra_only){
1096             s->reordered_input_picture[0]= s->input_picture[0];
1097             s->reordered_input_picture[0]->pict_type= I_TYPE;
1098             s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
1099         }else{
1100             int b_frames;
1101             
1102             if(s->flags&CODEC_FLAG_PASS2){
1103                 for(i=0; i<s->max_b_frames+1; i++){
1104                     int pict_num= s->input_picture[0]->display_picture_number + i;
1105                     int pict_type= s->rc_context.entry[pict_num].new_pict_type;
1106                     s->input_picture[i]->pict_type= pict_type;
1107                     
1108                     if(i + 1 >= s->rc_context.num_entries) break;
1109                 }
1110             }
1111
1112             if(s->input_picture[0]->pict_type){
1113                 /* user selected pict_type */
1114                 for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){
1115                     if(s->input_picture[b_frames]->pict_type!=B_TYPE) break;
1116                 }
1117             
1118                 if(b_frames > s->max_b_frames){
1119                     fprintf(stderr, "warning, too many bframes in a row\n");
1120                     b_frames = s->max_b_frames;
1121                 }
1122             }else if(s->b_frame_strategy==0){
1123                 b_frames= s->max_b_frames;
1124             }else if(s->b_frame_strategy==1){
1125                 for(i=1; i<s->max_b_frames+1; i++){
1126                     if(s->input_picture[i]->b_frame_score==0){
1127                         s->input_picture[i]->b_frame_score= 
1128                             get_intra_count(s, s->input_picture[i  ]->data[0], 
1129                                                s->input_picture[i-1]->data[0], s->linesize) + 1;
1130                     }
1131                 }
1132                 for(i=0; i<s->max_b_frames; i++){
1133                     if(s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
1134                 }
1135                                 
1136                 b_frames= FFMAX(0, i-1);
1137                 
1138                 /* reset scores */
1139                 for(i=0; i<b_frames+1; i++){
1140                     s->input_picture[i]->b_frame_score=0;
1141                 }
1142             }else{
1143                 fprintf(stderr, "illegal b frame strategy\n");
1144                 b_frames=0;
1145             }
1146
1147             emms_c();
1148 //static int b_count=0;
1149 //b_count+= b_frames;
1150 //printf("b_frames: %d\n", b_count);
1151                         
1152             s->reordered_input_picture[0]= s->input_picture[b_frames];
1153             if(   s->picture_in_gop_number + b_frames >= s->gop_size 
1154                || s->reordered_input_picture[0]->pict_type== I_TYPE)
1155                 s->reordered_input_picture[0]->pict_type= I_TYPE;
1156             else
1157                 s->reordered_input_picture[0]->pict_type= P_TYPE;
1158             s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
1159             for(i=0; i<b_frames; i++){
1160                 coded_pic_num++;
1161                 s->reordered_input_picture[i+1]= s->input_picture[i];
1162                 s->reordered_input_picture[i+1]->pict_type= B_TYPE;
1163                 s->reordered_input_picture[i+1]->coded_picture_number= coded_pic_num;
1164             }
1165         }
1166     }
1167     
1168     if(s->reordered_input_picture[0]){
1169        s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE;
1170
1171         if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
1172             int i= find_unused_picture(s, 0);
1173             Picture *pic= &s->picture[i];
1174
1175             s->new_picture= *s->reordered_input_picture[0];
1176
1177             /* mark us unused / free shared pic */
1178             for(i=0; i<4; i++)
1179                 s->reordered_input_picture[0]->data[i]= NULL;
1180             s->reordered_input_picture[0]->type= 0;
1181             
1182             pic->pict_type = s->reordered_input_picture[0]->pict_type;
1183             pic->quality   = s->reordered_input_picture[0]->quality;
1184             pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number;
1185             pic->reference = s->reordered_input_picture[0]->reference;
1186             
1187             alloc_picture(s, pic, 0);
1188
1189             s->current_picture= *pic;
1190         }else{
1191             assert(   s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER 
1192                    || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
1193             
1194             s->new_picture= *s->reordered_input_picture[0];
1195
1196             for(i=0; i<4; i++){
1197                 s->reordered_input_picture[0]->data[i]-=16; //FIXME dirty
1198             }
1199             s->current_picture= *s->reordered_input_picture[0];
1200         }
1201     
1202         s->picture_number= s->new_picture.display_picture_number;
1203 //printf("dpn:%d\n", s->picture_number);
1204     }else{
1205        memset(&s->new_picture, 0, sizeof(Picture));
1206     }
1207 }
1208
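/* Main encoding entry point: queue the input frame, select the next picture
   in coding order, encode it into buf and return the number of bytes
   written. */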
1209 int MPV_encode_picture(AVCodecContext *avctx,
1210                        unsigned char *buf, int buf_size, void *data)
1211 {
1212     MpegEncContext *s = avctx->priv_data;
1213     AVFrame *pic_arg = data;
1214     int i;
1215
1216     init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
1217
1218     s->picture_in_gop_number++;
1219
1220     load_input_picture(s, pic_arg);
1221     
1222     select_input_picture(s);
1223     
1224     /* output? */
1225     if(s->new_picture.data[0]){
1226
1227         s->pict_type= s->new_picture.pict_type;
1228         if (s->fixed_qscale){ /* the ratecontrol needs the last qscale so we don't touch it for CBR */
1229             s->qscale= (int)(s->new_picture.quality+0.5);
1230             assert(s->qscale);
1231         }
1232 //emms_c();
1233 //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
1234         MPV_frame_start(s, avctx);
1235
1236         encode_picture(s, s->picture_number);
1237         
1238         avctx->real_pict_num  = s->picture_number;
1239         avctx->header_bits = s->header_bits;
1240         avctx->mv_bits     = s->mv_bits;
1241         avctx->misc_bits   = s->misc_bits;
1242         avctx->i_tex_bits  = s->i_tex_bits;
1243         avctx->p_tex_bits  = s->p_tex_bits;
1244         avctx->i_count     = s->i_count;
1245         avctx->p_count     = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
1246         avctx->skip_count  = s->skip_count;
1247
1248         MPV_frame_end(s);
1249
1250         if (s->out_format == FMT_MJPEG)
1251             mjpeg_picture_trailer(s);
1252         
1253         if(s->flags&CODEC_FLAG_PASS1)
1254             ff_write_pass1_stats(s);
1255     }
1256
1257     s->input_picture_number++;
1258
1259     flush_put_bits(&s->pb);
1260     s->frame_bits  = (pbBufPtr(&s->pb) - s->pb.buf) * 8;
1261     
1262     s->total_bits += s->frame_bits;
1263     avctx->frame_bits  = s->frame_bits;
1264
1265     for(i=0; i<4; i++){
1266         avctx->error[i] += s->current_picture.error[i];
1267     }
1268     
1269     return pbBufPtr(&s->pb) - s->pb.buf;
1270 }
1271
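/* Global motion compensation for the single warp point case: the sprite
   offset acts as one translational vector, applied to luma and chroma via
   dsp.gmc1(), falling back to plain put_pixels when no sub-pel filtering is
   needed. */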
1272 static inline void gmc1_motion(MpegEncContext *s,
1273                                UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1274                                int dest_offset,
1275                                UINT8 **ref_picture, int src_offset)
1276 {
1277     UINT8 *ptr;
1278     int offset, src_x, src_y, linesize, uvlinesize;
1279     int motion_x, motion_y;
1280     int emu=0;
1281
1282     motion_x= s->sprite_offset[0][0];
1283     motion_y= s->sprite_offset[0][1];
1284     src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
1285     src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
1286     motion_x<<=(3-s->sprite_warping_accuracy);
1287     motion_y<<=(3-s->sprite_warping_accuracy);
1288     src_x = clip(src_x, -16, s->width);
1289     if (src_x == s->width)
1290         motion_x =0;
1291     src_y = clip(src_y, -16, s->height);
1292     if (src_y == s->height)
1293         motion_y =0;
1294
1295     linesize = s->linesize;
1296     uvlinesize = s->uvlinesize;
1297     
1298     ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1299
1300     dest_y+=dest_offset;
1301     if(s->flags&CODEC_FLAG_EMU_EDGE){
1302         if(src_x<0 || src_y<0 || src_x + 17 >= s->h_edge_pos
1303                               || src_y + 17 >= s->v_edge_pos){
1304             ff_emulated_edge_mc(s, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1305             ptr= s->edge_emu_buffer;
1306         }
1307     }
1308     
1309     if((motion_x|motion_y)&7){
1310         s->dsp.gmc1(dest_y  , ptr  , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1311         s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1312     }else{
1313         int dxy;
1314         
1315         dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
1316         if (s->no_rounding){
1317             s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
1318         }else{
1319             s->dsp.put_pixels_tab       [0][dxy](dest_y, ptr, linesize, 16);
1320         }
1321     }
1322     
1323     if(s->flags&CODEC_FLAG_GRAY) return;
1324
1325     motion_x= s->sprite_offset[1][0];
1326     motion_y= s->sprite_offset[1][1];
1327     src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
1328     src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
1329     motion_x<<=(3-s->sprite_warping_accuracy);
1330     motion_y<<=(3-s->sprite_warping_accuracy);
1331     src_x = clip(src_x, -8, s->width>>1);
1332     if (src_x == s->width>>1)
1333         motion_x =0;
1334     src_y = clip(src_y, -8, s->height>>1);
1335     if (src_y == s->height>>1)
1336         motion_y =0;
1337
1338     offset = (src_y * uvlinesize) + src_x + (src_offset>>1);
1339     ptr = ref_picture[1] + offset;
1340     if(s->flags&CODEC_FLAG_EMU_EDGE){
1341         if(src_x<0 || src_y<0 || src_x + 9 >= s->h_edge_pos>>1
1342                               || src_y + 9 >= s->v_edge_pos>>1){
1343             ff_emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1344             ptr= s->edge_emu_buffer;
1345             emu=1;
1346         }
1347     }
1348     s->dsp.gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
1349     
1350     ptr = ref_picture[2] + offset;
1351     if(emu){
1352         ff_emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1353         ptr= s->edge_emu_buffer;
1354     }
1355     s->dsp.gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
1356     
1357     return;
1358 }
1359
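/* General global motion compensation: applies the affine warp described by
   sprite_offset and sprite_delta to luma and chroma via dsp.gmc(). */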
1360 static inline void gmc_motion(MpegEncContext *s,
1361                                UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1362                                int dest_offset,
1363                                UINT8 **ref_picture, int src_offset)
1364 {
1365     UINT8 *ptr;
1366     int linesize, uvlinesize;
1367     const int a= s->sprite_warping_accuracy;
1368     int ox, oy;
1369
1370     linesize = s->linesize;
1371     uvlinesize = s->uvlinesize;
1372
1373     ptr = ref_picture[0] + src_offset;
1374
1375     dest_y+=dest_offset;
1376     
1377     ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
1378     oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;
1379
1380     s->dsp.gmc(dest_y, ptr, linesize, 16,
1381            ox, 
1382            oy, 
1383            s->sprite_delta[0][0], s->sprite_delta[0][1],
1384            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1385            a+1, (1<<(2*a+1)) - s->no_rounding,
1386            s->h_edge_pos, s->v_edge_pos);
1387     s->dsp.gmc(dest_y+8, ptr, linesize, 16,
1388            ox + s->sprite_delta[0][0]*8, 
1389            oy + s->sprite_delta[1][0]*8, 
1390            s->sprite_delta[0][0], s->sprite_delta[0][1],
1391            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1392            a+1, (1<<(2*a+1)) - s->no_rounding,
1393            s->h_edge_pos, s->v_edge_pos);
1394
1395     if(s->flags&CODEC_FLAG_GRAY) return;
1396
1397
1398     dest_cb+=dest_offset>>1;
1399     dest_cr+=dest_offset>>1;
1400     
1401     ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
1402     oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;
1403
1404     ptr = ref_picture[1] + (src_offset>>1);
1405     s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
1406            ox, 
1407            oy, 
1408            s->sprite_delta[0][0], s->sprite_delta[0][1],
1409            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1410            a+1, (1<<(2*a+1)) - s->no_rounding,
1411            s->h_edge_pos>>1, s->v_edge_pos>>1);
1412     
1413     ptr = ref_picture[2] + (src_offset>>1);
1414     s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
1415            ox, 
1416            oy, 
1417            s->sprite_delta[0][0], s->sprite_delta[0][1],
1418            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1419            a+1, (1<<(2*a+1)) - s->no_rounding,
1420            s->h_edge_pos>>1, s->v_edge_pos>>1);
1421 }
1422
1423
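/* Copy a block_w x block_h block starting at (src_x, src_y) into
   s->edge_emu_buffer, replicating the border pixels for the parts that lie
   outside the w x h picture area, so motion compensation can read from the
   buffer instead of out-of-picture memory. */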
1424 void ff_emulated_edge_mc(MpegEncContext *s, UINT8 *src, int linesize, int block_w, int block_h, 
1425                                     int src_x, int src_y, int w, int h){
1426     int x, y;
1427     int start_y, start_x, end_y, end_x;
1428     UINT8 *buf= s->edge_emu_buffer;
1429
1430     if(src_y>= h){
1431         src+= (h-1-src_y)*linesize;
1432         src_y=h-1;
1433     }else if(src_y<=-block_h){
1434         src+= (1-block_h-src_y)*linesize;
1435         src_y=1-block_h;
1436     }
1437     if(src_x>= w){
1438         src+= (w-1-src_x);
1439         src_x=w-1;
1440     }else if(src_x<=-block_w){
1441         src+= (1-block_w-src_x);
1442         src_x=1-block_w;
1443     }
1444
1445     start_y= FFMAX(0, -src_y);
1446     start_x= FFMAX(0, -src_x);
1447     end_y= FFMIN(block_h, h-src_y);
1448     end_x= FFMIN(block_w, w-src_x);
1449
1450     // copy existing part
1451     for(y=start_y; y<end_y; y++){
1452         for(x=start_x; x<end_x; x++){
1453             buf[x + y*linesize]= src[x + y*linesize];
1454         }
1455     }
1456
1457     //top
1458     for(y=0; y<start_y; y++){
1459         for(x=start_x; x<end_x; x++){
1460             buf[x + y*linesize]= buf[x + start_y*linesize];
1461         }
1462     }
1463
1464     //bottom
1465     for(y=end_y; y<block_h; y++){
1466         for(x=start_x; x<end_x; x++){
1467             buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
1468         }
1469     }
1470                                     
1471     for(y=0; y<block_h; y++){
1472        //left
1473         for(x=0; x<start_x; x++){
1474             buf[x + y*linesize]= buf[start_x + y*linesize];
1475         }
1476        
1477        //right
1478         for(x=end_x; x<block_w; x++){
1479             buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
1480         }
1481     }
1482 }
1483
1484
1485 /* apply one mpeg motion vector to the three components */
1486 static inline void mpeg_motion(MpegEncContext *s,
1487                                UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1488                                int dest_offset,
1489                                UINT8 **ref_picture, int src_offset,
1490                                int field_based, op_pixels_func (*pix_op)[4],
1491                                int motion_x, int motion_y, int h)
1492 {
1493     UINT8 *ptr;
1494     int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
1495     int emu=0;
1496 #if 0    
1497 if(s->quarter_sample)
1498 {
1499     motion_x>>=1;
1500     motion_y>>=1;
1501 }
1502 #endif
1503     dxy = ((motion_y & 1) << 1) | (motion_x & 1);
1504     src_x = s->mb_x * 16 + (motion_x >> 1);
1505     src_y = s->mb_y * (16 >> field_based) + (motion_y >> 1);
1506                 
1507     /* WARNING: do not forget half pels */
1508     height = s->height >> field_based;
1509     v_edge_pos = s->v_edge_pos >> field_based;
1510     src_x = clip(src_x, -16, s->width);
1511     if (src_x == s->width)
1512         dxy &= ~1;
1513     src_y = clip(src_y, -16, height);
1514     if (src_y == height)
1515         dxy &= ~2;
1516     linesize   = s->linesize << field_based;
1517     uvlinesize = s->uvlinesize << field_based;
1518     ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset;
1519     dest_y += dest_offset;
1520
1521     if(s->flags&CODEC_FLAG_EMU_EDGE){
1522         if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 16 > s->h_edge_pos
1523                               || src_y + (motion_y&1) + h  > v_edge_pos){
1524             ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based, 
1525                              src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
1526             ptr= s->edge_emu_buffer + src_offset;
1527             emu=1;
1528         }
1529     }
1530     pix_op[0][dxy](dest_y, ptr, linesize, h);
1531
1532     if(s->flags&CODEC_FLAG_GRAY) return;
1533
1534     if (s->out_format == FMT_H263) {
1535         dxy = 0;
1536         if ((motion_x & 3) != 0)
1537             dxy |= 1;
1538         if ((motion_y & 3) != 0)
1539             dxy |= 2;
1540         mx = motion_x >> 2;
1541         my = motion_y >> 2;
1542     } else {
1543         mx = motion_x / 2;
1544         my = motion_y / 2;
1545         dxy = ((my & 1) << 1) | (mx & 1);
1546         mx >>= 1;
1547         my >>= 1;
1548     }
1549     
1550     src_x = s->mb_x * 8 + mx;
1551     src_y = s->mb_y * (8 >> field_based) + my;
1552     src_x = clip(src_x, -8, s->width >> 1);
1553     if (src_x == (s->width >> 1))
1554         dxy &= ~1;
1555     src_y = clip(src_y, -8, height >> 1);
1556     if (src_y == (height >> 1))
1557         dxy &= ~2;
1558     offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
1559     ptr = ref_picture[1] + offset;
1560     if(emu){
1561         ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, 
1562                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1563         ptr= s->edge_emu_buffer + (src_offset >> 1);
1564     }
1565     pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1566
1567     ptr = ref_picture[2] + offset;
1568     if(emu){
1569         ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, 
1570                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1571         ptr= s->edge_emu_buffer + (src_offset >> 1);
1572     }
1573     pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1574 }
1575
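/* apply one quarter-pel motion vector to the three components */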
1576 static inline void qpel_motion(MpegEncContext *s,
1577                                UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1578                                int dest_offset,
1579                                UINT8 **ref_picture, int src_offset,
1580                                int field_based, op_pixels_func (*pix_op)[4],
1581                                qpel_mc_func (*qpix_op)[16],
1582                                int motion_x, int motion_y, int h)
1583 {
1584     UINT8 *ptr;
1585     int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
1586     int emu=0;
1587
1588     dxy = ((motion_y & 3) << 2) | (motion_x & 3);
1589     src_x = s->mb_x * 16 + (motion_x >> 2);
1590     src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
1591
1592     height = s->height >> field_based;
1593     v_edge_pos = s->v_edge_pos >> field_based;
1594     src_x = clip(src_x, -16, s->width);
1595     if (src_x == s->width)
1596         dxy &= ~3;
1597     src_y = clip(src_y, -16, height);
1598     if (src_y == height)
1599         dxy &= ~12;
1600     linesize = s->linesize << field_based;
1601     uvlinesize = s->uvlinesize << field_based;
1602     ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1603     dest_y += dest_offset;
1604 //printf("%d %d %d\n", src_x, src_y, dxy);
1605     
1606     if(s->flags&CODEC_FLAG_EMU_EDGE){
1607         if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 16 > s->h_edge_pos
1608                               || src_y + (motion_y&3) + h  > v_edge_pos){
1609             ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based, 
1610                              src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
1611             ptr= s->edge_emu_buffer + src_offset;
1612             emu=1;
1613         }
1614     }
1615     if(!field_based)
1616         qpix_op[0][dxy](dest_y, ptr, linesize);
1617     else{
1618         //damn interlaced mode
1619         //FIXME boundary mirroring is not exactly correct here
1620         qpix_op[1][dxy](dest_y  , ptr  , linesize);
1621         qpix_op[1][dxy](dest_y+8, ptr+8, linesize);
1622     }
1623
1624     if(s->flags&CODEC_FLAG_GRAY) return;
1625
1626     if(field_based){
1627         mx= motion_x/2;
1628         my= motion_y>>1;
1629     }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
1630         mx= (motion_x>>1)|(motion_x&1);
1631         my= (motion_y>>1)|(motion_y&1);
1632     }else{
1633         mx= motion_x/2;
1634         my= motion_y/2;
1635     }
1636     mx= (mx>>1)|(mx&1);
1637     my= (my>>1)|(my&1);
1638     dxy= (mx&1) | ((my&1)<<1);
1639     mx>>=1;
1640     my>>=1;
1641
1642     src_x = s->mb_x * 8 + mx;
1643     src_y = s->mb_y * (8 >> field_based) + my;
1644     src_x = clip(src_x, -8, s->width >> 1);
1645     if (src_x == (s->width >> 1))
1646         dxy &= ~1;
1647     src_y = clip(src_y, -8, height >> 1);
1648     if (src_y == (height >> 1))
1649         dxy &= ~2;
1650
1651     offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
1652     ptr = ref_picture[1] + offset;
1653     if(emu){
1654         ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, 
1655                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1656         ptr= s->edge_emu_buffer + (src_offset >> 1);
1657     }
1658     pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr,  uvlinesize, h >> 1);
1659     
1660     ptr = ref_picture[2] + offset;
1661     if(emu){
1662         ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, 
1663                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1664         ptr= s->edge_emu_buffer + (src_offset >> 1);
1665     }
1666     pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr,  uvlinesize, h >> 1);
1667 }
1668
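/**
 * converts a luma motion component (or the sum of four of them in 4MV mode)
 * into the corresponding chroma component using h263_chroma_roundtab
 */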
1669 inline int ff_h263_round_chroma(int x){
1670     if (x >= 0)
1671         return  (h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
1672     else {
1673         x = -x;
1674         return -(h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
1675     }
1676 }
1677
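/* motion compensate one macroblock in the given prediction direction,
   dispatching on s->mv_type (16x16 incl. GMC and qpel, 8x8 4MV, field MC) */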
1678 static inline void MPV_motion(MpegEncContext *s, 
1679                               UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1680                               int dir, UINT8 **ref_picture, 
1681                               op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
1682 {
1683     int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y;
1684     int mb_x, mb_y, i;
1685     UINT8 *ptr, *dest;
1686     int emu=0;
1687
1688     mb_x = s->mb_x;
1689     mb_y = s->mb_y;
1690
1691     switch(s->mv_type) {
1692     case MV_TYPE_16X16:
1693         if(s->mcsel){
1694             if(s->real_sprite_warping_points==1){
1695                 gmc1_motion(s, dest_y, dest_cb, dest_cr, 0,
1696                             ref_picture, 0);
1697             }else{
1698                 gmc_motion(s, dest_y, dest_cb, dest_cr, 0,
1699                             ref_picture, 0);
1700             }
1701         }else if(s->quarter_sample){
1702             qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
1703                         ref_picture, 0,
1704                         0, pix_op, qpix_op,
1705                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
1706         }else if(s->mspel){
1707             ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
1708                         ref_picture, pix_op,
1709                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
1710         }else{
1711             mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
1712                         ref_picture, 0,
1713                         0, pix_op,
1714                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
1715         }           
1716         break;
1717     case MV_TYPE_8X8:
1718         mx = 0;
1719         my = 0;
1720         if(s->quarter_sample){
1721             for(i=0;i<4;i++) {
1722                 motion_x = s->mv[dir][i][0];
1723                 motion_y = s->mv[dir][i][1];
1724
1725                 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
1726                 src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
1727                 src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;
1728                     
1729                 /* WARNING: do not forget half pels */
1730                 src_x = clip(src_x, -16, s->width);
1731                 if (src_x == s->width)
1732                     dxy &= ~3;
1733                 src_y = clip(src_y, -16, s->height);
1734                 if (src_y == s->height)
1735                     dxy &= ~12;
1736                     
1737                 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
1738                 if(s->flags&CODEC_FLAG_EMU_EDGE){
1739                     if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 8 > s->h_edge_pos
1740                                           || src_y + (motion_y&3) + 8 > s->v_edge_pos){
1741                         ff_emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1742                         ptr= s->edge_emu_buffer;
1743                     }
1744                 }
1745                 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
1746                 qpix_op[1][dxy](dest, ptr, s->linesize);
1747
1748                 mx += s->mv[dir][i][0]/2;
1749                 my += s->mv[dir][i][1]/2;
1750             }
1751         }else{
1752             for(i=0;i<4;i++) {
1753                 motion_x = s->mv[dir][i][0];
1754                 motion_y = s->mv[dir][i][1];
1755
1756                 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
1757                 src_x = mb_x * 16 + (motion_x >> 1) + (i & 1) * 8;
1758                 src_y = mb_y * 16 + (motion_y >> 1) + (i >>1) * 8;
1759                     
1760                 /* WARNING: do not forget half pels */
1761                 src_x = clip(src_x, -16, s->width);
1762                 if (src_x == s->width)
1763                     dxy &= ~1;
1764                 src_y = clip(src_y, -16, s->height);
1765                 if (src_y == s->height)
1766                     dxy &= ~2;
1767                     
1768                 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
1769                 if(s->flags&CODEC_FLAG_EMU_EDGE){
1770                     if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 8 > s->h_edge_pos
1771                                           || src_y + (motion_y&1) + 8 > s->v_edge_pos){
1772                         ff_emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1773                         ptr= s->edge_emu_buffer;
1774                     }
1775                 }
1776                 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
1777                 pix_op[1][dxy](dest, ptr, s->linesize, 8);
1778
1779                 mx += s->mv[dir][i][0];
1780                 my += s->mv[dir][i][1];
1781             }
1782         }
1783
1784         if(s->flags&CODEC_FLAG_GRAY) break;
1785         /* In case of 8X8, we construct a single chroma motion vector
1786            with a special rounding */
1787         mx= ff_h263_round_chroma(mx);
1788         my= ff_h263_round_chroma(my);
1789         dxy = ((my & 1) << 1) | (mx & 1);
1790         mx >>= 1;
1791         my >>= 1;
1792
1793         src_x = mb_x * 8 + mx;
1794         src_y = mb_y * 8 + my;
1795         src_x = clip(src_x, -8, s->width/2);
1796         if (src_x == s->width/2)
1797             dxy &= ~1;
1798         src_y = clip(src_y, -8, s->height/2);
1799         if (src_y == s->height/2)
1800             dxy &= ~2;
1801         
1802         offset = (src_y * (s->uvlinesize)) + src_x;
1803         ptr = ref_picture[1] + offset;
1804         if(s->flags&CODEC_FLAG_EMU_EDGE){
1805                 if(src_x<0 || src_y<0 || src_x + (dxy &1) + 8 > s->h_edge_pos>>1
1806                                       || src_y + (dxy>>1) + 8 > s->v_edge_pos>>1){
1807                     ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1808                     ptr= s->edge_emu_buffer;
1809                     emu=1;
1810                 }
1811             }
1812         pix_op[1][dxy](dest_cb, ptr, s->uvlinesize, 8);
1813
1814         ptr = ref_picture[2] + offset;
1815         if(emu){
1816             ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1817             ptr= s->edge_emu_buffer;
1818         }
1819         pix_op[1][dxy](dest_cr, ptr, s->uvlinesize, 8);
1820         break;
1821     case MV_TYPE_FIELD:
1822         if (s->picture_structure == PICT_FRAME) {
1823             if(s->quarter_sample){
1824                 /* top field */
1825                 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
1826                             ref_picture, s->field_select[dir][0] ? s->linesize : 0,
1827                             1, pix_op, qpix_op,
1828                             s->mv[dir][0][0], s->mv[dir][0][1], 8);
1829                 /* bottom field */
1830                 qpel_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
1831                             ref_picture, s->field_select[dir][1] ? s->linesize : 0,
1832                             1, pix_op, qpix_op,
1833                             s->mv[dir][1][0], s->mv[dir][1][1], 8);
1834             }else{
1835                 /* top field */       
1836                 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
1837                             ref_picture, s->field_select[dir][0] ? s->linesize : 0,
1838                             1, pix_op,
1839                             s->mv[dir][0][0], s->mv[dir][0][1], 8);
1840                 /* bottom field */
1841                 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
1842                             ref_picture, s->field_select[dir][1] ? s->linesize : 0,
1843                             1, pix_op,
1844                             s->mv[dir][1][0], s->mv[dir][1][1], 8);
1845             }
1846         } else {
1847             
1848
1849         }
1850         break;
1851     }
1852 }
1853
1854
1855 /* put block[] to dest[] */
1856 static inline void put_dct(MpegEncContext *s, 
1857                            DCTELEM *block, int i, UINT8 *dest, int line_size)
1858 {
1859     s->dct_unquantize(s, block, i, s->qscale);
1860     s->idct_put (dest, line_size, block);
1861 }
1862
1863 /* add block[] to dest[] */
1864 static inline void add_dct(MpegEncContext *s, 
1865                            DCTELEM *block, int i, UINT8 *dest, int line_size)
1866 {
1867     if (s->block_last_index[i] >= 0) {
1868         s->idct_add (dest, line_size, block);
1869     }
1870 }
1871
1872 static inline void add_dequant_dct(MpegEncContext *s, 
1873                            DCTELEM *block, int i, UINT8 *dest, int line_size)
1874 {
1875     if (s->block_last_index[i] >= 0) {
1876         s->dct_unquantize(s, block, i, s->qscale);
1877
1878         s->idct_add (dest, line_size, block);
1879     }
1880 }
1881
1882 /**
1883  * cleans dc, ac and coded_block for the current non-intra MB
1884  */
1885 void ff_clean_intra_table_entries(MpegEncContext *s)
1886 {
1887     int wrap = s->block_wrap[0];
1888     int xy = s->block_index[0];
1889     
1890     s->dc_val[0][xy           ] = 
1891     s->dc_val[0][xy + 1       ] = 
1892     s->dc_val[0][xy     + wrap] =
1893     s->dc_val[0][xy + 1 + wrap] = 1024;
1894     /* ac pred */
1895     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(INT16));
1896     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(INT16));
1897     if (s->msmpeg4_version>=3) {
1898         s->coded_block[xy           ] =
1899         s->coded_block[xy + 1       ] =
1900         s->coded_block[xy     + wrap] =
1901         s->coded_block[xy + 1 + wrap] = 0;
1902     }
1903     /* chroma */
1904     wrap = s->block_wrap[4];
1905     xy = s->mb_x + 1 + (s->mb_y + 1) * wrap;
1906     s->dc_val[1][xy] =
1907     s->dc_val[2][xy] = 1024;
1908     /* ac pred */
1909     memset(s->ac_val[1][xy], 0, 16 * sizeof(INT16));
1910     memset(s->ac_val[2][xy], 0, 16 * sizeof(INT16));
1911     
1912     s->mbintra_table[s->mb_x + s->mb_y*s->mb_width]= 0;
1913 }
1914
1915 /* generic function called after a macroblock has been parsed by the
1916    decoder or after it has been encoded by the encoder.
1917
1918    Important variables used:
1919    s->mb_intra : true if intra macroblock
1920    s->mv_dir   : motion vector direction
1921    s->mv_type  : motion vector type
1922    s->mv       : motion vector
1923    s->interlaced_dct : true if interlaced dct used (mpeg2)
1924  */
1925 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
1926 {
1927     int mb_x, mb_y;
1928     const int mb_xy = s->mb_y * s->mb_width + s->mb_x;
1929
1930     mb_x = s->mb_x;
1931     mb_y = s->mb_y;
1932
1933     s->current_picture.qscale_table[mb_xy]= s->qscale;
1934
1935     /* update DC predictors for P macroblocks */
1936     if (!s->mb_intra) {
1937         if (s->h263_pred || s->h263_aic) {
1938             if(s->mbintra_table[mb_xy])
1939                 ff_clean_intra_table_entries(s);
1940         } else {
1941             s->last_dc[0] =
1942             s->last_dc[1] =
1943             s->last_dc[2] = 128 << s->intra_dc_precision;
1944         }
1945     }
1946     else if (s->h263_pred || s->h263_aic)
1947         s->mbintra_table[mb_xy]=1;
1948
1949     /* update motion predictor, not for B-frames as they need the motion_val from the last P/S-Frame */
1950     if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE) { //FIXME move into h263.c if possible, format-specific stuff shouldn't be here
1951         //FIXME a lot of that is only needed for !low_delay
1952         const int wrap = s->block_wrap[0];
1953         const int xy = s->block_index[0];
1954         const int mb_index= s->mb_x + s->mb_y*s->mb_width;
1955         if(s->mv_type == MV_TYPE_8X8){
1956             s->co_located_type_table[mb_index]= CO_LOCATED_TYPE_4MV;
1957         } else {
1958             int motion_x, motion_y;
1959             if (s->mb_intra) {
1960                 motion_x = 0;
1961                 motion_y = 0;
1962                 if(s->co_located_type_table)
1963                     s->co_located_type_table[mb_index]= 0;
1964             } else if (s->mv_type == MV_TYPE_16X16) {
1965                 motion_x = s->mv[0][0][0];
1966                 motion_y = s->mv[0][0][1];
1967                 if(s->co_located_type_table)
1968                     s->co_located_type_table[mb_index]= 0;
1969             } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
1970                 int i;
1971                 motion_x = s->mv[0][0][0] + s->mv[0][1][0];
1972                 motion_y = s->mv[0][0][1] + s->mv[0][1][1];
1973                 motion_x = (motion_x>>1) | (motion_x&1);
1974                 for(i=0; i<2; i++){
1975                     s->field_mv_table[mb_index][i][0]= s->mv[0][i][0];
1976                     s->field_mv_table[mb_index][i][1]= s->mv[0][i][1];
1977                     s->field_select_table[mb_index][i]= s->field_select[0][i];
1978                 }
1979                 s->co_located_type_table[mb_index]= CO_LOCATED_TYPE_FIELDMV;
1980             }
1981             /* no update if 8X8 because it has been done during parsing */
1982             s->motion_val[xy][0] = motion_x;
1983             s->motion_val[xy][1] = motion_y;
1984             s->motion_val[xy + 1][0] = motion_x;
1985             s->motion_val[xy + 1][1] = motion_y;
1986             s->motion_val[xy + wrap][0] = motion_x;
1987             s->motion_val[xy + wrap][1] = motion_y;
1988             s->motion_val[xy + 1 + wrap][0] = motion_x;
1989             s->motion_val[xy + 1 + wrap][1] = motion_y;
1990         }
1991     }
1992     
1993     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
1994         UINT8 *dest_y, *dest_cb, *dest_cr;
1995         int dct_linesize, dct_offset;
1996         op_pixels_func (*op_pix)[4];
1997         qpel_mc_func (*op_qpix)[16];
1998
1999         /* avoid copy if macroblock skipped in last frame too */
2000         if (s->pict_type != B_TYPE) {
2001             s->current_picture.mbskip_table[mb_xy]= s->mb_skiped;
2002         }
2003
2004         /* skip only during decoding, as during encoding we might trash the buffers a bit */
2005         if(!s->encoding){
2006             UINT8 *mbskip_ptr = &s->mbskip_table[mb_xy];
2007             const int age= s->current_picture.age;
2008
2009             assert(age);
2010
2011             if (s->mb_skiped) {
2012                 s->mb_skiped= 0;
2013                 assert(s->pict_type!=I_TYPE);
2014  
2015                 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2016                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2017
2018                 /* if previous was skipped too, then nothing to do !  */
2019                 if (*mbskip_ptr >= age){
2020 //if(s->pict_type!=B_TYPE && s->mb_x==0) printf("\n");
2021 //if(s->pict_type!=B_TYPE) printf("%d%d ", *mbskip_ptr, age);
2022                     if(s->pict_type!=B_TYPE) return;
2023                     if(s->avctx->draw_horiz_band==NULL && *mbskip_ptr > age) return; 
2024                     /* we don't draw complete frames here, so we can't skip */
2025                 }
2026             } else {
2027                 *mbskip_ptr = 0; /* not skipped */
2028             }
2029         }else
2030             s->mb_skiped= 0;
2031
2032         if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band){
2033             dest_y  = s->current_picture.data[0] + mb_x * 16;
2034             dest_cb = s->current_picture.data[1] + mb_x * 8;
2035             dest_cr = s->current_picture.data[2] + mb_x * 8;
2036         }else{
2037             dest_y  = s->current_picture.data[0] + (mb_y * 16* s->linesize  ) + mb_x * 16;
2038             dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
2039             dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
2040         }
2041
2042         if (s->interlaced_dct) {
2043             dct_linesize = s->linesize * 2;
2044             dct_offset = s->linesize;
2045         } else {
2046             dct_linesize = s->linesize;
2047             dct_offset = s->linesize * 8;
2048         }
2049
2050         if (!s->mb_intra) {
2051             /* motion handling */
2052             /* decoding or more than one mb_type (MC was already done otherwise) */
2053             if((!s->encoding) || (s->mb_type[mb_xy]&(s->mb_type[mb_xy]-1))){
2054                 if ((!s->no_rounding) || s->pict_type==B_TYPE){                
2055                     op_pix = s->dsp.put_pixels_tab;
2056                     op_qpix= s->dsp.put_qpel_pixels_tab;
2057                 }else{
2058                     op_pix = s->dsp.put_no_rnd_pixels_tab;
2059                     op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
2060                 }
2061
2062                 if (s->mv_dir & MV_DIR_FORWARD) {
2063                     MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2064                     op_pix = s->dsp.avg_pixels_tab;
2065                     op_qpix= s->dsp.avg_qpel_pixels_tab;
2066                 }
2067                 if (s->mv_dir & MV_DIR_BACKWARD) {
2068                     MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2069                 }
2070             }
2071
2072             /* skip dequant / idct if we are really late ;) */
2073             if(s->hurry_up>1) return;
2074
2075             /* add dct residue */
2076             if(s->encoding || !(   s->mpeg2 || s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO 
2077                                 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2078                 add_dequant_dct(s, block[0], 0, dest_y, dct_linesize);
2079                 add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2080                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2081                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2082
2083                 if(!(s->flags&CODEC_FLAG_GRAY)){
2084                     add_dequant_dct(s, block[4], 4, dest_cb, s->uvlinesize);
2085                     add_dequant_dct(s, block[5], 5, dest_cr, s->uvlinesize);
2086                 }
2087             } else if(s->codec_id != CODEC_ID_WMV2){
2088                 add_dct(s, block[0], 0, dest_y, dct_linesize);
2089                 add_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2090                 add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2091                 add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2092
2093                 if(!(s->flags&CODEC_FLAG_GRAY)){
2094                     add_dct(s, block[4], 4, dest_cb, s->uvlinesize);
2095                     add_dct(s, block[5], 5, dest_cr, s->uvlinesize);
2096                 }
2097             } else{
2098                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2099             }
2100         } else {
2101             /* dct only in intra block */
2102             if(s->encoding || !(s->mpeg2 || s->codec_id==CODEC_ID_MPEG1VIDEO)){
2103                 put_dct(s, block[0], 0, dest_y, dct_linesize);
2104                 put_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2105                 put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2106                 put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2107
2108                 if(!(s->flags&CODEC_FLAG_GRAY)){
2109                     put_dct(s, block[4], 4, dest_cb, s->uvlinesize);
2110                     put_dct(s, block[5], 5, dest_cr, s->uvlinesize);
2111                 }
2112             }else{
2113                 s->idct_put(dest_y                 , dct_linesize, block[0]);
2114                 s->idct_put(dest_y              + 8, dct_linesize, block[1]);
2115                 s->idct_put(dest_y + dct_offset    , dct_linesize, block[2]);
2116                 s->idct_put(dest_y + dct_offset + 8, dct_linesize, block[3]);
2117
2118                 if(!(s->flags&CODEC_FLAG_GRAY)){
2119                     s->idct_put(dest_cb, s->uvlinesize, block[4]);
2120                     s->idct_put(dest_cr, s->uvlinesize, block[5]);
2121                 }
2122             }
2123         }
2124     }
2125 }
2126
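/* zero the whole block if it only contains a few +-1 coefficients whose
   position weighted score is below the threshold; a negative threshold
   additionally allows the dc coefficient to be eliminated */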
2127 static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
2128 {
2129     static const char tab[64]=
2130         {3,2,2,1,1,1,1,1,
2131          1,1,1,1,1,1,1,1,
2132          1,1,1,1,1,1,1,1,
2133          0,0,0,0,0,0,0,0,
2134          0,0,0,0,0,0,0,0,
2135          0,0,0,0,0,0,0,0,
2136          0,0,0,0,0,0,0,0,
2137          0,0,0,0,0,0,0,0};
2138     int score=0;
2139     int run=0;
2140     int i;
2141     DCTELEM *block= s->block[n];
2142     const int last_index= s->block_last_index[n];
2143     int skip_dc;
2144
2145     if(threshold<0){
2146         skip_dc=0;
2147         threshold= -threshold;
2148     }else
2149         skip_dc=1;
2150
2151     /* are all the coefficients which we could set to zero already zero? */
2152     if(last_index<=skip_dc - 1) return;
2153
2154     for(i=0; i<=last_index; i++){
2155         const int j = s->intra_scantable.permutated[i];
2156         const int level = ABS(block[j]);
2157         if(level==1){
2158             if(skip_dc && i==0) continue;
2159             score+= tab[run];
2160             run=0;
2161         }else if(level>1){
2162             return;
2163         }else{
2164             run++;
2165         }
2166     }
2167     if(score >= threshold) return;
2168     for(i=skip_dc; i<=last_index; i++){
2169         const int j = s->intra_scantable.permutated[i];
2170         block[j]=0;
2171     }
2172     if(block[0]) s->block_last_index[n]= 0;
2173     else         s->block_last_index[n]= -1;
2174 }
2175
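/* clip the quantized coefficients to [min_qcoeff, max_qcoeff], leaving the
   intra dc coefficient untouched */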
2176 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index)
2177 {
2178     int i;
2179     const int maxlevel= s->max_qcoeff;
2180     const int minlevel= s->min_qcoeff;
2181     
2182     if(s->mb_intra){
2183         i=1; //skip clipping of intra dc
2184     }else
2185         i=0;
2186     
2187     for(;i<=last_index; i++){
2188         const int j= s->intra_scantable.permutated[i];
2189         int level = block[j];
2190        
2191         if     (level>maxlevel) level=maxlevel;
2192         else if(level<minlevel) level=minlevel;
2193         block[j]= level;
2194     }
2195 }
2196
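/* rescale the already quantized coefficients from quantizer oldq to newq
   and update block_last_index accordingly */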
2197 static inline void requantize_coeffs(MpegEncContext *s, DCTELEM block[64], int oldq, int newq, int n)
2198 {
2199     int i;
2200
2201     if(s->mb_intra){
2202         i=1; //skip the intra dc coefficient
2203          //FIXME requantize, note (mpeg1/h263/h263p-aic don't need it,...)
2204     }else
2205         i=0;
2206     
2207     for(;i<=s->block_last_index[n]; i++){
2208         const int j = s->intra_scantable.permutated[i];
2209         int level = block[j];
2210         
2211         block[j]= ROUNDED_DIV(level*oldq, newq);
2212     }
2213
2214     for(i=s->block_last_index[n]; i>=0; i--){
2215         const int j = s->intra_scantable.permutated[i];
2216         if(block[j]) break;
2217     }
2218     s->block_last_index[n]= i;
2219 }
2220
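/* pick a larger quantizer so that all coefficients of the six blocks fit
   into [min_qcoeff, max_qcoeff], then requantize and clip them */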
2221 static inline void auto_requantize_coeffs(MpegEncContext *s, DCTELEM block[6][64])
2222 {
2223     int i,n, newq;
2224     const int maxlevel= s->max_qcoeff;
2225     const int minlevel= s->min_qcoeff;
2226     int largest=0, smallest=0;
2227
2228     assert(s->adaptive_quant);
2229     
2230     for(n=0; n<6; n++){
2231         if(s->mb_intra){
2232             i=1; //skip the intra dc coefficient
2233              //FIXME requantize, note (mpeg1/h263/h263p-aic don't need it,...)
2234         }else
2235             i=0;
2236
2237         for(;i<=s->block_last_index[n]; i++){
2238             const int j = s->intra_scantable.permutated[i];
2239             int level = block[n][j];
2240             if(largest  < level) largest = level;
2241             if(smallest > level) smallest= level;
2242         }
2243     }
2244     
2245     for(newq=s->qscale+1; newq<32; newq++){
2246         if(   ROUNDED_DIV(smallest*s->qscale, newq) >= minlevel
2247            && ROUNDED_DIV(largest *s->qscale, newq) <= maxlevel) 
2248             break;
2249     }
2250         
2251     if(s->out_format==FMT_H263){
2252         /* H.263-like formats cannot easily change qscale by more than 2 */
2253         if(s->avctx->qmin + 2 < newq)
2254             newq= s->avctx->qmin + 2;
2255     }
2256
2257     for(n=0; n<6; n++){
2258         requantize_coeffs(s, block[n], s->qscale, newq, n);
2259         clip_coeffs(s, block[n], s->block_last_index[n]);
2260     }
2261      
2262     s->dquant+= newq - s->qscale;
2263     s->qscale= newq;
2264 }
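/* vertical activity measures used for the interlaced DCT decision; the SAD
   based versions in the disabled branch are kept for reference, the squared
   difference versions below are the ones actually compiled */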
2265 #if 0
2266 static int pix_vcmp16x8(UINT8 *s, int stride){ //FIXME move to dsputil & optimize
2267     int score=0;
2268     int x,y;
2269     
2270     for(y=0; y<7; y++){
2271         for(x=0; x<16; x+=4){
2272             score+= ABS(s[x  ] - s[x  +stride]) + ABS(s[x+1] - s[x+1+stride]) 
2273                    +ABS(s[x+2] - s[x+2+stride]) + ABS(s[x+3] - s[x+3+stride]);
2274         }
2275         s+= stride;
2276     }
2277     
2278     return score;
2279 }
2280
2281 static int pix_diff_vcmp16x8(UINT8 *s1, UINT8*s2, int stride){ //FIXME move to dsputil & optimize
2282     int score=0;
2283     int x,y;
2284     
2285     for(y=0; y<7; y++){
2286         for(x=0; x<16; x++){
2287             score+= ABS(s1[x  ] - s2[x ] - s1[x  +stride] + s2[x +stride]);
2288         }
2289         s1+= stride;
2290         s2+= stride;
2291     }
2292     
2293     return score;
2294 }
2295 #else
2296 #define SQ(a) ((a)*(a))
2297
2298 static int pix_vcmp16x8(UINT8 *s, int stride){ //FIXME move to dsputil & optimize
2299     int score=0;
2300     int x,y;
2301     
2302     for(y=0; y<7; y++){
2303         for(x=0; x<16; x+=4){
2304             score+= SQ(s[x  ] - s[x  +stride]) + SQ(s[x+1] - s[x+1+stride]) 
2305                    +SQ(s[x+2] - s[x+2+stride]) + SQ(s[x+3] - s[x+3+stride]);
2306         }
2307         s+= stride;
2308     }
2309     
2310     return score;
2311 }
2312
2313 static int pix_diff_vcmp16x8(UINT8 *s1, UINT8*s2, int stride){ //FIXME move to dsputil & optimize
2314     int score=0;
2315     int x,y;
2316     
2317     for(y=0; y<7; y++){
2318         for(x=0; x<16; x++){
2319             score+= SQ(s1[x  ] - s2[x ] - s1[x  +stride] + s2[x +stride]);
2320         }
2321         s1+= stride;
2322         s2+= stride;
2323     }
2324     
2325     return score;
2326 }
2327
2328 #endif
2329
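/**
 * calls the draw_horiz_band() callback of the application, if set, for the
 * (at most 16 pixel high) band that has just been completed
 */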
2330 void ff_draw_horiz_band(MpegEncContext *s){
2331     if (    s->avctx->draw_horiz_band 
2332         && (s->last_picture.data[0] || s->low_delay) ) {
2333         UINT8 *src_ptr[3];
2334         int y, h, offset;
2335         y = s->mb_y * 16;
2336         h = s->height - y;
2337         if (h > 16)
2338             h = 16;
2339
2340         if(s->pict_type==B_TYPE)
2341             offset = 0;
2342         else
2343             offset = y * s->linesize;
2344
2345         if(s->pict_type==B_TYPE || s->low_delay){
2346             src_ptr[0] = s->current_picture.data[0] + offset;
2347             src_ptr[1] = s->current_picture.data[1] + (offset >> 2);
2348             src_ptr[2] = s->current_picture.data[2] + (offset >> 2);
2349         } else {
2350             src_ptr[0] = s->last_picture.data[0] + offset;
2351             src_ptr[1] = s->last_picture.data[1] + (offset >> 2);
2352             src_ptr[2] = s->last_picture.data[2] + (offset >> 2);
2353         }
2354         emms_c();
2355
2356         s->avctx->draw_horiz_band(s->avctx, src_ptr, s->linesize,
2357                                y, s->width, h);
2358     }
2359 }
2360
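/* encode one macroblock: motion compensate (inter) or fetch the source
   pixels (intra), forward DCT + quantize the six blocks and entropy code
   them with the codec specific MB encoder */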
2361 static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2362 {
2363     const int mb_x= s->mb_x;
2364     const int mb_y= s->mb_y;
2365     int i;
2366     int skip_dct[6];
2367     int dct_offset   = s->linesize*8; //default for progressive frames
2368     
2369     for(i=0; i<6; i++) skip_dct[i]=0;
2370     
2371     if(s->adaptive_quant){
2372         s->dquant= s->current_picture.qscale_table[mb_x + mb_y*s->mb_width] - s->qscale;
2373
2374         if(s->out_format==FMT_H263){
2375             if     (s->dquant> 2) s->dquant= 2;
2376             else if(s->dquant<-2) s->dquant=-2;
2377         }
2378             
2379         if(s->codec_id==CODEC_ID_MPEG4){        
2380             if(!s->mb_intra){
2381                 assert(s->dquant==0 || s->mv_type!=MV_TYPE_8X8);
2382
2383                 if(s->mv_dir&MV_DIRECT)
2384                     s->dquant=0;
2385             }
2386         }
2387         s->qscale+= s->dquant;
2388         s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
2389         s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
2390     }
2391
2392     if (s->mb_intra) {
2393         UINT8 *ptr;
2394         int wrap_y;
2395         int emu=0;
2396
2397         wrap_y = s->linesize;
2398         ptr = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
2399
2400         if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2401             ff_emulated_edge_mc(s, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2402             ptr= s->edge_emu_buffer;
2403             emu=1;
2404         }
2405         
2406         if(s->flags&CODEC_FLAG_INTERLACED_DCT){
2407             int progressive_score, interlaced_score;
2408             
2409             progressive_score= pix_vcmp16x8(ptr, wrap_y  ) + pix_vcmp16x8(ptr + wrap_y*8, wrap_y );
2410             interlaced_score = pix_vcmp16x8(ptr, wrap_y*2) + pix_vcmp16x8(ptr + wrap_y  , wrap_y*2);
2411             
2412             if(progressive_score > interlaced_score + 100){
2413                 s->interlaced_dct=1;
2414             
2415                 dct_offset= wrap_y;
2416                 wrap_y<<=1;
2417             }else
2418                 s->interlaced_dct=0;
2419         }
2420         
2421         s->dsp.get_pixels(s->block[0], ptr                 , wrap_y);
2422         s->dsp.get_pixels(s->block[1], ptr              + 8, wrap_y);
2423         s->dsp.get_pixels(s->block[2], ptr + dct_offset    , wrap_y);
2424         s->dsp.get_pixels(s->block[3], ptr + dct_offset + 8, wrap_y);
2425
2426         if(s->flags&CODEC_FLAG_GRAY){
2427             skip_dct[4]= 1;
2428             skip_dct[5]= 1;
2429         }else{
2430             int wrap_c = s->uvlinesize;
2431             ptr = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2432             if(emu){
2433                 ff_emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2434                 ptr= s->edge_emu_buffer;
2435             }
2436             s->dsp.get_pixels(s->block[4], ptr, wrap_c);
2437
2438             ptr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
2439             if(emu){
2440                 ff_emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2441                 ptr= s->edge_emu_buffer;
2442             }
2443             s->dsp.get_pixels(s->block[5], ptr, wrap_c);
2444         }
2445     }else{
2446         op_pixels_func (*op_pix)[4];
2447         qpel_mc_func (*op_qpix)[16];
2448         UINT8 *dest_y, *dest_cb, *dest_cr;
2449         UINT8 *ptr_y, *ptr_cb, *ptr_cr;
2450         int wrap_y, wrap_c;
2451         int emu=0;
2452
2453         dest_y  = s->current_picture.data[0] + (mb_y * 16 * s->linesize    ) + mb_x * 16;
2454         dest_cb = s->current_picture.data[1] + (mb_y * 8  * (s->uvlinesize)) + mb_x * 8;
2455         dest_cr = s->current_picture.data[2] + (mb_y * 8  * (s->uvlinesize)) + mb_x * 8;
2456         wrap_y = s->linesize;
2457         wrap_c = s->uvlinesize;
2458         ptr_y  = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
2459         ptr_cb = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2460         ptr_cr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
2461
2462         if ((!s->no_rounding) || s->pict_type==B_TYPE){
2463             op_pix = s->dsp.put_pixels_tab;
2464             op_qpix= s->dsp.put_qpel_pixels_tab;
2465         }else{
2466             op_pix = s->dsp.put_no_rnd_pixels_tab;
2467             op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
2468         }
2469
2470         if (s->mv_dir & MV_DIR_FORWARD) {
2471             MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2472             op_pix = s->dsp.avg_pixels_tab;
2473             op_qpix= s->dsp.avg_qpel_pixels_tab;
2474         }
2475         if (s->mv_dir & MV_DIR_BACKWARD) {
2476             MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2477         }
2478
2479         if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2480             ff_emulated_edge_mc(s, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2481             ptr_y= s->edge_emu_buffer;
2482             emu=1;
2483         }
2484         
2485         if(s->flags&CODEC_FLAG_INTERLACED_DCT){
2486             int progressive_score, interlaced_score;
2487             
2488             progressive_score= pix_diff_vcmp16x8(ptr_y           , dest_y           , wrap_y  ) 
2489                              + pix_diff_vcmp16x8(ptr_y + wrap_y*8, dest_y + wrap_y*8, wrap_y  );
2490             interlaced_score = pix_diff_vcmp16x8(ptr_y           , dest_y           , wrap_y*2)
2491                              + pix_diff_vcmp16x8(ptr_y + wrap_y  , dest_y + wrap_y  , wrap_y*2);
2492             
2493             if(progressive_score > interlaced_score + 600){
2494                 s->interlaced_dct=1;
2495             
2496                 dct_offset= wrap_y;
2497                 wrap_y<<=1;
2498             }else
2499                 s->interlaced_dct=0;
2500         }
2501         
2502         s->dsp.diff_pixels(s->block[0], ptr_y                 , dest_y                 , wrap_y);
2503         s->dsp.diff_pixels(s->block[1], ptr_y              + 8, dest_y              + 8, wrap_y);
2504         s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset    , dest_y + dct_offset    , wrap_y);
2505         s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y);
2506         
2507         if(s->flags&CODEC_FLAG_GRAY){
2508             skip_dct[4]= 1;
2509             skip_dct[5]= 1;
2510         }else{
2511             if(emu){
2512                 ff_emulated_edge_mc(s, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2513                 ptr_cb= s->edge_emu_buffer;
2514             }
2515             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2516             if(emu){
2517                 ff_emulated_edge_mc(s, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2518                 ptr_cr= s->edge_emu_buffer;
2519             }
2520             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2521         }
2522         /* pre quantization */         
2523         if(s->current_picture.mc_mb_var[s->mb_width*mb_y+ mb_x]<2*s->qscale*s->qscale){
2524             //FIXME optimize
2525             if(s->dsp.pix_abs8x8(ptr_y               , dest_y               , wrap_y) < 20*s->qscale) skip_dct[0]= 1;
2526             if(s->dsp.pix_abs8x8(ptr_y            + 8, dest_y            + 8, wrap_y) < 20*s->qscale) skip_dct[1]= 1;
2527             if(s->dsp.pix_abs8x8(ptr_y +dct_offset   , dest_y +dct_offset   , wrap_y) < 20*s->qscale) skip_dct[2]= 1;
2528             if(s->dsp.pix_abs8x8(ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y) < 20*s->qscale) skip_dct[3]= 1;
2529             if(s->dsp.pix_abs8x8(ptr_cb              , dest_cb              , wrap_c) < 20*s->qscale) skip_dct[4]= 1;
2530             if(s->dsp.pix_abs8x8(ptr_cr              , dest_cr              , wrap_c) < 20*s->qscale) skip_dct[5]= 1;
2531 #if 0
2532 {
2533  static int stat[7];
2534  int num=0;
2535  for(i=0; i<6; i++)
2536   if(skip_dct[i]) num++;
2537  stat[num]++;
2538  
2539  if(s->mb_x==0 && s->mb_y==0){
2540   for(i=0; i<7; i++){
2541    printf("%6d %1d\n", stat[i], i);
2542   }
2543  }
2544 }
2545 #endif
2546         }
2547
2548     }
2549             
2550 #if 0
2551             {
2552                 float adap_parm;
2553                 
2554                 adap_parm = ((s->avg_mb_var << 1) + s->mb_var[s->mb_width*mb_y+mb_x] + 1.0) /
2555                             ((s->mb_var[s->mb_width*mb_y+mb_x] << 1) + s->avg_mb_var + 1.0);
2556             
2557                 printf("\ntype=%c qscale=%2d adap=%0.2f dquant=%4.2f var=%4d avgvar=%4d", 
2558                         (s->mb_type[s->mb_width*mb_y+mb_x] > 0) ? 'I' : 'P', 
2559                         s->qscale, adap_parm, s->qscale*adap_parm,
2560                         s->mb_var[s->mb_width*mb_y+mb_x], s->avg_mb_var);
2561             }
2562 #endif
2563     /* DCT & quantize */
2564     if(s->out_format==FMT_MJPEG){
2565         for(i=0;i<6;i++) {
2566             int overflow;
2567             s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, 8, &overflow);
2568             if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
2569         }
2570     }else{
2571         for(i=0;i<6;i++) {
2572             if(!skip_dct[i]){
2573                 int overflow;
2574                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2575             // FIXME we could decide to change the quantizer instead of clipping
2576             // JS: I don't think that would be a good idea; it could lower quality instead
2577             //     of improving it. Only INTRADC clipping deserves a change of quantizer
2578                 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
2579             }else
2580                 s->block_last_index[i]= -1;
2581         }
2582         if(s->luma_elim_threshold && !s->mb_intra)
2583             for(i=0; i<4; i++)
2584                 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2585         if(s->chroma_elim_threshold && !s->mb_intra)
2586             for(i=4; i<6; i++)
2587                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2588     }
2589
2590     if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
2591         s->block_last_index[4]=
2592         s->block_last_index[5]= 0;
2593         s->block[4][0]=
2594         s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale;
2595     }
2596
2597 #ifdef CONFIG_ENCODERS
2598     /* huffman encode */
2599     switch(s->codec_id){ //FIXME a function pointer could be slightly faster
2600     case CODEC_ID_MPEG1VIDEO:
2601         mpeg1_encode_mb(s, s->block, motion_x, motion_y); break;
2602     case CODEC_ID_MPEG4:
2603         mpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
2604     case CODEC_ID_MSMPEG4V2:
2605     case CODEC_ID_MSMPEG4V3:
2606     case CODEC_ID_WMV1:
2607         msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
2608     case CODEC_ID_WMV2:
2609          ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break;
2610     case CODEC_ID_MJPEG:
2611         mjpeg_encode_mb(s, s->block); break;
2612     case CODEC_ID_H263:
2613     case CODEC_ID_H263P:
2614     case CODEC_ID_RV10:
2615         h263_encode_mb(s, s->block, motion_x, motion_y); break;
2616     default:
2617         assert(0);
2618     }
2619 #endif
2620 }
2621
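/**
 * copies 'length' bits from the src buffer into the bitstream, 16 bits at
 * a time
 */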
2622 void ff_copy_bits(PutBitContext *pb, UINT8 *src, int length)
2623 {
2624     int bytes= length>>4;
2625     int bits= length&15;
2626     int i;
2627
2628     if(length==0) return;
2629
2630     for(i=0; i<bytes; i++) put_bits(pb, 16, be2me_16(((uint16_t*)src)[i]));
2631     put_bits(pb, bits, be2me_16(((uint16_t*)src)[i])>>(16-bits));
2632 }
2633
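/* state save/restore helpers for the trial encoding done in encode_mb_hq():
   copy_context_before_encode() resets the parts of the context needed to
   re-encode a macroblock, copy_context_after_encode() keeps the state of
   the winning variant */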
2634 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2635     int i;
2636
2637     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2638
2639     /* mpeg1 */
2640     d->mb_incr= s->mb_incr;
2641     for(i=0; i<3; i++)
2642         d->last_dc[i]= s->last_dc[i];
2643     
2644     /* statistics */
2645     d->mv_bits= s->mv_bits;
2646     d->i_tex_bits= s->i_tex_bits;
2647     d->p_tex_bits= s->p_tex_bits;
2648     d->i_count= s->i_count;
2649     d->f_count= s->f_count;
2650     d->b_count= s->b_count;
2651     d->skip_count= s->skip_count;
2652     d->misc_bits= s->misc_bits;
2653     d->last_bits= 0;
2654
2655     d->mb_skiped= s->mb_skiped;
2656     d->qscale= s->qscale;
2657 }
2658
2659 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2660     int i;
2661
2662     memcpy(d->mv, s->mv, 2*4*2*sizeof(int)); 
2663     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2664     
2665     /* mpeg1 */
2666     d->mb_incr= s->mb_incr;
2667     for(i=0; i<3; i++)
2668         d->last_dc[i]= s->last_dc[i];
2669     
2670     /* statistics */
2671     d->mv_bits= s->mv_bits;
2672     d->i_tex_bits= s->i_tex_bits;
2673     d->p_tex_bits= s->p_tex_bits;
2674     d->i_count= s->i_count;
2675     d->f_count= s->f_count;
2676     d->b_count= s->b_count;
2677     d->skip_count= s->skip_count;
2678     d->misc_bits= s->misc_bits;
2679
2680     d->mb_intra= s->mb_intra;
2681     d->mb_skiped= s->mb_skiped;
2682     d->mv_type= s->mv_type;
2683     d->mv_dir= s->mv_dir;
2684     d->pb= s->pb;
2685     if(s->data_partitioning){
2686         d->pb2= s->pb2;
2687         d->tex_pb= s->tex_pb;
2688     }
2689     d->block= s->block;
2690     for(i=0; i<6; i++)
2691         d->block_last_index[i]= s->block_last_index[i];
2692     d->interlaced_dct= s->interlaced_dct;
2693     d->qscale= s->qscale;
2694 }
2695
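/* trial encode one macroblock with the given type and motion vectors into
   one of two alternating bit buffers and keep the variant that costs the
   fewest bits */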
2696 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, 
2697                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2698                            int *dmin, int *next_block, int motion_x, int motion_y)
2699 {
2700     int bits_count;
2701     
2702     copy_context_before_encode(s, backup, type);
2703
2704     s->block= s->blocks[*next_block];
2705     s->pb= pb[*next_block];
2706     if(s->data_partitioning){
2707         s->pb2   = pb2   [*next_block];
2708         s->tex_pb= tex_pb[*next_block];
2709     }
2710
2711     encode_mb(s, motion_x, motion_y);
2712
2713     bits_count= get_bit_count(&s->pb);
2714     if(s->data_partitioning){
2715         bits_count+= get_bit_count(&s->pb2);
2716         bits_count+= get_bit_count(&s->tex_pb);
2717     }
2718
2719     if(bits_count<*dmin){
2720         *dmin= bits_count;
2721         *next_block^=1;
2722
2723         copy_context_after_encode(best, s, type);
2724     }
2725 }
2726                 
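/* sum of squared errors of two w x h blocks, with fast dsputil paths for
   the 16x16 and 8x8 cases */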
2727 static inline int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2728     uint32_t *sq = squareTbl + 256;
2729     int acc=0;
2730     int x,y;
2731     
2732     if(w==16 && h==16) 
2733         return s->dsp.sse[0](NULL, src1, src2, stride);
2734     else if(w==8 && h==8)
2735         return s->dsp.sse[1](NULL, src1, src2, stride);
2736     
2737     for(y=0; y<h; y++){
2738         for(x=0; x<w; x++){
2739             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2740         } 
2741     }
2742     
2743     assert(acc>=0);
2744     
2745     return acc;
2746 }
2747
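/* encode one complete picture: motion estimation, scene change detection,
   qscale selection, picture header and the macroblock loop */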
2748 static void encode_picture(MpegEncContext *s, int picture_number)
2749 {
2750     int mb_x, mb_y, pdif = 0;
2751     int i;
2752     int bits;
2753     MpegEncContext best_s, backup_s;
2754     UINT8 bit_buf[2][3000];
2755     UINT8 bit_buf2[2][3000];
2756     UINT8 bit_buf_tex[2][3000];
2757     PutBitContext pb[2], pb2[2], tex_pb[2];
2758
2759     for(i=0; i<2; i++){
2760         init_put_bits(&pb    [i], bit_buf    [i], 3000, NULL, NULL);
2761         init_put_bits(&pb2   [i], bit_buf2   [i], 3000, NULL, NULL);
2762         init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000, NULL, NULL);
2763     }
2764
2765     s->picture_number = picture_number;
2766
2767     s->block_wrap[0]=
2768     s->block_wrap[1]=
2769     s->block_wrap[2]=
2770     s->block_wrap[3]= s->mb_width*2 + 2;
2771     s->block_wrap[4]=
2772     s->block_wrap[5]= s->mb_width + 2;
2773     
2774     /* Reset the average MB variance */
2775     s->current_picture.mb_var_sum = 0;
2776     s->current_picture.mc_mb_var_sum = 0;
2777
2778     /* we need to initialize some time vars before we can encode b-frames */
2779     if (s->h263_pred && !s->h263_msmpeg4)
2780         ff_set_mpeg4_time(s, s->picture_number); 
2781
2782     s->scene_change_score=0;
2783     
2784     s->qscale= (int)(s->frame_qscale + 0.5); //FIXME qscale / ... stuff for ME rate distortion
2785     
2786     if(s->msmpeg4_version){
2787         if(s->pict_type==I_TYPE)
2788             s->no_rounding=1;
2789         else if(s->flipflop_rounding)
2790             s->no_rounding ^= 1;          
2791     }else if(s->out_format == FMT_H263){
2792         if(s->pict_type==I_TYPE)
2793             s->no_rounding=0;
2794         else if(s->pict_type!=B_TYPE)
2795             s->no_rounding ^= 1;          
2796     }
2797     /* Estimate motion for every MB */
2798     s->mb_intra=0; //for the rate distortion & bit compare functions
2799     if(s->pict_type != I_TYPE){
2800         if(s->pict_type != B_TYPE){
2801             if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
2802                 s->me.pre_pass=1;
2803                 s->me.dia_size= s->avctx->pre_dia_size;
2804
2805                 for(mb_y=s->mb_height-1; mb_y >=0 ; mb_y--) {
2806                     for(mb_x=s->mb_width-1; mb_x >=0 ; mb_x--) {
2807                         s->mb_x = mb_x;
2808                         s->mb_y = mb_y;
2809                         ff_pre_estimate_p_frame_motion(s, mb_x, mb_y);
2810                     }
2811                 }
2812                 s->me.pre_pass=0;
2813             }
2814         }
2815
2816         s->me.dia_size= s->avctx->dia_size;
2817         for(mb_y=0; mb_y < s->mb_height; mb_y++) {
2818             s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
2819             s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
2820             s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
2821             s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
2822             for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2823                 s->mb_x = mb_x;
2824                 s->mb_y = mb_y;
2825                 s->block_index[0]+=2;
2826                 s->block_index[1]+=2;
2827                 s->block_index[2]+=2;
2828                 s->block_index[3]+=2;
2829                 
2830                 /* compute motion vector & mb_type and store in context */
2831                 if(s->pict_type==B_TYPE)
2832                     ff_estimate_b_frame_motion(s, mb_x, mb_y);
2833                 else
2834                     ff_estimate_p_frame_motion(s, mb_x, mb_y);
2835             }
2836         }
2837     }else /* if(s->pict_type == I_TYPE) */{
2838         /* I-Frame */
2839         //FIXME do we need to zero them?
2840         memset(s->motion_val[0], 0, sizeof(INT16)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2);
2841         memset(s->p_mv_table   , 0, sizeof(INT16)*(s->mb_width+2)*(s->mb_height+2)*2);
2842         memset(s->mb_type      , MB_TYPE_INTRA, sizeof(UINT8)*s->mb_width*s->mb_height);
2843         
2844         if(!s->fixed_qscale){
2845             /* finding spatial complexity for I-frame rate control */
2846             for(mb_y=0; mb_y < s->mb_height; mb_y++) {
2847                 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2848                     int xx = mb_x * 16;
2849                     int yy = mb_y * 16;
2850                     uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
2851                     int varc;
2852                     int sum = s->dsp.pix_sum(pix, s->linesize);
2853     
2854                     varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
2855
2856                     s->current_picture.mb_var [s->mb_width * mb_y + mb_x] = varc;
2857                     s->current_picture.mb_mean[s->mb_width * mb_y + mb_x] = (sum+128)>>8;
2858                     s->current_picture.mb_var_sum    += varc;
2859                 }
2860             }
2861         }
2862     }
2863     emms_c();
2864
2865     if(s->scene_change_score > 0 && s->pict_type == P_TYPE){
2866         s->pict_type= I_TYPE;
2867         memset(s->mb_type   , MB_TYPE_INTRA, sizeof(UINT8)*s->mb_width*s->mb_height);
2868 //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
2869     }
2870
2871     if(s->pict_type==P_TYPE || s->pict_type==S_TYPE){
2872         s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER);
2873         ff_fix_long_p_mvs(s);
    }
2874     if(s->pict_type==B_TYPE){
2875         s->f_code= ff_get_best_fcode(s, s->b_forw_mv_table, MB_TYPE_FORWARD);
2876         s->b_code= ff_get_best_fcode(s, s->b_back_mv_table, MB_TYPE_BACKWARD);
2877
2878         ff_fix_long_b_mvs(s, s->b_forw_mv_table, s->f_code, MB_TYPE_FORWARD);
2879         ff_fix_long_b_mvs(s, s->b_back_mv_table, s->b_code, MB_TYPE_BACKWARD);
2880         ff_fix_long_b_mvs(s, s->b_bidir_forw_mv_table, s->f_code, MB_TYPE_BIDIR);
2881         ff_fix_long_b_mvs(s, s->b_bidir_back_mv_table, s->b_code, MB_TYPE_BIDIR);
2882     }
2883     
2884     if (s->fixed_qscale) 
2885         s->frame_qscale = s->current_picture.quality;
2886     else
2887         s->frame_qscale = ff_rate_estimate_qscale(s);
2888
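         /* with adaptive quantization the per-MB qscales from the rate control are smoothed so the
            limited dquant range of MPEG-4 / H.263 can follow them; the picture-level qscale is then
            taken from the first macroblock */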
2889     if(s->adaptive_quant){
2890         switch(s->codec_id){
2891         case CODEC_ID_MPEG4:
2892             ff_clean_mpeg4_qscales(s);
2893             break;
2894         case CODEC_ID_H263:
2895         case CODEC_ID_H263P:
2896             ff_clean_h263_qscales(s);
2897             break;
2898         }
2899
2900         s->qscale= s->current_picture.qscale_table[0];
2901     }else
2902         s->qscale= (int)(s->frame_qscale + 0.5);
2903         
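         /* JPEG has no per-macroblock quantizer, so for MJPEG the chosen qscale is folded into the
            intra quantization matrix itself and the matrix is re-converted for the quantizer */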
2904     if (s->out_format == FMT_MJPEG) {
2905         /* for mjpeg, we do include qscale in the matrix */
2906         s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
2907         for(i=1;i<64;i++){
2908             int j= s->idct_permutation[i];
2909
2910             s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
2911         }
2912         convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, 
2913                        s->q_intra_matrix16_bias, s->intra_matrix, s->intra_quant_bias, 8, 8);
2914     }
2915     
2916     //FIXME var duplication
2917     s->current_picture.key_frame= s->pict_type == I_TYPE;
2918     s->current_picture.pict_type= s->pict_type;
2919
2920     if(s->current_picture.key_frame)
2921         s->picture_in_gop_number=0;
2922
2923     s->last_bits= get_bit_count(&s->pb);
2924     switch(s->out_format) {
2925     case FMT_MJPEG:
2926         mjpeg_picture_header(s);
2927         break;
2928     case FMT_H263:
2929         if (s->codec_id == CODEC_ID_WMV2) 
2930             ff_wmv2_encode_picture_header(s, picture_number);
2931         else if (s->h263_msmpeg4) 
2932             msmpeg4_encode_picture_header(s, picture_number);
2933         else if (s->h263_pred)
2934             mpeg4_encode_picture_header(s, picture_number);
2935         else if (s->h263_rv10) 
2936             rv10_encode_picture_header(s, picture_number);
2937         else
2938             h263_encode_picture_header(s, picture_number);
2939         break;
2940     case FMT_MPEG1:
2941         mpeg1_encode_picture_header(s, picture_number);
2942         break;
2943     }
2944     bits= get_bit_count(&s->pb);
2945     s->header_bits= bits - s->last_bits;
2946     s->last_bits= bits;
2947     s->mv_bits=0;
2948     s->misc_bits=0;
2949     s->i_tex_bits=0;
2950     s->p_tex_bits=0;
2951     s->i_count=0;
2952     s->f_count=0;
2953     s->b_count=0;
2954     s->skip_count=0;
2955
2956     for(i=0; i<3; i++){
2957         /* init last dc values */
2958         /* note: quant matrix value (8) is implied here */
2959         s->last_dc[i] = 128;
2960         
2961         s->current_picture.error[i] = 0;
2962     }
2963     s->mb_incr = 1;
2964     s->last_mv[0][0][0] = 0;
2965     s->last_mv[0][0][1] = 0;
2966
2967     if (s->codec_id==CODEC_ID_H263 || s->codec_id==CODEC_ID_H263P)
2968         s->gob_index = ff_h263_get_gob_height(s);
2969
2970     if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
2971         ff_mpeg4_init_partitions(s);
2972
2973     s->resync_mb_x=0;
2974     s->resync_mb_y=0;
2975     s->first_slice_line = 1;
2976     s->ptr_lastgob = s->pb.buf;
2977     s->ptr_last_mb_line = s->pb.buf;
2978     for(mb_y=0; mb_y < s->mb_height; mb_y++) {
2979         s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
2980         s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
2981         
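             /* block_index[0..3] address the 4 luma blocks of the current MB and [4..5] the chroma
                blocks inside the per-block arrays (motion_val, dc_val, ...); block_wrap[] is the line
                width of those arrays including a one-block border, and the chroma indices skip past
                the luma plane, hence the block_wrap[0]*(mb_height*2 + 2) offset */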
2982         s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
2983         s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
2984         s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
2985         s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
2986         s->block_index[4]= s->block_wrap[4]*(mb_y + 1)                    + s->block_wrap[0]*(s->mb_height*2 + 2);
2987         s->block_index[5]= s->block_wrap[4]*(mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2);
2988         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2989             int mb_type= s->mb_type[mb_y * s->mb_width + mb_x];
2990             const int xy= (mb_y+1) * (s->mb_width+2) + mb_x + 1;
2991 //            int d;
2992             int dmin=10000000;
2993
2994             s->mb_x = mb_x;
2995             s->mb_y = mb_y;
2996             s->block_index[0]+=2;
2997             s->block_index[1]+=2;
2998             s->block_index[2]+=2;
2999             s->block_index[3]+=2;
3000             s->block_index[4]++;
3001             s->block_index[5]++;
3002
3003             /* write gob / video packet header  */
3004             if(s->rtp_mode){
3005                 int current_packet_size, is_gob_start;
3006                 
3007                 current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob;
3008                 is_gob_start=0;
3009                 
3010                 if(s->codec_id==CODEC_ID_MPEG4){
3011                     if(current_packet_size + s->mb_line_avgsize/s->mb_width >= s->rtp_payload_size
3012                        && s->mb_y + s->mb_x>0){
3013
3014                         if(s->partitioned_frame){
3015                             ff_mpeg4_merge_partitions(s);
3016                             ff_mpeg4_init_partitions(s);
3017                         }
3018                         ff_mpeg4_encode_video_packet_header(s);
3019
3020                         if(s->flags&CODEC_FLAG_PASS1){
3021                             int bits= get_bit_count(&s->pb);
3022                             s->misc_bits+= bits - s->last_bits;
3023                             s->last_bits= bits;
3024                         }
3025                         ff_mpeg4_clean_buffers(s);
3026                         is_gob_start=1;
3027                     }
3028                 }else{
3029                     if(current_packet_size + s->mb_line_avgsize*s->gob_index >= s->rtp_payload_size
3030                        && s->mb_x==0 && s->mb_y>0 && s->mb_y%s->gob_index==0){
3031                        
3032                         h263_encode_gob_header(s, mb_y);                       
3033                         is_gob_start=1;
3034                     }
3035                 }
3036
3037                 if(is_gob_start){
3038                     s->ptr_lastgob = pbBufPtr(&s->pb);
3039                     s->first_slice_line=1;
3040                     s->resync_mb_x=mb_x;
3041                     s->resync_mb_y=mb_y;
3042                 }
3043             }
3044
3045             if(  (s->resync_mb_x   == s->mb_x)
3046                && s->resync_mb_y+1 == s->mb_y){
3047                 s->first_slice_line=0; 
3048             }
3049
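                 /* x & (x-1) clears the lowest set bit, so it is nonzero iff more than one mb_type
                    candidate survived motion estimation; in that case every candidate is trial-encoded
                    into scratch bit buffers by encode_mb_hq() and the cheapest one is kept */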
3050             if(mb_type & (mb_type-1)){ // more than 1 MB type possible
3051                 int next_block=0;
3052                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3053
3054                 copy_context_before_encode(&backup_s, s, -1);
3055                 backup_s.pb= s->pb;
3056                 best_s.data_partitioning= s->data_partitioning;
3057                 best_s.partitioned_frame= s->partitioned_frame;
3058                 if(s->data_partitioning){
3059                     backup_s.pb2= s->pb2;
3060                     backup_s.tex_pb= s->tex_pb;
3061                 }
3062
3063                 if(mb_type&MB_TYPE_INTER){
3064                     s->mv_dir = MV_DIR_FORWARD;
3065                     s->mv_type = MV_TYPE_16X16;
3066                     s->mb_intra= 0;
3067                     s->mv[0][0][0] = s->p_mv_table[xy][0];
3068                     s->mv[0][0][1] = s->p_mv_table[xy][1];
3069                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER, pb, pb2, tex_pb, 
3070                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3071                 }
3072                 if(mb_type&MB_TYPE_INTER4V){                 
3073                     s->mv_dir = MV_DIR_FORWARD;
3074                     s->mv_type = MV_TYPE_8X8;
3075                     s->mb_intra= 0;
3076                     for(i=0; i<4; i++){
3077                         s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3078                         s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3079                     }
3080                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER4V, pb, pb2, tex_pb, 
3081                                  &dmin, &next_block, 0, 0);
3082                 }
3083                 if(mb_type&MB_TYPE_FORWARD){
3084                     s->mv_dir = MV_DIR_FORWARD;
3085                     s->mv_type = MV_TYPE_16X16;
3086                     s->mb_intra= 0;
3087                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3088                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3089                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_FORWARD, pb, pb2, tex_pb, 
3090                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3091                 }
3092                 if(mb_type&MB_TYPE_BACKWARD){
3093                     s->mv_dir = MV_DIR_BACKWARD;
3094                     s->mv_type = MV_TYPE_16X16;
3095                     s->mb_intra= 0;
3096                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3097                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3098                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BACKWARD, pb, pb2, tex_pb, 
3099                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3100                 }
3101                 if(mb_type&MB_TYPE_BIDIR){
3102                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3103                     s->mv_type = MV_TYPE_16X16;
3104                     s->mb_intra= 0;
3105                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3106                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3107                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3108                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3109                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BIDIR, pb, pb2, tex_pb, 
3110                                  &dmin, &next_block, 0, 0);
3111                 }
3112                 if(mb_type&MB_TYPE_DIRECT){
3113                     int mx= s->b_direct_mv_table[xy][0];
3114                     int my= s->b_direct_mv_table[xy][1];
3115                     
3116                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3117                     s->mb_intra= 0;
3118                     ff_mpeg4_set_direct_mv(s, mx, my);
3119                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_DIRECT, pb, pb2, tex_pb, 
3120                                  &dmin, &next_block, mx, my);
3121                 }
3122                 if(mb_type&MB_TYPE_INTRA){
3123                     s->mv_dir = MV_DIR_FORWARD;
3124                     s->mv_type = MV_TYPE_16X16;
3125                     s->mb_intra= 1;
3126                     s->mv[0][0][0] = 0;
3127                     s->mv[0][0][1] = 0;
3128                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTRA, pb, pb2, tex_pb, 
3129                                  &dmin, &next_block, 0, 0);
3130                     /* force cleaning of ac/dc pred stuff if needed ... */
3131                     if(s->h263_pred || s->h263_aic)
3132                         s->mbintra_table[mb_x + mb_y*s->mb_width]=1;
3133                 }
3134                 copy_context_after_encode(s, &best_s, -1);
3135                 
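                     /* the winning candidate's bits were written into bit_buf[next_block^1]
                        (next_block is toggled each time a candidate becomes the new best);
                        append them to the real bitstream and switch s->pb back to it */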
3136                 pb_bits_count= get_bit_count(&s->pb);
3137                 flush_put_bits(&s->pb);
3138                 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3139                 s->pb= backup_s.pb;
3140                 
3141                 if(s->data_partitioning){
3142                     pb2_bits_count= get_bit_count(&s->pb2);
3143                     flush_put_bits(&s->pb2);
3144                     ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3145                     s->pb2= backup_s.pb2;
3146                     
3147                     tex_pb_bits_count= get_bit_count(&s->tex_pb);
3148                     flush_put_bits(&s->tex_pb);
3149                     ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3150                     s->tex_pb= backup_s.tex_pb;
3151                 }
3152                 s->last_bits= get_bit_count(&s->pb);
3153             } else {
3154                 int motion_x, motion_y;
3155                 int intra_score;
3156                 int inter_score= s->current_picture.mb_cmp_score[mb_x + mb_y*s->mb_width];
3157                 
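                   /* cheap intra/inter decision for P frames outside HQ mode: approximate the intra
                      cost by comparing the source macroblock against a flat block filled with its
                      mean (plus a metric-dependent bias below) and keep intra only if it beats the
                      inter score stored by motion estimation */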
3158               if(!(s->flags&CODEC_FLAG_HQ) && s->pict_type==P_TYPE){
3159                 /* get luma score */
3160                 if((s->avctx->mb_cmp&0xFF)==FF_CMP_SSE){
3161                     intra_score= (s->current_picture.mb_var[mb_x + mb_y*s->mb_width]<<8) - 500; //FIXME don't scale it down so we don't have to fix it
3162                 }else{
3163                     uint8_t *dest_y;
3164
3165                     int mean= s->current_picture.mb_mean[mb_x + mb_y*s->mb_width]; //FIXME
3166                     mean*= 0x01010101;
3167                     
3168                     dest_y  = s->new_picture.data[0] + (mb_y * 16 * s->linesize    ) + mb_x * 16;
3169                 
3170                     for(i=0; i<16; i++){
3171                         *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 0]) = mean;
3172                         *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 4]) = mean;
3173                         *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 8]) = mean;
3174                         *(uint32_t*)(&s->me.scratchpad[i*s->linesize+12]) = mean;
3175                     }
3176
3177                     s->mb_intra=1;
3178                     intra_score= s->dsp.mb_cmp[0](s, s->me.scratchpad, dest_y, s->linesize);
3179                                         
3180 /*                    printf("intra:%7d inter:%7d var:%7d mc_var.%7d\n", intra_score>>8, inter_score>>8, 
3181                         s->current_picture.mb_var[mb_x + mb_y*s->mb_width],
3182                         s->current_picture.mc_mb_var[mb_x + mb_y*s->mb_width]);*/
3183                 }
3184                 
3185                 /* get chroma score */
3186                 if(s->avctx->mb_cmp&FF_CMP_CHROMA){
3187                     int i, line;
3188                     
3189                     s->mb_intra=1;
3190                     for(i=1; i<3; i++){
3191                         uint8_t *dest_c;
3192                         int mean;
3193                         
3194                         if(s->out_format == FMT_H263){
3195                             mean= (s->dc_val[i][mb_x + (mb_y+1)*(s->mb_width+2)] + 4)>>3; //FIXME not exact but simple ;)
3196                         }else{
3197                             mean= (s->last_dc[i] + 4)>>3;
3198                         }
3199                         dest_c = s->new_picture.data[i] + (mb_y * 8  * (s->uvlinesize)) + mb_x * 8;
3200                         
3201                         mean*= 0x01010101;
3202                         for(line=0; line<8; line++){ // separate counter: i is the chroma-plane index of the outer loop
3203                             *(uint32_t*)(&s->me.scratchpad[line*s->uvlinesize+ 0]) = mean;
3204                             *(uint32_t*)(&s->me.scratchpad[line*s->uvlinesize+ 4]) = mean;
3205                         }
3206                         
3207                         intra_score+= s->dsp.mb_cmp[1](s, s->me.scratchpad, dest_c, s->uvlinesize);
3208                     }                
3209                 }
3210
3211                 /* bias */
3212                 switch(s->avctx->mb_cmp&0xFF){
3213                 default:
3214                 case FF_CMP_SAD:
3215                     intra_score+= 32*s->qscale;
3216                     break;
3217                 case FF_CMP_SSE:
3218                     intra_score+= 24*s->qscale*s->qscale;
3219                     break;
3220                 case FF_CMP_SATD:
3221                     intra_score+= 96*s->qscale;
3222                     break;
3223                 case FF_CMP_DCT:
3224                     intra_score+= 48*s->qscale;
3225                     break;
3226                 case FF_CMP_BIT:
3227                     intra_score+= 16;
3228                     break;
3229                 case FF_CMP_PSNR:
3230                 case FF_CMP_RD:
3231                     intra_score+= (s->qscale*s->qscale*109*8 + 64)>>7;
3232                     break;
3233                 }
3234
3235                 if(intra_score < inter_score)
3236                     mb_type= MB_TYPE_INTRA;
3237               }  
3238                 
3239                 s->mv_type=MV_TYPE_16X16;
3240                 // only one MB-Type possible
3241                 
3242                 switch(mb_type){
3243                 case MB_TYPE_INTRA:
3244                     s->mv_dir = MV_DIR_FORWARD;
3245                     s->mb_intra= 1;
3246                     motion_x= s->mv[0][0][0] = 0;
3247                     motion_y= s->mv[0][0][1] = 0;
3248                     break;
3249                 case MB_TYPE_INTER:
3250                     s->mv_dir = MV_DIR_FORWARD;
3251                     s->mb_intra= 0;
3252                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3253                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3254                     break;
3255                 case MB_TYPE_INTER4V:
3256                     s->mv_dir = MV_DIR_FORWARD;
3257                     s->mv_type = MV_TYPE_8X8;
3258                     s->mb_intra= 0;
3259                     for(i=0; i<4; i++){
3260                         s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3261                         s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3262                     }
3263                     motion_x= motion_y= 0;
3264                     break;
3265                 case MB_TYPE_DIRECT:
3266                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3267                     s->mb_intra= 0;
3268                     motion_x=s->b_direct_mv_table[xy][0];
3269                     motion_y=s->b_direct_mv_table[xy][1];
3270                     ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3271                     break;
3272                 case MB_TYPE_BIDIR:
3273                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3274                     s->mb_intra= 0;
3275                     motion_x=0;
3276                     motion_y=0;
3277                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3278                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3279                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3280                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3281                     break;
3282                 case MB_TYPE_BACKWARD:
3283                     s->mv_dir = MV_DIR_BACKWARD;
3284                     s->mb_intra= 0;
3285                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3286                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3287                     break;
3288                 case MB_TYPE_FORWARD:
3289                     s->mv_dir = MV_DIR_FORWARD;
3290                     s->mb_intra= 0;
3291                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3292                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3293 //                    printf(" %d %d ", motion_x, motion_y);
3294                     break;
3295                 default:
3296                     motion_x=motion_y=0; //gcc warning fix
3297                     printf("illegal MB type\n");
3298                 }
3299                 encode_mb(s, motion_x, motion_y);
3300             }
3301             /* zero the MV table entries of intra MBs in I/P/S frames; the direct mode in B frames reads these colocated vectors */
3302             if(s->mb_intra /* && I,P,S_TYPE */){
3303                 s->p_mv_table[xy][0]=0;
3304                 s->p_mv_table[xy][1]=0;
3305             }
3306
3307             MPV_decode_mb(s, s->block);
3308             
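             /* accumulate the per-plane squared error against the source so PSNR can be reported;
                macroblocks on the right/bottom edge are cropped to the real picture size */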
3309             if(s->flags&CODEC_FLAG_PSNR){
3310                 int w= 16;
3311                 int h= 16;
3312
3313                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3314                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3315
3316                 s->current_picture.error[0] += sse(
3317                     s,
3318                     s->new_picture    .data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3319                     s->current_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3320                     w, h, s->linesize);
3321                 s->current_picture.error[1] += sse(
3322                     s,
3323                     s->new_picture    .data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
3324                     s->current_picture.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
3325                     w>>1, h>>1, s->uvlinesize);
3326                 s->current_picture.error[2] += sse(
3327                     s,
3328                     s->new_picture    .data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
3329                     s->current_picture.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
3330                     w>>1, h>>1, s->uvlinesize);
3331             }
3332 //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_width, get_bit_count(&s->pb));
3333         }
3334
3335
3336         /* Obtain average mb_row size for RTP */
3337         if (s->rtp_mode) {
3338             if (mb_y==0)
3339                 s->mb_line_avgsize = pbBufPtr(&s->pb) - s->ptr_last_mb_line;
3340             else {    
3341                 s->mb_line_avgsize = (s->mb_line_avgsize + pbBufPtr(&s->pb) - s->ptr_last_mb_line) >> 1;
3342             }
3343             s->ptr_last_mb_line = pbBufPtr(&s->pb);
3344         }
3345     }
3346     emms_c();
3347
3348     if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
3349         ff_mpeg4_merge_partitions(s);
3350
3351     if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
3352         msmpeg4_encode_ext_header(s);
3353
3354     if(s->codec_id==CODEC_ID_MPEG4) 
3355         ff_mpeg4_stuffing(&s->pb);
3356
3357     //if (s->gob_number)
3358     //    fprintf(stderr,"\nNumber of GOB: %d", s->gob_number);
3359     
3360     /* Send the last GOB if RTP */    
3361     if (s->rtp_mode) {
3362         flush_put_bits(&s->pb);
3363         pdif = pbBufPtr(&s->pb) - s->ptr_lastgob;
3364         /* Call the RTP callback to send the last GOB */
3365         if (s->rtp_callback)
3366             s->rtp_callback(s->ptr_lastgob, pdif, s->gob_number);
3367         s->ptr_lastgob = pbBufPtr(&s->pb);
3368         //fprintf(stderr,"\nGOB: %2d size: %d (last)", s->gob_number, pdif);
3369     }
3370 }
3371
3372 static int dct_quantize_trellis_c(MpegEncContext *s, 
3373                         DCTELEM *block, int n,
3374                         int qscale, int *overflow){
3375     const int *qmat;
3376     const UINT8 *scantable= s->intra_scantable.scantable;
3377     int max=0;
3378     unsigned int threshold1, threshold2;
3379     int bias=0;
3380     int run_tab[65];
3381     int level_tab[65];
3382     int score_tab[65];
3383     int last_run=0;
3384     int last_level=0;
3385     int last_score= 0;
3386     int last_i= 0;
3387     int coeff[3][64];
3388     int coeff_count[64];
3389     int lambda, qmul, qadd, start_i, last_non_zero, i;
3390     const int esc_length= s->ac_esc_length;
3391     uint8_t * length;
3392     uint8_t * last_length;
3393     int score_limit=0;
3394     int left_limit= 0;
3395         
3396     s->fdct (block);
3397
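     /* H.263-style reconstruction is |dequant| = 2*qscale*|level| + ((qscale-1)|1); qmul/qadd are
        that expression scaled by 8, presumably to match the scale of s->fdct()'s output, and are
        used further down when computing the distortion of each candidate level */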
3398     qmul= qscale*16;
3399     qadd= ((qscale-1)|1)*8;
3400
3401     if (s->mb_intra) {
3402         int q;
3403         if (!s->h263_aic) {
3404             if (n < 4)
3405                 q = s->y_dc_scale;
3406             else
3407                 q = s->c_dc_scale;
3408             q = q << 3;
3409         } else{
3410             /* For AIC we skip quant/dequant of INTRADC */
3411             q = 1 << 3;
3412             qadd=0;
3413         }
3414             
3415         /* note: block[0] is assumed to be positive */
3416         block[0] = (block[0] + (q >> 1)) / q;
3417         start_i = 1;
3418         last_non_zero = 0;
3419         qmat = s->q_intra_matrix[qscale];
3420         if(s->mpeg_quant || s->codec_id== CODEC_ID_MPEG1VIDEO)
3421             bias= 1<<(QMAT_SHIFT-1);
3422         length     = s->intra_ac_vlc_length;
3423         last_length= s->intra_ac_vlc_last_length;
3424     } else {
3425         start_i = 0;
3426         last_non_zero = -1;
3427         qmat = s->q_inter_matrix[qscale];
3428         length     = s->inter_ac_vlc_length;
3429         last_length= s->inter_ac_vlc_last_length;
3430     }
3431
3432     threshold1= (1<<QMAT_SHIFT) - bias - 1;
3433     threshold2= (threshold1<<1);
3434
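     /* level = block[j]*qmat[j]; the unsigned compare below is equivalent to |level| > threshold1,
        i.e. "does this coefficient quantize to a nonzero value"; such coefficients get the quantized
        magnitude and the next smaller value as trellis candidates, the rest only keep their sign */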
3435     for(i=start_i; i<64; i++) {
3436         const int j = scantable[i];
3437         const int k= i-start_i;
3438         int level = block[j];
3439         level = level * qmat[j];
3440
3441 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
3442 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
3443         if(((unsigned)(level+threshold1))>threshold2){
3444             if(level>0){
3445                 level= (bias + level)>>QMAT_SHIFT;
3446                 coeff[0][k]= level;
3447                 coeff[1][k]= level-1;
3448 //                coeff[2][k]= level-2;
3449             }else{
3450                 level= (bias - level)>>QMAT_SHIFT;
3451                 coeff[0][k]= -level;
3452                 coeff[1][k]= -level+1;
3453 //                coeff[2][k]= -level+2;
3454             }
3455             coeff_count[k]= FFMIN(level, 2);
3456             max |=level;
3457             last_non_zero = i;
3458         }else{
3459             coeff[0][k]= (level>>31)|1;
3460             coeff_count[k]= 1;
3461         }
3462     }
3463     
3464     *overflow= s->max_qcoeff < max; //overflow might have happened
3465     
3466     if(last_non_zero < start_i){
3467         memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
3468         return last_non_zero;
3469     }
3470
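     /* rate/distortion trade-off: bits are weighted by lambda before being compared with the squared
        coefficient error; the extra factor 64 presumably accounts for the 8x scale of the DCT output,
        leaving roughly 0.82*qscale^2 in unscaled units */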
3471     lambda= (qscale*qscale*64*105 + 64)>>7; //FIXME finetune
3472         
3473     score_tab[0]= 0;
3474     for(i=0; i<=last_non_zero - start_i; i++){
3475         int level_index, run, j;
3476         const int dct_coeff= block[ scantable[i + start_i] ];
3477         const int zero_distoration= dct_coeff*d