[ffmpeg.git] / libavcodec / mpegvideo.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard.
4  *
5  * This library is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU Lesser General Public
7  * License as published by the Free Software Foundation; either
8  * version 2 of the License, or (at your option) any later version.
9  *
10  * This library is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * Lesser General Public License for more details.
14  *
15  * You should have received a copy of the GNU Lesser General Public
16  * License along with this library; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
18  *
19  * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
20  */
21  
22 #include <ctype.h>
23 #include <limits.h>
24 #include "avcodec.h"
25 #include "dsputil.h"
26 #include "mpegvideo.h"
27 #include "simple_idct.h"
28
29 #ifdef USE_FASTMEMCPY
30 #include "fastmemcpy.h"
31 #endif
32
33 //#undef NDEBUG
34 //#include <assert.h>
35
36 static void encode_picture(MpegEncContext *s, int picture_number);
37 static void dct_unquantize_mpeg1_c(MpegEncContext *s, 
38                                    DCTELEM *block, int n, int qscale);
39 static void dct_unquantize_mpeg2_c(MpegEncContext *s,
40                                    DCTELEM *block, int n, int qscale);
41 static void dct_unquantize_h263_c(MpegEncContext *s, 
42                                   DCTELEM *block, int n, int qscale);
43 static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w);
44 static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
45 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
46
47 void (*draw_edges)(UINT8 *buf, int wrap, int width, int height, int w)= draw_edges_c;
48
49
50 /* enable all paranoid tests for rounding, overflows, etc... */
51 //#define PARANOID
52
53 //#define DEBUG
54
55
56 /* for jpeg fast DCT */
57 #define CONST_BITS 14
58
59 static const uint16_t aanscales[64] = {
60     /* precomputed values scaled up by 14 bits */
61     16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
62     22725, 31521, 29692, 26722, 22725, 17855, 12299,  6270,
63     21407, 29692, 27969, 25172, 21407, 16819, 11585,  5906,
64     19266, 26722, 25172, 22654, 19266, 15137, 10426,  5315,
65     16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
66     12873, 17855, 16819, 15137, 12873, 10114,  6967,  3552,
67     8867 , 12299, 11585, 10426,  8867,  6967,  4799,  2446,
68     4520 ,  6270,  5906,  5315,  4520,  3552,  2446,  1247
69 };
70
71 /* Input permutation for the simple_idct_mmx */
72 static const uint8_t simple_mmx_permutation[64]={
73         0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D, 
74         0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D, 
75         0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D, 
76         0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F, 
77         0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F, 
78         0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D, 
79         0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F, 
80         0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
81 };
82
83 static const uint8_t h263_chroma_roundtab[16] = {
84 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
85     0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
86 };
87
88 static UINT16 (*default_mv_penalty)[MAX_MV*2+1]=NULL;
89 static UINT8 default_fcode_tab[MAX_MV*2+1];
90
91 enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
92
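/**
 * builds the per-qscale quantizer reciprocal tables so that dct_quantize()
 * can replace the division by qscale*quant_matrix[i] with a multiply and a
 * shift, roughly: level = (coeff * qmat[qscale][i]) >> QMAT_SHIFT.
 * For fdct_ifast the aanscales[] factor is folded in to undo the scaling the
 * AAN FDCT leaves in its output; qmat16/qmat16_bias are 16 bit variants
 * (note QMAT_SHIFT_MMX) for quantizer implementations that need 16 bit factors.
 */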
93 static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[64], uint16_t (*qmat16_bias)[64],
94                            const UINT16 *quant_matrix, int bias, int qmin, int qmax)
95 {
96     int qscale;
97
98     for(qscale=qmin; qscale<=qmax; qscale++){
99         int i;
100         if (s->fdct == ff_jpeg_fdct_islow) {
101             for(i=0;i<64;i++) {
102                 const int j= s->idct_permutation[i];
103                 /* 16 <= qscale * quant_matrix[i] <= 7905 */
104                 /* the islow FDCT is not pre-scaled, so unlike the fdct_ifast case   */
105                 /* below no aanscales factor is needed here; the reciprocal is simply */
106                 /* (1<<QMAT_SHIFT) / (qscale * quant_matrix[i])                       */
107                 
108                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / 
109                                 (qscale * quant_matrix[j]));
110             }
111         } else if (s->fdct == fdct_ifast) {
112             for(i=0;i<64;i++) {
113                 const int j= s->idct_permutation[i];
114                 /* 16 <= qscale * quant_matrix[i] <= 7905 */
115                 /* 19952         <= aanscales[i] * qscale * quant_matrix[i]           <= 249205026 */
116                 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
117                 /* 3444240       >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
118                 
119                 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) / 
120                                 (aanscales[i] * qscale * quant_matrix[j]));
121             }
122         } else {
123             for(i=0;i<64;i++) {
124                 const int j= s->idct_permutation[i];
125                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
126                    So 16           <= qscale * quant_matrix[i]             <= 7905
127                    so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
128                    so 32768        >= (1<<19) / (qscale * quant_matrix[i]) >= 67
129                 */
130                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
131 //                qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
132                 qmat16[qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);
133
134                 if(qmat16[qscale][i]==0 || qmat16[qscale][i]==128*256) qmat16[qscale][i]=128*256-1;
135                 qmat16_bias[qscale][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][i]);
136             }
137         }
138     }
139 }
140 // move into common.c perhaps 
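// note: every function that uses CHECKED_ALLOCZ must provide a local "fail:" label for the error path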
141 #define CHECKED_ALLOCZ(p, size)\
142 {\
143     p= av_mallocz(size);\
144     if(p==NULL){\
145         perror("malloc");\
146         goto fail;\
147     }\
148 }
149
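/**
 * initializes a ScanTable: permutated[] is the scan order combined with the
 * active IDCT input permutation, raster_end[i] is the highest permutated
 * index occurring in scan positions 0..i, and (on PPC) inverse[] maps a
 * raster coefficient index back to its position in the scan.
 */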
150 void ff_init_scantable(MpegEncContext *s, ScanTable *st, const UINT8 *src_scantable){
151     int i;
152     int end;
153     
154     st->scantable= src_scantable;
155
156     for(i=0; i<64; i++){
157         int j;
158         j = src_scantable[i];
159         st->permutated[i] = s->idct_permutation[j];
160 #ifdef ARCH_POWERPC
161         st->inverse[j] = i;
162 #endif
163     }
164     
165     end=-1;
166     for(i=0; i<64; i++){
167         int j;
168         j = st->permutated[i];
169         if(j>end) end=j;
170         st->raster_end[i]= end;
171     }
172 }
173
174 /* XXX: these functions should be removed ASAP once all IDCTs are
175    converted */
176 // FIXME: this is an ugly hack using local statics
177 static void (*ff_put_pixels_clamped)(const DCTELEM *block, UINT8 *pixels, int line_size);
178 static void (*ff_add_pixels_clamped)(const DCTELEM *block, UINT8 *pixels, int line_size);
179 static void ff_jref_idct_put(UINT8 *dest, int line_size, DCTELEM *block)
180 {
181     j_rev_dct (block);
182     ff_put_pixels_clamped(block, dest, line_size);
183 }
184 static void ff_jref_idct_add(UINT8 *dest, int line_size, DCTELEM *block)
185 {
186     j_rev_dct (block);
187     ff_add_pixels_clamped(block, dest, line_size);
188 }
189
190 /* init common dct for both encoder and decoder */
191 int DCT_common_init(MpegEncContext *s)
192 {
193     int i;
194
195     ff_put_pixels_clamped = s->dsp.put_pixels_clamped;
196     ff_add_pixels_clamped = s->dsp.add_pixels_clamped;
197
198     s->dct_unquantize_h263 = dct_unquantize_h263_c;
199     s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_c;
200     s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_c;
201     s->dct_quantize= dct_quantize_c;
202
203     if(s->avctx->dct_algo==FF_DCT_FASTINT)
204         s->fdct = fdct_ifast;
205     else
206         s->fdct = ff_jpeg_fdct_islow; //slow/accurate/default
207
208     if(s->avctx->idct_algo==FF_IDCT_INT){
209         s->idct_put= ff_jref_idct_put;
210         s->idct_add= ff_jref_idct_add;
211         s->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
212     }else{ //accurate/default
213         s->idct_put= simple_idct_put;
214         s->idct_add= simple_idct_add;
215         s->idct_permutation_type= FF_NO_IDCT_PERM;
216     }
217         
218 #ifdef HAVE_MMX
219     MPV_common_init_mmx(s);
220 #endif
221 #ifdef ARCH_ALPHA
222     MPV_common_init_axp(s);
223 #endif
224 #ifdef HAVE_MLIB
225     MPV_common_init_mlib(s);
226 #endif
227 #ifdef HAVE_MMI
228     MPV_common_init_mmi(s);
229 #endif
230 #ifdef ARCH_ARMV4L
231     MPV_common_init_armv4l(s);
232 #endif
233 #ifdef ARCH_POWERPC
234     MPV_common_init_ppc(s);
235 #endif
236
237     s->fast_dct_quantize= s->dct_quantize;
238
239     if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
240         s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
241     }
242
243     switch(s->idct_permutation_type){
244     case FF_NO_IDCT_PERM:
245         for(i=0; i<64; i++)
246             s->idct_permutation[i]= i;
247         break;
248     case FF_LIBMPEG2_IDCT_PERM:
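        /* rotate the low 3 (column) bits right by one; the row bits (0x38) stay in place */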
249         for(i=0; i<64; i++)
250             s->idct_permutation[i]= (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2);
251         break;
252     case FF_SIMPLE_IDCT_PERM:
253         for(i=0; i<64; i++)
254             s->idct_permutation[i]= simple_mmx_permutation[i];
255         break;
256     case FF_TRANSPOSE_IDCT_PERM:
257         for(i=0; i<64; i++)
258             s->idct_permutation[i]= ((i&7)<<3) | (i>>3);
259         break;
260     default:
261         fprintf(stderr, "Internal error, IDCT permutation not set\n");
262         return -1;
263     }
264
265
266     /* load & permute scantables
267        note: only WMV uses different ones 
268     */
269     ff_init_scantable(s, &s->inter_scantable  , ff_zigzag_direct);
270     ff_init_scantable(s, &s->intra_scantable  , ff_zigzag_direct);
271     ff_init_scantable(s, &s->intra_h_scantable, ff_alternate_horizontal_scan);
272     ff_init_scantable(s, &s->intra_v_scantable, ff_alternate_vertical_scan);
273
274     return 0;
275 }
276
277 /**
278  * allocates a Picture
279  * The pixels are allocated/set by calling get_buffer() if shared=0
280  */
281 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
282     
283     if(shared){
284         assert(pic->data[0]);
285         assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
286         pic->type= FF_BUFFER_TYPE_SHARED;
287     }else{
288         int r;
289         
290         assert(!pic->data[0]);
291         
292         r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
293         
294         if(r<0 || !pic->age || !pic->type || !pic->data[0]){
295             fprintf(stderr, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
296             return -1;
297         }
298
299         if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
300             fprintf(stderr, "get_buffer() failed (stride changed)\n");
301             return -1;
302         }
303
304         if(pic->linesize[1] != pic->linesize[2]){
305             fprintf(stderr, "get_buffer() failed (uv stride mismatch)\n");
306             return -1;
307         }
308
309         s->linesize  = pic->linesize[0];
310         s->uvlinesize= pic->linesize[1];
311     }
312     
313     if(pic->qscale_table==NULL){
314         if (s->encoding) {        
315             CHECKED_ALLOCZ(pic->mb_var   , s->mb_num * sizeof(INT16))
316             CHECKED_ALLOCZ(pic->mc_mb_var, s->mb_num * sizeof(INT16))
317             CHECKED_ALLOCZ(pic->mb_mean  , s->mb_num * sizeof(INT8))
318             CHECKED_ALLOCZ(pic->mb_cmp_score, s->mb_num * sizeof(int32_t))
319         }
320
321         CHECKED_ALLOCZ(pic->mbskip_table , s->mb_num * sizeof(UINT8)+1) //the +1 is for the slice end check
322         CHECKED_ALLOCZ(pic->qscale_table , s->mb_num * sizeof(UINT8))
323         pic->qstride= s->mb_width;
324     }
325     
326     //it might be nicer if the application kept track of these, but it would require an API change
327     memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
328     s->prev_pict_types[0]= s->pict_type;
329     if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
330         pic->age= INT_MAX; // skipped MBs in B-frames are quite rare in MPEG-1/2 and it's a bit tricky to skip them anyway
331     
332     return 0;
333 fail: //for the CHECKED_ALLOCZ macro
334     return -1;
335 }
336
337 /**
338  * deallocates a picture
339  */
340 static void free_picture(MpegEncContext *s, Picture *pic){
341     int i;
342
343     if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
344         s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
345     }
346
347     av_freep(&pic->mb_var);
348     av_freep(&pic->mc_mb_var);
349     av_freep(&pic->mb_mean);
350     av_freep(&pic->mb_cmp_score);
351     av_freep(&pic->mbskip_table);
352     av_freep(&pic->qscale_table);
353     
354     if(pic->type == FF_BUFFER_TYPE_INTERNAL){
355         for(i=0; i<4; i++){
356             av_freep(&pic->base[i]);
357             pic->data[i]= NULL;
358         }
359         av_freep(&pic->opaque);
360         pic->type= 0;
361     }else if(pic->type == FF_BUFFER_TYPE_SHARED){
362         for(i=0; i<4; i++){
363             pic->base[i]=
364             pic->data[i]= NULL;
365         }
366         pic->type= 0;        
367     }
368 }
369
370 /* init common structure for both encoder and decoder */
371 int MPV_common_init(MpegEncContext *s)
372 {
373     int y_size, c_size, yc_size, i;
374
375     dsputil_init(&s->dsp, s->avctx->dsp_mask);
376     DCT_common_init(s);
377
378     s->flags= s->avctx->flags;
379
380     s->mb_width  = (s->width  + 15) / 16;
381     s->mb_height = (s->height + 15) / 16;
382
383     /* set default edge pos, will be overridden in decode_header if needed */
384     s->h_edge_pos= s->mb_width*16;
385     s->v_edge_pos= s->mb_height*16;
386
387     s->mb_num = s->mb_width * s->mb_height;
388
389     y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
390     c_size = (s->mb_width + 2) * (s->mb_height + 2);
391     yc_size = y_size + 2 * c_size;
392
393     /* convert fourcc to upper case */
394     s->avctx->fourcc=   toupper( s->avctx->fourcc     &0xFF)          
395                      + (toupper((s->avctx->fourcc>>8 )&0xFF)<<8 )
396                      + (toupper((s->avctx->fourcc>>16)&0xFF)<<16) 
397                      + (toupper((s->avctx->fourcc>>24)&0xFF)<<24);
398
399     CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
400     s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17;
401
402     s->avctx->coded_frame= (AVFrame*)&s->current_picture;
403
404     if (s->encoding) {
405         int mv_table_size= (s->mb_width+2)*(s->mb_height+2);
406
407         /* Allocate MV tables */
408         CHECKED_ALLOCZ(s->p_mv_table            , mv_table_size * 2 * sizeof(INT16))
409         CHECKED_ALLOCZ(s->b_forw_mv_table       , mv_table_size * 2 * sizeof(INT16))
410         CHECKED_ALLOCZ(s->b_back_mv_table       , mv_table_size * 2 * sizeof(INT16))
411         CHECKED_ALLOCZ(s->b_bidir_forw_mv_table , mv_table_size * 2 * sizeof(INT16))
412         CHECKED_ALLOCZ(s->b_bidir_back_mv_table , mv_table_size * 2 * sizeof(INT16))
413         CHECKED_ALLOCZ(s->b_direct_mv_table     , mv_table_size * 2 * sizeof(INT16))
414
415         //FIXME should be linesize instead of s->width*2 but that isn't known before get_buffer()
416         CHECKED_ALLOCZ(s->me.scratchpad,  s->width*2*16*3*sizeof(uint8_t)) 
417         
418         CHECKED_ALLOCZ(s->me.map      , ME_MAP_SIZE*sizeof(uint32_t))
419         CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
420
421         if(s->codec_id==CODEC_ID_MPEG4){
422             CHECKED_ALLOCZ(s->tex_pb_buffer, PB_BUFFER_SIZE);
423             CHECKED_ALLOCZ(   s->pb2_buffer, PB_BUFFER_SIZE);
424         }
425         
426         if(s->msmpeg4_version){
427             CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
428         }
429         CHECKED_ALLOCZ(s->avctx->stats_out, 256);
430     }
431         
432     CHECKED_ALLOCZ(s->error_status_table, s->mb_num*sizeof(UINT8))
433     
434     if (s->out_format == FMT_H263 || s->encoding) {
435         int size;
436         /* Allocate MB type table */
437         CHECKED_ALLOCZ(s->mb_type  , s->mb_num * sizeof(UINT8))
438
439         /* MV prediction */
440         size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
441         CHECKED_ALLOCZ(s->motion_val, size * 2 * sizeof(INT16));
442     }
443
444     if(s->codec_id==CODEC_ID_MPEG4){
445         /* interlaced direct mode decoding tables */
446         CHECKED_ALLOCZ(s->field_mv_table, s->mb_num*2*2 * sizeof(INT16))
447         CHECKED_ALLOCZ(s->field_select_table, s->mb_num*2* sizeof(INT8))
448     }
449     /* 4MV B-frame decoding table */
450     //note: this is needed for H.263 without B-frames too (segfault on damaged streams otherwise)
451     CHECKED_ALLOCZ(s->co_located_type_table, s->mb_num * sizeof(UINT8))
452     if (s->out_format == FMT_H263) {
453         /* ac values */
454         CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(INT16) * 16);
455         s->ac_val[1] = s->ac_val[0] + y_size;
456         s->ac_val[2] = s->ac_val[1] + c_size;
457         
458         /* cbp values */
459         CHECKED_ALLOCZ(s->coded_block, y_size);
460         
461         /* divx501 bitstream reorder buffer */
462         CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
463
464         /* cbp, ac_pred, pred_dir */
465         CHECKED_ALLOCZ(s->cbp_table  , s->mb_num * sizeof(UINT8))
466         CHECKED_ALLOCZ(s->pred_dir_table, s->mb_num * sizeof(UINT8))
467     }
468     
469     if (s->h263_pred || s->h263_plus || !s->encoding) {
470         /* dc values */
471         //MN: we need these for error resilience of intra-frames
472         CHECKED_ALLOCZ(s->dc_val[0], yc_size * sizeof(INT16));
473         s->dc_val[1] = s->dc_val[0] + y_size;
474         s->dc_val[2] = s->dc_val[1] + c_size;
475         for(i=0;i<yc_size;i++)
476             s->dc_val[0][i] = 1024;
477     }
478
479     /* which MBs are intra blocks */
480     CHECKED_ALLOCZ(s->mbintra_table, s->mb_num);
481     memset(s->mbintra_table, 1, s->mb_num);
482     
483     /* default structure is frame */
484     s->picture_structure = PICT_FRAME;
485     
486     /* init macroblock skip table */
487     CHECKED_ALLOCZ(s->mbskip_table, s->mb_num+1);
488     //Note the +1 is for a quicker mpeg4 slice_end detection
489     CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
490     
491     s->block= s->blocks[0];
492
493     s->parse_context.state= -1;
494
495     s->context_initialized = 1;
496     return 0;
497  fail:
498     MPV_common_end(s);
499     return -1;
500 }
501
502
503 //extern int sads;
504
505 /* free the structures common to encoder and decoder */
506 void MPV_common_end(MpegEncContext *s)
507 {
508     int i;
509
510     av_freep(&s->mb_type);
511     av_freep(&s->p_mv_table);
512     av_freep(&s->b_forw_mv_table);
513     av_freep(&s->b_back_mv_table);
514     av_freep(&s->b_bidir_forw_mv_table);
515     av_freep(&s->b_bidir_back_mv_table);
516     av_freep(&s->b_direct_mv_table);
517     av_freep(&s->motion_val);
518     av_freep(&s->dc_val[0]);
519     av_freep(&s->ac_val[0]);
520     av_freep(&s->coded_block);
521     av_freep(&s->mbintra_table);
522     av_freep(&s->cbp_table);
523     av_freep(&s->pred_dir_table);
524     av_freep(&s->me.scratchpad);
525     av_freep(&s->me.map);
526     av_freep(&s->me.score_map);
527     
528     av_freep(&s->mbskip_table);
529     av_freep(&s->prev_pict_types);
530     av_freep(&s->bitstream_buffer);
531     av_freep(&s->tex_pb_buffer);
532     av_freep(&s->pb2_buffer);
533     av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
534     av_freep(&s->co_located_type_table);
535     av_freep(&s->field_mv_table);
536     av_freep(&s->field_select_table);
537     av_freep(&s->avctx->stats_out);
538     av_freep(&s->ac_stats);
539     av_freep(&s->error_status_table);
540
541     for(i=0; i<MAX_PICTURE_COUNT; i++){
542         free_picture(s, &s->picture[i]);
543     }
544     s->context_initialized = 0;
545 }
546
547 /* init video encoder */
548 int MPV_encode_init(AVCodecContext *avctx)
549 {
550     MpegEncContext *s = avctx->priv_data;
551     int i;
552
553     avctx->pix_fmt = PIX_FMT_YUV420P;
554
555     s->bit_rate = avctx->bit_rate;
556     s->bit_rate_tolerance = avctx->bit_rate_tolerance;
557     s->frame_rate = avctx->frame_rate;
558     s->width = avctx->width;
559     s->height = avctx->height;
560     if(avctx->gop_size > 600){
561         fprintf(stderr, "Warning: keyframe interval too large, reducing it ...\n");
562         avctx->gop_size=600;
563     }
564     s->gop_size = avctx->gop_size;
565     s->rtp_mode = avctx->rtp_mode;
566     s->rtp_payload_size = avctx->rtp_payload_size;
567     if (avctx->rtp_callback)
568         s->rtp_callback = avctx->rtp_callback;
569     s->qmin= avctx->qmin;
570     s->qmax= avctx->qmax;
571     s->max_qdiff= avctx->max_qdiff;
572     s->qcompress= avctx->qcompress;
573     s->qblur= avctx->qblur;
574     s->avctx = avctx;
575     s->flags= avctx->flags;
576     s->max_b_frames= avctx->max_b_frames;
577     s->b_frame_strategy= avctx->b_frame_strategy;
578     s->codec_id= avctx->codec->id;
579     s->luma_elim_threshold  = avctx->luma_elim_threshold;
580     s->chroma_elim_threshold= avctx->chroma_elim_threshold;
581     s->strict_std_compliance= avctx->strict_std_compliance;
582     s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
583     s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
584     s->mpeg_quant= avctx->mpeg_quant;
585
586     if (s->gop_size <= 1) {
587         s->intra_only = 1;
588         s->gop_size = 12;
589     } else {
590         s->intra_only = 0;
591     }
592
593     s->me_method = avctx->me_method;
594
595     /* Fixed QSCALE */
596     s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE);
597     
598     s->adaptive_quant= (   s->avctx->lumi_masking
599                         || s->avctx->dark_masking
600                         || s->avctx->temporal_cplx_masking 
601                         || s->avctx->spatial_cplx_masking
602                         || s->avctx->p_masking)
603                        && !s->fixed_qscale;
604     
605     s->progressive_sequence= !(avctx->flags & CODEC_FLAG_INTERLACED_DCT);
606
607     switch(avctx->codec->id) {
608     case CODEC_ID_MPEG1VIDEO:
609         s->out_format = FMT_MPEG1;
610         avctx->delay=0; //FIXME not sure, should check the spec
611         break;
612     case CODEC_ID_MJPEG:
613         s->out_format = FMT_MJPEG;
614         s->intra_only = 1; /* force intra only for jpeg */
615         s->mjpeg_write_tables = 1; /* write all tables */
616         s->mjpeg_data_only_frames = 0; /* write all the needed headers */
617         s->mjpeg_vsample[0] = 2; /* set up default sampling factors */
618         s->mjpeg_vsample[1] = 1; /* the only currently supported values */
619         s->mjpeg_vsample[2] = 1; 
620         s->mjpeg_hsample[0] = 2;
621         s->mjpeg_hsample[1] = 1; 
622         s->mjpeg_hsample[2] = 1; 
623         if (mjpeg_init(s) < 0)
624             return -1;
625         avctx->delay=0;
626         s->low_delay=1;
627         break;
628 #ifdef CONFIG_RISKY
629     case CODEC_ID_H263:
630         if (h263_get_picture_format(s->width, s->height) == 7) {
631             printf("Input picture size isn't suitable for h263 codec! try h263+\n");
632             return -1;
633         }
634         s->out_format = FMT_H263;
635         avctx->delay=0;
636         s->low_delay=1;
637         break;
638     case CODEC_ID_H263P:
639         s->out_format = FMT_H263;
640         s->h263_plus = 1;
641         s->unrestricted_mv = 1;
642         s->h263_aic = 1;
643         
644         /* These are just to be sure */
645         s->umvplus = 0;
646         s->umvplus_dec = 0;
647         avctx->delay=0;
648         s->low_delay=1;
649         break;
650     case CODEC_ID_RV10:
651         s->out_format = FMT_H263;
652         s->h263_rv10 = 1;
653         avctx->delay=0;
654         s->low_delay=1;
655         break;
656     case CODEC_ID_MPEG4:
657         s->out_format = FMT_H263;
658         s->h263_pred = 1;
659         s->unrestricted_mv = 1;
660         s->low_delay= s->max_b_frames ? 0 : 1;
661         avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
662         break;
663     case CODEC_ID_MSMPEG4V1:
664         s->out_format = FMT_H263;
665         s->h263_msmpeg4 = 1;
666         s->h263_pred = 1;
667         s->unrestricted_mv = 1;
668         s->msmpeg4_version= 1;
669         avctx->delay=0;
670         s->low_delay=1;
671         break;
672     case CODEC_ID_MSMPEG4V2:
673         s->out_format = FMT_H263;
674         s->h263_msmpeg4 = 1;
675         s->h263_pred = 1;
676         s->unrestricted_mv = 1;
677         s->msmpeg4_version= 2;
678         avctx->delay=0;
679         s->low_delay=1;
680         break;
681     case CODEC_ID_MSMPEG4V3:
682         s->out_format = FMT_H263;
683         s->h263_msmpeg4 = 1;
684         s->h263_pred = 1;
685         s->unrestricted_mv = 1;
686         s->msmpeg4_version= 3;
687         avctx->delay=0;
688         s->low_delay=1;
689         break;
690     case CODEC_ID_WMV1:
691         s->out_format = FMT_H263;
692         s->h263_msmpeg4 = 1;
693         s->h263_pred = 1;
694         s->unrestricted_mv = 1;
695         s->msmpeg4_version= 4;
696         avctx->delay=0;
697         s->low_delay=1;
698         break;
699     case CODEC_ID_WMV2:
700         s->out_format = FMT_H263;
701         s->h263_msmpeg4 = 1;
702         s->h263_pred = 1;
703         s->unrestricted_mv = 1;
704         s->msmpeg4_version= 5;
705         avctx->delay=0;
706         s->low_delay=1;
707         break;
708 #endif
709     default:
710         return -1;
711     }
712     
713     { /* set up some safe defaults, some codecs might override them later */
714         static int done=0;
715         if(!done){
716             int i;
717             done=1;
718
719             default_mv_penalty= av_mallocz( sizeof(UINT16)*(MAX_FCODE+1)*(2*MAX_MV+1) );
720             memset(default_mv_penalty, 0, sizeof(UINT16)*(MAX_FCODE+1)*(2*MAX_MV+1));
721             memset(default_fcode_tab , 0, sizeof(UINT8)*(2*MAX_MV+1));
722
723             for(i=-16; i<16; i++){
724                 default_fcode_tab[i + MAX_MV]= 1;
725             }
726         }
727     }
728     s->me.mv_penalty= default_mv_penalty;
729     s->fcode_tab= default_fcode_tab;
730     s->y_dc_scale_table=
731     s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
732  
733     /* don't use the mv_penalty table for the crap MV estimation as it would get confused */
734     //FIXME remove after fixing / removing old ME
735     if (s->me_method < ME_EPZS) s->me.mv_penalty = default_mv_penalty;
736
737     s->encoding = 1;
738
739     /* init */
740     if (MPV_common_init(s) < 0)
741         return -1;
742     
743     ff_init_me(s);
744
745 #ifdef CONFIG_ENCODERS
746 #ifdef CONFIG_RISKY
747     if (s->out_format == FMT_H263)
748         h263_encode_init(s);
749     if(s->msmpeg4_version)
750         ff_msmpeg4_encode_init(s);
751 #endif
752     if (s->out_format == FMT_MPEG1)
753         ff_mpeg1_encode_init(s);
754 #endif
755
756     /* init default q matrix */
757     for(i=0;i<64;i++) {
758         int j= s->idct_permutation[i];
759 #ifdef CONFIG_RISKY
760         if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
761             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
762             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
763         }else if(s->out_format == FMT_H263){
764             s->intra_matrix[j] =
765             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
766         }else
767 #endif
768         { /* mpeg1 */
769             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
770             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
771         }
772     }
773
774     /* precompute matrix */
775     /* for mjpeg, we do include qscale in the matrix */
776     if (s->out_format != FMT_MJPEG) {
777         convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, s->q_intra_matrix16_bias, 
778                        s->intra_matrix, s->intra_quant_bias, 1, 31);
779         convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16, s->q_inter_matrix16_bias, 
780                        s->inter_matrix, s->inter_quant_bias, 1, 31);
781     }
782
783     if(ff_rate_control_init(s) < 0)
784         return -1;
785
786     s->picture_number = 0;
787     s->picture_in_gop_number = 0;
788     s->fake_picture_number = 0;
789     /* motion detector init */
790     s->f_code = 1;
791     s->b_code = 1;
792
793     return 0;
794 }
795
796 int MPV_encode_end(AVCodecContext *avctx)
797 {
798     MpegEncContext *s = avctx->priv_data;
799
800 #ifdef STATS
801     print_stats();
802 #endif
803
804     ff_rate_control_uninit(s);
805
806     MPV_common_end(s);
807     if (s->out_format == FMT_MJPEG)
808         mjpeg_close(s);
809       
810     return 0;
811 }
812
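/**
 * builds the helper tables for a run-level VLC table, separately for the
 * "not last" and "last" coefficient groups: the largest level per run
 * (max_level), the largest run per level (max_run) and the index of the
 * first table entry for each run (index_run).
 */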
813 void init_rl(RLTable *rl)
814 {
815     INT8 max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
816     UINT8 index_run[MAX_RUN+1];
817     int last, run, level, start, end, i;
818
819     /* compute max_level[], max_run[] and index_run[] */
820     for(last=0;last<2;last++) {
821         if (last == 0) {
822             start = 0;
823             end = rl->last;
824         } else {
825             start = rl->last;
826             end = rl->n;
827         }
828
829         memset(max_level, 0, MAX_RUN + 1);
830         memset(max_run, 0, MAX_LEVEL + 1);
831         memset(index_run, rl->n, MAX_RUN + 1);
832         for(i=start;i<end;i++) {
833             run = rl->table_run[i];
834             level = rl->table_level[i];
835             if (index_run[run] == rl->n)
836                 index_run[run] = i;
837             if (level > max_level[run])
838                 max_level[run] = level;
839             if (run > max_run[level])
840                 max_run[level] = run;
841         }
842         rl->max_level[last] = av_malloc(MAX_RUN + 1);
843         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
844         rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
845         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
846         rl->index_run[last] = av_malloc(MAX_RUN + 1);
847         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
848     }
849 }
850
851 /* draw the edges of width 'w' of an image of size width, height */
852 //FIXME check that this is ok for mpeg4 interlaced
853 static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w)
854 {
855     UINT8 *ptr, *last_line;
856     int i;
857
858     last_line = buf + (height - 1) * wrap;
859     for(i=0;i<w;i++) {
860         /* top and bottom */
861         memcpy(buf - (i + 1) * wrap, buf, width);
862         memcpy(last_line + (i + 1) * wrap, last_line, width);
863     }
864     /* left and right */
865     ptr = buf;
866     for(i=0;i<height;i++) {
867         memset(ptr - w, ptr[0], w);
868         memset(ptr + width, ptr[width-1], w);
869         ptr += wrap;
870     }
871     /* corners */
872     for(i=0;i<w;i++) {
873         memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
874         memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
875         memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
876         memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
877     }
878 }
879
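/**
 * returns the index of a free slot in s->picture[]: for shared pictures only
 * a never-typed slot is accepted; otherwise a previously used (typed) slot is
 * preferred before falling back to any slot without allocated data.
 */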
880 static int find_unused_picture(MpegEncContext *s, int shared){
881     int i;
882     
883     if(shared){
884         for(i=0; i<MAX_PICTURE_COUNT; i++){
885             if(s->picture[i].data[0]==NULL && s->picture[i].type==0) break;
886         }
887     }else{
888         for(i=0; i<MAX_PICTURE_COUNT; i++){
889             if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) break;
890         }
891         for(i=0; i<MAX_PICTURE_COUNT; i++){
892             if(s->picture[i].data[0]==NULL) break;
893         }
894     }
895
896     assert(i<MAX_PICTURE_COUNT);
897     return i;
898 }
899
900 /* generic function for encode/decode called before a frame is coded/decoded */
901 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
902 {
903     int i;
904     AVFrame *pic;
905
906     s->mb_skiped = 0;
907     
908     /* mark&release old frames */
909     if (s->pict_type != B_TYPE && s->last_picture.data[0]) {
910         for(i=0; i<MAX_PICTURE_COUNT; i++){
911 //printf("%8X %d %d %X %X\n", s->picture[i].data[0], s->picture[i].type, i, s->next_picture.data[0], s->last_picture.data[0]);
912             if(s->picture[i].data[0] == s->last_picture.data[0]){
913 //                s->picture[i].reference=0;
914                 avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
915                 break;
916             }    
917         }
918         assert(i<MAX_PICTURE_COUNT);
919
920         /* release forgotten pictures */
921         /* if(mpeg124/h263) */
922         if(!s->encoding){
923             for(i=0; i<MAX_PICTURE_COUNT; i++){
924                 if(s->picture[i].data[0] && s->picture[i].data[0] != s->next_picture.data[0] && s->picture[i].reference){
925                     fprintf(stderr, "releasing zombie picture\n");
926                     avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);                
927                 }
928             }
929         }
930     }
931 alloc:
932     if(!s->encoding){
933         i= find_unused_picture(s, 0);
934     
935         pic= (AVFrame*)&s->picture[i];
936         pic->reference= s->pict_type != B_TYPE;
937         pic->coded_picture_number= s->current_picture.coded_picture_number+1;
938         
939         alloc_picture(s, (Picture*)pic, 0);
940
941         s->current_picture= s->picture[i];
942     }
943
944     if (s->pict_type != B_TYPE) {
945         s->last_picture= s->next_picture;
946         s->next_picture= s->current_picture;
947     }
948     
949     if(s->pict_type != I_TYPE && s->last_picture.data[0]==NULL){
950         fprintf(stderr, "warning: first frame is not a keyframe\n");
951         assert(s->pict_type != B_TYPE); //these should have been dropped if we don't have a reference
952         goto alloc;
953     }
954    
955     s->hurry_up= s->avctx->hurry_up;
956     s->error_resilience= avctx->error_resilience;
957
958     /* set the dequantizer; we can't do it during init as it might change for MPEG-4,
959        and we can't do it in the header decode as init isn't called for MPEG-4 there yet */
960     if(s->out_format == FMT_H263){
961         if(s->mpeg_quant)
962             s->dct_unquantize = s->dct_unquantize_mpeg2;
963         else
964             s->dct_unquantize = s->dct_unquantize_h263;
965     }else 
966         s->dct_unquantize = s->dct_unquantize_mpeg1;
967
968     return 0;
969 }
970
971 /* generic function for encode/decode called after a frame has been coded/decoded */
972 void MPV_frame_end(MpegEncContext *s)
973 {
974     int i;
975     /* draw edge for correct motion prediction if outside */
976     if(s->codec_id!=CODEC_ID_SVQ1){
977         if (s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
978             draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
979             draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
980             draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
981         }
982     }
983     emms_c();
984     
985     s->last_pict_type    = s->pict_type;
986     if(s->pict_type!=B_TYPE){
987         s->last_non_b_pict_type= s->pict_type;
988     }
989     
990     s->current_picture.quality= s->qscale; //FIXME get average of qscale_table
991     s->current_picture.pict_type= s->pict_type;
992     s->current_picture.key_frame= s->pict_type == I_TYPE;
993     
994     /* copy back current_picture variables */
995     for(i=0; i<MAX_PICTURE_COUNT; i++){
996         if(s->picture[i].data[0] == s->current_picture.data[0]){
997             s->picture[i]= s->current_picture;
998             break;
999         }    
1000     }
1001     assert(i<MAX_PICTURE_COUNT);
1002
1003     /* release non-reference frames */
1004     for(i=0; i<MAX_PICTURE_COUNT; i++){
1005         if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/)
1006             s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
1007     }
1008     if(s->avctx->debug&FF_DEBUG_SKIP){
1009         int x,y;        
1010         for(y=0; y<s->mb_height; y++){
1011             for(x=0; x<s->mb_width; x++){
1012                 int count= s->mbskip_table[x + y*s->mb_width];
1013                 if(count>9) count=9;
1014                 printf(" %1d", count);
1015             }
1016             printf("\n");
1017         }
1018         printf("pict type: %d\n", s->pict_type);
1019     }
1020 }
1021
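/**
 * returns the sum of absolute differences between a 16x16 block and a
 * constant reference value (the callers pass the block mean), i.e. a cheap
 * measure of how far the block is from a flat DC block.
 */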
1022 static int get_sae(uint8_t *src, int ref, int stride){
1023     int x,y;
1024     int acc=0;
1025     
1026     for(y=0; y<16; y++){
1027         for(x=0; x<16; x++){
1028             acc+= ABS(src[x+y*stride] - ref);
1029         }
1030     }
1031     
1032     return acc;
1033 }
1034
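/**
 * counts the 16x16 blocks for which coding against their own mean (intra)
 * looks clearly cheaper than coding against the reference frame (inter);
 * used by b_frame_strategy 1 to decide how many B-frames to insert.
 */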
1035 static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
1036     int x, y, w, h;
1037     int acc=0;
1038     
1039     w= s->width &~15;
1040     h= s->height&~15;
1041     
1042     for(y=0; y<h; y+=16){
1043         for(x=0; x<w; x+=16){
1044             int offset= x + y*stride;
1045             int sad = s->dsp.pix_abs16x16(src + offset, ref + offset, stride);
1046             int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
1047             int sae = get_sae(src + offset, mean, stride);
1048             
1049             acc+= sae + 500 < sad;
1050         }
1051     }
1052     return acc;
1053 }
1054
1055
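/**
 * queues a user-supplied frame as encoder input: if the strides match and
 * the buffer may be referenced directly (no B-frame delay, or
 * CODEC_FLAG_INPUT_PRESERVED is set), the user buffer is used as is,
 * otherwise the frame is copied into an internally allocated picture.
 */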
1056 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
1057     AVFrame *pic;
1058     int i;
1059     const int encoding_delay= s->max_b_frames;
1060     int direct=1;
1061
1062     if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
1063     if(pic_arg->linesize[0] != s->linesize) direct=0;
1064     if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
1065     if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
1066   
1067 //    printf("%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
1068     
1069     if(direct){
1070         i= find_unused_picture(s, 1);
1071
1072         pic= (AVFrame*)&s->picture[i];
1073         pic->reference= 1;
1074     
1075         for(i=0; i<4; i++){
1076             pic->data[i]= pic_arg->data[i];
1077             pic->linesize[i]= pic_arg->linesize[i];
1078         }
1079         alloc_picture(s, (Picture*)pic, 1);
1080     }else{
1081         i= find_unused_picture(s, 0);
1082
1083         pic= (AVFrame*)&s->picture[i];
1084         pic->reference= 1;
1085
1086         alloc_picture(s, (Picture*)pic, 0);
1087
1088         if(   pic->data[0] == pic_arg->data[0] 
1089            && pic->data[1] == pic_arg->data[1]
1090            && pic->data[2] == pic_arg->data[2]){
1091        // empty
1092         }else{
1093             int h_chroma_shift, v_chroma_shift;
1094         
1095             avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1096         
1097             for(i=0; i<3; i++){
1098                 int src_stride= pic_arg->linesize[i];
1099                 int dst_stride= i ? s->uvlinesize : s->linesize;
1100                 int h_shift= i ? h_chroma_shift : 0;
1101                 int v_shift= i ? v_chroma_shift : 0;
1102                 int w= s->width >>h_shift;
1103                 int h= s->height>>v_shift;
1104                 uint8_t *src= pic_arg->data[i];
1105                 uint8_t *dst= pic->data[i];
1106             
1107                 if(src_stride==dst_stride)
1108                     memcpy(dst, src, src_stride*h);
1109                 else{
1110                     while(h--){
1111                         memcpy(dst, src, w);
1112                         dst += dst_stride;
1113                         src += src_stride;
1114                     }
1115                 }
1116             }
1117         }
1118     }
1119     pic->quality= pic_arg->quality;
1120     pic->pict_type= pic_arg->pict_type;
1121     pic->pts = pic_arg->pts;
1122     
1123     if(s->input_picture[encoding_delay])
1124         pic->display_picture_number= s->input_picture[encoding_delay]->display_picture_number + 1;
1125
1126     /* shift buffer entries */
1127     for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
1128         s->input_picture[i-1]= s->input_picture[i];
1129         
1130     s->input_picture[encoding_delay]= (Picture*)pic;
1131
1132     return 0;
1133 }
1134
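/**
 * picks and reorders the next picture(s) to code: chooses I/P for the next
 * reference frame and how many B-frames precede it (fixed max_b_frames, the
 * intra-count based strategy 1, or the types recorded in the 2-pass rate
 * control log), then sets up s->new_picture / s->current_picture.
 */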
1135 static void select_input_picture(MpegEncContext *s){
1136     int i;
1137     const int encoding_delay= s->max_b_frames;
1138     int coded_pic_num=0;    
1139
1140     if(s->reordered_input_picture[0])
1141         coded_pic_num= s->reordered_input_picture[0]->coded_picture_number + 1;
1142
1143     for(i=1; i<MAX_PICTURE_COUNT; i++)
1144         s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
1145     s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
1146
1147     /* set next picture types & ordering */
1148     if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
1149         if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture.data[0]==NULL || s->intra_only){
1150             s->reordered_input_picture[0]= s->input_picture[0];
1151             s->reordered_input_picture[0]->pict_type= I_TYPE;
1152             s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
1153         }else{
1154             int b_frames;
1155             
1156             if(s->flags&CODEC_FLAG_PASS2){
1157                 for(i=0; i<s->max_b_frames+1; i++){
1158                     int pict_num= s->input_picture[0]->display_picture_number + i;
1159                     int pict_type= s->rc_context.entry[pict_num].new_pict_type;
1160                     s->input_picture[i]->pict_type= pict_type;
1161                     
1162                     if(i + 1 >= s->rc_context.num_entries) break;
1163                 }
1164             }
1165
1166             if(s->input_picture[0]->pict_type){
1167                 /* user selected pict_type */
1168                 for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){
1169                     if(s->input_picture[b_frames]->pict_type!=B_TYPE) break;
1170                 }
1171             
1172                 if(b_frames > s->max_b_frames){
1173                     fprintf(stderr, "warning, too many bframes in a row\n");
1174                     b_frames = s->max_b_frames;
1175                 }
1176             }else if(s->b_frame_strategy==0){
1177                 b_frames= s->max_b_frames;
1178             }else if(s->b_frame_strategy==1){
1179                 for(i=1; i<s->max_b_frames+1; i++){
1180                     if(s->input_picture[i]->b_frame_score==0){
1181                         s->input_picture[i]->b_frame_score= 
1182                             get_intra_count(s, s->input_picture[i  ]->data[0], 
1183                                                s->input_picture[i-1]->data[0], s->linesize) + 1;
1184                     }
1185                 }
1186                 for(i=0; i<s->max_b_frames; i++){
1187                     if(s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
1188                 }
1189                                 
1190                 b_frames= FFMAX(0, i-1);
1191                 
1192                 /* reset scores */
1193                 for(i=0; i<b_frames+1; i++){
1194                     s->input_picture[i]->b_frame_score=0;
1195                 }
1196             }else{
1197                 fprintf(stderr, "illegal b frame strategy\n");
1198                 b_frames=0;
1199             }
1200
1201             emms_c();
1202 //static int b_count=0;
1203 //b_count+= b_frames;
1204 //printf("b_frames: %d\n", b_count);
1205                         
1206             s->reordered_input_picture[0]= s->input_picture[b_frames];
1207             if(   s->picture_in_gop_number + b_frames >= s->gop_size 
1208                || s->reordered_input_picture[0]->pict_type== I_TYPE)
1209                 s->reordered_input_picture[0]->pict_type= I_TYPE;
1210             else
1211                 s->reordered_input_picture[0]->pict_type= P_TYPE;
1212             s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
1213             for(i=0; i<b_frames; i++){
1214                 coded_pic_num++;
1215                 s->reordered_input_picture[i+1]= s->input_picture[i];
1216                 s->reordered_input_picture[i+1]->pict_type= B_TYPE;
1217                 s->reordered_input_picture[i+1]->coded_picture_number= coded_pic_num;
1218             }
1219         }
1220     }
1221     
1222     if(s->reordered_input_picture[0]){
1223        s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE;
1224
1225         if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
1226             int i= find_unused_picture(s, 0);
1227             Picture *pic= &s->picture[i];
1228
1229             s->new_picture= *s->reordered_input_picture[0];
1230
1231             /* mark us unused / free shared pic */
1232             for(i=0; i<4; i++)
1233                 s->reordered_input_picture[0]->data[i]= NULL;
1234             s->reordered_input_picture[0]->type= 0;
1235             
1236             pic->pict_type = s->reordered_input_picture[0]->pict_type;
1237             pic->quality   = s->reordered_input_picture[0]->quality;
1238             pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number;
1239             pic->reference = s->reordered_input_picture[0]->reference;
1240             
1241             alloc_picture(s, pic, 0);
1242
1243             s->current_picture= *pic;
1244         }else{
1245             assert(   s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER 
1246                    || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
1247             
1248             s->new_picture= *s->reordered_input_picture[0];
1249
1250             for(i=0; i<4; i++){
1251                 s->reordered_input_picture[0]->data[i]-=16; //FIXME dirty
1252             }
1253             s->current_picture= *s->reordered_input_picture[0];
1254         }
1255     
1256         s->picture_number= s->new_picture.display_picture_number;
1257 //printf("dpn:%d\n", s->picture_number);
1258     }else{
1259        memset(&s->new_picture, 0, sizeof(Picture));
1260     }
1261 }
1262
1263 int MPV_encode_picture(AVCodecContext *avctx,
1264                        unsigned char *buf, int buf_size, void *data)
1265 {
1266     MpegEncContext *s = avctx->priv_data;
1267     AVFrame *pic_arg = data;
1268     int i;
1269
1270     init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
1271
1272     s->picture_in_gop_number++;
1273
1274     load_input_picture(s, pic_arg);
1275     
1276     select_input_picture(s);
1277     
1278     /* output? */
1279     if(s->new_picture.data[0]){
1280
1281         s->pict_type= s->new_picture.pict_type;
1282         if (s->fixed_qscale){ /* the ratecontrol needs the last qscale so we don't touch it for CBR */
1283             s->qscale= (int)(s->new_picture.quality+0.5);
1284             assert(s->qscale);
1285         }
1286 //emms_c();
1287 //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
1288         MPV_frame_start(s, avctx);
1289
1290         encode_picture(s, s->picture_number);
1291         
1292         avctx->real_pict_num  = s->picture_number;
1293         avctx->header_bits = s->header_bits;
1294         avctx->mv_bits     = s->mv_bits;
1295         avctx->misc_bits   = s->misc_bits;
1296         avctx->i_tex_bits  = s->i_tex_bits;
1297         avctx->p_tex_bits  = s->p_tex_bits;
1298         avctx->i_count     = s->i_count;
1299         avctx->p_count     = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
1300         avctx->skip_count  = s->skip_count;
1301
1302         MPV_frame_end(s);
1303
1304         if (s->out_format == FMT_MJPEG)
1305             mjpeg_picture_trailer(s);
1306         
1307         if(s->flags&CODEC_FLAG_PASS1)
1308             ff_write_pass1_stats(s);
1309     }
1310
1311     s->input_picture_number++;
1312
1313     flush_put_bits(&s->pb);
1314     s->frame_bits  = (pbBufPtr(&s->pb) - s->pb.buf) * 8;
1315     
1316     s->total_bits += s->frame_bits;
1317     avctx->frame_bits  = s->frame_bits;
1318
1319     for(i=0; i<4; i++){
1320         avctx->error[i] += s->current_picture.error[i];
1321     }
1322     
1323     return pbBufPtr(&s->pb) - s->pb.buf;
1324 }
1325
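/**
 * global motion compensation for the single warp point case (MPEG-4 GMC):
 * applies one translational offset with sprite_warping_accuracy precision to
 * luma and chroma, falling back to plain (half-pel) put_pixels when the
 * offset has no finer fractional part.
 */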
1326 static inline void gmc1_motion(MpegEncContext *s,
1327                                UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1328                                int dest_offset,
1329                                UINT8 **ref_picture, int src_offset)
1330 {
1331     UINT8 *ptr;
1332     int offset, src_x, src_y, linesize, uvlinesize;
1333     int motion_x, motion_y;
1334     int emu=0;
1335
1336     motion_x= s->sprite_offset[0][0];
1337     motion_y= s->sprite_offset[0][1];
1338     src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
1339     src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
1340     motion_x<<=(3-s->sprite_warping_accuracy);
1341     motion_y<<=(3-s->sprite_warping_accuracy);
1342     src_x = clip(src_x, -16, s->width);
1343     if (src_x == s->width)
1344         motion_x =0;
1345     src_y = clip(src_y, -16, s->height);
1346     if (src_y == s->height)
1347         motion_y =0;
1348
1349     linesize = s->linesize;
1350     uvlinesize = s->uvlinesize;
1351     
1352     ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1353
1354     dest_y+=dest_offset;
1355     if(s->flags&CODEC_FLAG_EMU_EDGE){
1356         if(src_x<0 || src_y<0 || src_x + 17 >= s->h_edge_pos
1357                               || src_y + 17 >= s->v_edge_pos){
1358             ff_emulated_edge_mc(s, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1359             ptr= s->edge_emu_buffer;
1360         }
1361     }
1362     
1363     if((motion_x|motion_y)&7){
1364         s->dsp.gmc1(dest_y  , ptr  , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1365         s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1366     }else{
1367         int dxy;
1368         
1369         dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
1370         if (s->no_rounding){
1371             s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
1372         }else{
1373             s->dsp.put_pixels_tab       [0][dxy](dest_y, ptr, linesize, 16);
1374         }
1375     }
1376     
1377     if(s->flags&CODEC_FLAG_GRAY) return;
1378
1379     motion_x= s->sprite_offset[1][0];
1380     motion_y= s->sprite_offset[1][1];
1381     src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
1382     src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
1383     motion_x<<=(3-s->sprite_warping_accuracy);
1384     motion_y<<=(3-s->sprite_warping_accuracy);
1385     src_x = clip(src_x, -8, s->width>>1);
1386     if (src_x == s->width>>1)
1387         motion_x =0;
1388     src_y = clip(src_y, -8, s->height>>1);
1389     if (src_y == s->height>>1)
1390         motion_y =0;
1391
1392     offset = (src_y * uvlinesize) + src_x + (src_offset>>1);
1393     ptr = ref_picture[1] + offset;
1394     if(s->flags&CODEC_FLAG_EMU_EDGE){
1395         if(src_x<0 || src_y<0 || src_x + 9 >= s->h_edge_pos>>1
1396                               || src_y + 9 >= s->v_edge_pos>>1){
1397             ff_emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1398             ptr= s->edge_emu_buffer;
1399             emu=1;
1400         }
1401     }
1402     s->dsp.gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
1403     
1404     ptr = ref_picture[2] + offset;
1405     if(emu){
1406         ff_emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1407         ptr= s->edge_emu_buffer;
1408     }
1409     s->dsp.gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
1410     
1411     return;
1412 }
1413
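/**
 * general (affine) global motion compensation: derives the warped source
 * position of this macroblock from sprite_offset/sprite_delta and lets
 * dsp.gmc() do the per-pixel interpolation for luma and chroma.
 */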
1414 static inline void gmc_motion(MpegEncContext *s,
1415                                UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1416                                int dest_offset,
1417                                UINT8 **ref_picture, int src_offset)
1418 {
1419     UINT8 *ptr;
1420     int linesize, uvlinesize;
1421     const int a= s->sprite_warping_accuracy;
1422     int ox, oy;
1423
1424     linesize = s->linesize;
1425     uvlinesize = s->uvlinesize;
1426
1427     ptr = ref_picture[0] + src_offset;
1428
1429     dest_y+=dest_offset;
1430     
1431     ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
1432     oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;
1433
1434     s->dsp.gmc(dest_y, ptr, linesize, 16,
1435            ox, 
1436            oy, 
1437            s->sprite_delta[0][0], s->sprite_delta[0][1],
1438            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1439            a+1, (1<<(2*a+1)) - s->no_rounding,
1440            s->h_edge_pos, s->v_edge_pos);
1441     s->dsp.gmc(dest_y+8, ptr, linesize, 16,
1442            ox + s->sprite_delta[0][0]*8, 
1443            oy + s->sprite_delta[1][0]*8, 
1444            s->sprite_delta[0][0], s->sprite_delta[0][1],
1445            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1446            a+1, (1<<(2*a+1)) - s->no_rounding,
1447            s->h_edge_pos, s->v_edge_pos);
1448
1449     if(s->flags&CODEC_FLAG_GRAY) return;
1450
1451
1452     dest_cb+=dest_offset>>1;
1453     dest_cr+=dest_offset>>1;
1454     
1455     ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
1456     oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;
1457
1458     ptr = ref_picture[1] + (src_offset>>1);
1459     s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
1460            ox, 
1461            oy, 
1462            s->sprite_delta[0][0], s->sprite_delta[0][1],
1463            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1464            a+1, (1<<(2*a+1)) - s->no_rounding,
1465            s->h_edge_pos>>1, s->v_edge_pos>>1);
1466     
1467     ptr = ref_picture[2] + (src_offset>>1);
1468     s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
1469            ox, 
1470            oy, 
1471            s->sprite_delta[0][0], s->sprite_delta[0][1],
1472            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1473            a+1, (1<<(2*a+1)) - s->no_rounding,
1474            s->h_edge_pos>>1, s->v_edge_pos>>1);
1475 }
1476
1477
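/**
 * copies a block_w x block_h block starting at (src_x, src_y) into
 * s->edge_emu_buffer, replicating the border pixels for the parts that fall
 * outside the w x h area; used with CODEC_FLAG_EMU_EDGE when no real edge
 * has been allocated around the picture.
 */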
1478 void ff_emulated_edge_mc(MpegEncContext *s, UINT8 *src, int linesize, int block_w, int block_h, 
1479                                     int src_x, int src_y, int w, int h){
1480     int x, y;
1481     int start_y, start_x, end_y, end_x;
1482     UINT8 *buf= s->edge_emu_buffer;
1483
1484     if(src_y>= h){
1485         src+= (h-1-src_y)*linesize;
1486         src_y=h-1;
1487     }else if(src_y<=-block_h){
1488         src+= (1-block_h-src_y)*linesize;
1489         src_y=1-block_h;
1490     }
1491     if(src_x>= w){
1492         src+= (w-1-src_x);
1493         src_x=w-1;
1494     }else if(src_x<=-block_w){
1495         src+= (1-block_w-src_x);
1496         src_x=1-block_w;
1497     }
1498
1499     start_y= FFMAX(0, -src_y);
1500     start_x= FFMAX(0, -src_x);
1501     end_y= FFMIN(block_h, h-src_y);
1502     end_x= FFMIN(block_w, w-src_x);
1503
1504     // copy existing part
1505     for(y=start_y; y<end_y; y++){
1506         for(x=start_x; x<end_x; x++){
1507             buf[x + y*linesize]= src[x + y*linesize];
1508         }
1509     }
1510
1511     //top
1512     for(y=0; y<start_y; y++){
1513         for(x=start_x; x<end_x; x++){
1514             buf[x + y*linesize]= buf[x + start_y*linesize];
1515         }
1516     }
1517
1518     //bottom
1519     for(y=end_y; y<block_h; y++){
1520         for(x=start_x; x<end_x; x++){
1521             buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
1522         }
1523     }
1524                                     
1525     for(y=0; y<block_h; y++){
1526        //left
1527         for(x=0; x<start_x; x++){
1528             buf[x + y*linesize]= buf[start_x + y*linesize];
1529         }
1530        
1531        //right
1532         for(x=end_x; x<block_w; x++){
1533             buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
1534         }
1535     }
1536 }
1537
1538
1539 /* apply one mpeg motion vector to the three components */
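/* Half-pel motion compensation: dxy = ((motion_y&1)<<1) | (motion_x&1) selects
 * one of the four luma interpolation functions in pix_op[0][].  The chroma
 * vector is derived from the luma vector: for H.263-style formats any
 * non-integer chroma position is forced to a half-pel position, for MPEG-1/2
 * the luma vector is simply divided by two (truncating towards zero).  With
 * CODEC_FLAG_EMU_EDGE, blocks that reach beyond h_edge_pos/v_edge_pos are
 * routed through ff_emulated_edge_mc() first. */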
1540 static inline void mpeg_motion(MpegEncContext *s,
1541                                UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1542                                int dest_offset,
1543                                UINT8 **ref_picture, int src_offset,
1544                                int field_based, op_pixels_func (*pix_op)[4],
1545                                int motion_x, int motion_y, int h)
1546 {
1547     UINT8 *ptr;
1548     int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
1549     int emu=0;
1550 #if 0    
1551 if(s->quarter_sample)
1552 {
1553     motion_x>>=1;
1554     motion_y>>=1;
1555 }
1556 #endif
1557     dxy = ((motion_y & 1) << 1) | (motion_x & 1);
1558     src_x = s->mb_x * 16 + (motion_x >> 1);
1559     src_y = s->mb_y * (16 >> field_based) + (motion_y >> 1);
1560                 
1561     /* WARNING: do not forget half pels */
1562     height = s->height >> field_based;
1563     v_edge_pos = s->v_edge_pos >> field_based;
1564     src_x = clip(src_x, -16, s->width);
1565     if (src_x == s->width)
1566         dxy &= ~1;
1567     src_y = clip(src_y, -16, height);
1568     if (src_y == height)
1569         dxy &= ~2;
1570     linesize   = s->linesize << field_based;
1571     uvlinesize = s->uvlinesize << field_based;
1572     ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset;
1573     dest_y += dest_offset;
1574
1575     if(s->flags&CODEC_FLAG_EMU_EDGE){
1576         if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 16 > s->h_edge_pos
1577                               || src_y + (motion_y&1) + h  > v_edge_pos){
1578             ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based, 
1579                              src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
1580             ptr= s->edge_emu_buffer + src_offset;
1581             emu=1;
1582         }
1583     }
1584     pix_op[0][dxy](dest_y, ptr, linesize, h);
1585
1586     if(s->flags&CODEC_FLAG_GRAY) return;
1587
1588     if (s->out_format == FMT_H263) {
1589         dxy = 0;
1590         if ((motion_x & 3) != 0)
1591             dxy |= 1;
1592         if ((motion_y & 3) != 0)
1593             dxy |= 2;
1594         mx = motion_x >> 2;
1595         my = motion_y >> 2;
1596     } else {
1597         mx = motion_x / 2;
1598         my = motion_y / 2;
1599         dxy = ((my & 1) << 1) | (mx & 1);
1600         mx >>= 1;
1601         my >>= 1;
1602     }
1603     
1604     src_x = s->mb_x * 8 + mx;
1605     src_y = s->mb_y * (8 >> field_based) + my;
1606     src_x = clip(src_x, -8, s->width >> 1);
1607     if (src_x == (s->width >> 1))
1608         dxy &= ~1;
1609     src_y = clip(src_y, -8, height >> 1);
1610     if (src_y == (height >> 1))
1611         dxy &= ~2;
1612     offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
1613     ptr = ref_picture[1] + offset;
1614     if(emu){
1615         ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, 
1616                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1617         ptr= s->edge_emu_buffer + (src_offset >> 1);
1618     }
1619     pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1620
1621     ptr = ref_picture[2] + offset;
1622     if(emu){
1623         ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, 
1624                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1625         ptr= s->edge_emu_buffer + (src_offset >> 1);
1626     }
1627     pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1628 }
1629
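/* Quarter-pel motion compensation for the luma block:
 * dxy = ((motion_y&3)<<2) | (motion_x&3) selects one of the 16 qpel
 * interpolation functions.  Chroma is still compensated with half-pel
 * accuracy: the luma vector is halved twice (the FF_BUG_QPEL_CHROMA
 * workaround keeps the lowest bit when halving, to match encoders with that
 * bug) and the remaining fraction selects the half-pel function as in
 * mpeg_motion(). */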
1630 static inline void qpel_motion(MpegEncContext *s,
1631                                UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1632                                int dest_offset,
1633                                UINT8 **ref_picture, int src_offset,
1634                                int field_based, op_pixels_func (*pix_op)[4],
1635                                qpel_mc_func (*qpix_op)[16],
1636                                int motion_x, int motion_y, int h)
1637 {
1638     UINT8 *ptr;
1639     int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
1640     int emu=0;
1641
1642     dxy = ((motion_y & 3) << 2) | (motion_x & 3);
1643     src_x = s->mb_x * 16 + (motion_x >> 2);
1644     src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
1645
1646     height = s->height >> field_based;
1647     v_edge_pos = s->v_edge_pos >> field_based;
1648     src_x = clip(src_x, -16, s->width);
1649     if (src_x == s->width)
1650         dxy &= ~3;
1651     src_y = clip(src_y, -16, height);
1652     if (src_y == height)
1653         dxy &= ~12;
1654     linesize = s->linesize << field_based;
1655     uvlinesize = s->uvlinesize << field_based;
1656     ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1657     dest_y += dest_offset;
1658 //printf("%d %d %d\n", src_x, src_y, dxy);
1659     
1660     if(s->flags&CODEC_FLAG_EMU_EDGE){
1661         if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 16 > s->h_edge_pos
1662                               || src_y + (motion_y&3) + h  > v_edge_pos){
1663             ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based, 
1664                              src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
1665             ptr= s->edge_emu_buffer + src_offset;
1666             emu=1;
1667         }
1668     }
1669     if(!field_based)
1670         qpix_op[0][dxy](dest_y, ptr, linesize);
1671     else{
1672         //damn interlaced mode
1673         //FIXME boundary mirroring is not exactly correct here
1674         qpix_op[1][dxy](dest_y  , ptr  , linesize);
1675         qpix_op[1][dxy](dest_y+8, ptr+8, linesize);
1676     }
1677
1678     if(s->flags&CODEC_FLAG_GRAY) return;
1679
1680     if(field_based){
1681         mx= motion_x/2;
1682         my= motion_y>>1;
1683     }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
1684         mx= (motion_x>>1)|(motion_x&1);
1685         my= (motion_y>>1)|(motion_y&1);
1686     }else{
1687         mx= motion_x/2;
1688         my= motion_y/2;
1689     }
1690     mx= (mx>>1)|(mx&1);
1691     my= (my>>1)|(my&1);
1692     dxy= (mx&1) | ((my&1)<<1);
1693     mx>>=1;
1694     my>>=1;
1695
1696     src_x = s->mb_x * 8 + mx;
1697     src_y = s->mb_y * (8 >> field_based) + my;
1698     src_x = clip(src_x, -8, s->width >> 1);
1699     if (src_x == (s->width >> 1))
1700         dxy &= ~1;
1701     src_y = clip(src_y, -8, height >> 1);
1702     if (src_y == (height >> 1))
1703         dxy &= ~2;
1704
1705     offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
1706     ptr = ref_picture[1] + offset;
1707     if(emu){
1708         ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, 
1709                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1710         ptr= s->edge_emu_buffer + (src_offset >> 1);
1711     }
1712     pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr,  uvlinesize, h >> 1);
1713     
1714     ptr = ref_picture[2] + offset;
1715     if(emu){
1716         ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, 
1717                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1718         ptr= s->edge_emu_buffer + (src_offset >> 1);
1719     }
1720     pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr,  uvlinesize, h >> 1);
1721 }
1722
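/* Derives a chroma motion vector component from the sum x of the four luma
 * vector components in 8x8 mode.  For x >= 0 the result is
 *     h263_chroma_roundtab[x & 15] + 2*(x >> 4)
 * (note that (x>>3) & ~1 == 2*(x>>4)), and the mirrored value for x < 0; the
 * table supplies the H.263 rounding of the low four bits.  The caller sums the
 * four luma vectors first, see the MV_TYPE_8X8 case in MPV_motion(). */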
1723 inline int ff_h263_round_chroma(int x){
1724     if (x >= 0)
1725         return  (h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
1726     else {
1727         x = -x;
1728         return -(h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
1729     }
1730 }
1731
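/* Motion compensation dispatcher for one macroblock.  Depending on s->mv_type:
 *  - MV_TYPE_16X16: one vector for the whole MB; under CONFIG_RISKY this may
 *    instead use GMC (gmc1_motion()/gmc_motion()), quarter-pel (qpel_motion())
 *    or mspel motion (ff_mspel_motion()).
 *  - MV_TYPE_8X8: four 8x8 luma vectors; chroma uses a single vector built
 *    from their sum via ff_h263_round_chroma().
 *  - MV_TYPE_FIELD: separate top/bottom field prediction (only the PICT_FRAME
 *    case is handled here).
 * 'dir' selects forward (0) or backward (1) prediction; pix_op/qpix_op are the
 * put or avg variants chosen by the caller. */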
1732 static inline void MPV_motion(MpegEncContext *s, 
1733                               UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1734                               int dir, UINT8 **ref_picture, 
1735                               op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
1736 {
1737     int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y;
1738     int mb_x, mb_y, i;
1739     UINT8 *ptr, *dest;
1740     int emu=0;
1741
1742     mb_x = s->mb_x;
1743     mb_y = s->mb_y;
1744
1745     switch(s->mv_type) {
1746     case MV_TYPE_16X16:
1747 #ifdef CONFIG_RISKY
1748         if(s->mcsel){
1749             if(s->real_sprite_warping_points==1){
1750                 gmc1_motion(s, dest_y, dest_cb, dest_cr, 0,
1751                             ref_picture, 0);
1752             }else{
1753                 gmc_motion(s, dest_y, dest_cb, dest_cr, 0,
1754                             ref_picture, 0);
1755             }
1756         }else if(s->quarter_sample){
1757             qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
1758                         ref_picture, 0,
1759                         0, pix_op, qpix_op,
1760                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
1761         }else if(s->mspel){
1762             ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
1763                         ref_picture, pix_op,
1764                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
1765         }else
1766 #endif
1767         {
1768             mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
1769                         ref_picture, 0,
1770                         0, pix_op,
1771                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
1772         }           
1773         break;
1774     case MV_TYPE_8X8:
1775         mx = 0;
1776         my = 0;
1777         if(s->quarter_sample){
1778             for(i=0;i<4;i++) {
1779                 motion_x = s->mv[dir][i][0];
1780                 motion_y = s->mv[dir][i][1];
1781
1782                 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
1783                 src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
1784                 src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;
1785                     
1786                 /* WARNING: do not forget half pels */
1787                 src_x = clip(src_x, -16, s->width);
1788                 if (src_x == s->width)
1789                     dxy &= ~3;
1790                 src_y = clip(src_y, -16, s->height);
1791                 if (src_y == s->height)
1792                     dxy &= ~12;
1793                     
1794                 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
1795                 if(s->flags&CODEC_FLAG_EMU_EDGE){
1796                     if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 8 > s->h_edge_pos
1797                                           || src_y + (motion_y&3) + 8 > s->v_edge_pos){
1798                         ff_emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1799                         ptr= s->edge_emu_buffer;
1800                     }
1801                 }
1802                 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
1803                 qpix_op[1][dxy](dest, ptr, s->linesize);
1804
1805                 mx += s->mv[dir][i][0]/2;
1806                 my += s->mv[dir][i][1]/2;
1807             }
1808         }else{
1809             for(i=0;i<4;i++) {
1810                 motion_x = s->mv[dir][i][0];
1811                 motion_y = s->mv[dir][i][1];
1812
1813                 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
1814                 src_x = mb_x * 16 + (motion_x >> 1) + (i & 1) * 8;
1815                 src_y = mb_y * 16 + (motion_y >> 1) + (i >>1) * 8;
1816                     
1817                 /* WARNING: do not forget half pels */
1818                 src_x = clip(src_x, -16, s->width);
1819                 if (src_x == s->width)
1820                     dxy &= ~1;
1821                 src_y = clip(src_y, -16, s->height);
1822                 if (src_y == s->height)
1823                     dxy &= ~2;
1824                     
1825                 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
1826                 if(s->flags&CODEC_FLAG_EMU_EDGE){
1827                     if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 8 > s->h_edge_pos
1828                                           || src_y + (motion_y&1) + 8 > s->v_edge_pos){
1829                         ff_emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1830                         ptr= s->edge_emu_buffer;
1831                     }
1832                 }
1833                 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
1834                 pix_op[1][dxy](dest, ptr, s->linesize, 8);
1835
1836                 mx += s->mv[dir][i][0];
1837                 my += s->mv[dir][i][1];
1838             }
1839         }
1840
1841         if(s->flags&CODEC_FLAG_GRAY) break;
1842         /* In case of 8X8, we construct a single chroma motion vector
1843            with a special rounding */
1844         mx= ff_h263_round_chroma(mx);
1845         my= ff_h263_round_chroma(my);
1846         dxy = ((my & 1) << 1) | (mx & 1);
1847         mx >>= 1;
1848         my >>= 1;
1849
1850         src_x = mb_x * 8 + mx;
1851         src_y = mb_y * 8 + my;
1852         src_x = clip(src_x, -8, s->width/2);
1853         if (src_x == s->width/2)
1854             dxy &= ~1;
1855         src_y = clip(src_y, -8, s->height/2);
1856         if (src_y == s->height/2)
1857             dxy &= ~2;
1858         
1859         offset = (src_y * (s->uvlinesize)) + src_x;
1860         ptr = ref_picture[1] + offset;
1861         if(s->flags&CODEC_FLAG_EMU_EDGE){
1862                 if(src_x<0 || src_y<0 || src_x + (dxy &1) + 8 > s->h_edge_pos>>1
1863                                       || src_y + (dxy>>1) + 8 > s->v_edge_pos>>1){
1864                     ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1865                     ptr= s->edge_emu_buffer;
1866                     emu=1;
1867                 }
1868             }
1869         pix_op[1][dxy](dest_cb, ptr, s->uvlinesize, 8);
1870
1871         ptr = ref_picture[2] + offset;
1872         if(emu){
1873             ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1874             ptr= s->edge_emu_buffer;
1875         }
1876         pix_op[1][dxy](dest_cr, ptr, s->uvlinesize, 8);
1877         break;
1878     case MV_TYPE_FIELD:
1879         if (s->picture_structure == PICT_FRAME) {
1880             if(s->quarter_sample){
1881                 /* top field */
1882                 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
1883                             ref_picture, s->field_select[dir][0] ? s->linesize : 0,
1884                             1, pix_op, qpix_op,
1885                             s->mv[dir][0][0], s->mv[dir][0][1], 8);
1886                 /* bottom field */
1887                 qpel_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
1888                             ref_picture, s->field_select[dir][1] ? s->linesize : 0,
1889                             1, pix_op, qpix_op,
1890                             s->mv[dir][1][0], s->mv[dir][1][1], 8);
1891             }else{
1892                 /* top field */       
1893                 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
1894                             ref_picture, s->field_select[dir][0] ? s->linesize : 0,
1895                             1, pix_op,
1896                             s->mv[dir][0][0], s->mv[dir][0][1], 8);
1897                 /* bottom field */
1898                 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
1899                             ref_picture, s->field_select[dir][1] ? s->linesize : 0,
1900                             1, pix_op,
1901                             s->mv[dir][1][0], s->mv[dir][1][1], 8);
1902             }
1903         } else {
1904             
1905
1906         }
1907         break;
1908     }
1909 }
1910
1911
1912 /* put block[] to dest[] */
1913 static inline void put_dct(MpegEncContext *s, 
1914                            DCTELEM *block, int i, UINT8 *dest, int line_size)
1915 {
1916     s->dct_unquantize(s, block, i, s->qscale);
1917     s->idct_put (dest, line_size, block);
1918 }
1919
1920 /* add block[] to dest[] */
1921 static inline void add_dct(MpegEncContext *s, 
1922                            DCTELEM *block, int i, UINT8 *dest, int line_size)
1923 {
1924     if (s->block_last_index[i] >= 0) {
1925         s->idct_add (dest, line_size, block);
1926     }
1927 }
1928
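/* like add_dct(), but the block still holds quantized coefficients, so it is
 * dequantized with the current qscale before the IDCT result is added */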
1929 static inline void add_dequant_dct(MpegEncContext *s, 
1930                            DCTELEM *block, int i, UINT8 *dest, int line_size)
1931 {
1932     if (s->block_last_index[i] >= 0) {
1933         s->dct_unquantize(s, block, i, s->qscale);
1934
1935         s->idct_add (dest, line_size, block);
1936     }
1937 }
1938
1939 /**
1940  * cleans dc, ac, coded_block for the current non-intra MB
1941  */
1942 void ff_clean_intra_table_entries(MpegEncContext *s)
1943 {
1944     int wrap = s->block_wrap[0];
1945     int xy = s->block_index[0];
1946     
1947     s->dc_val[0][xy           ] = 
1948     s->dc_val[0][xy + 1       ] = 
1949     s->dc_val[0][xy     + wrap] =
1950     s->dc_val[0][xy + 1 + wrap] = 1024;
1951     /* ac pred */
1952     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(INT16));
1953     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(INT16));
1954     if (s->msmpeg4_version>=3) {
1955         s->coded_block[xy           ] =
1956         s->coded_block[xy + 1       ] =
1957         s->coded_block[xy     + wrap] =
1958         s->coded_block[xy + 1 + wrap] = 0;
1959     }
1960     /* chroma */
1961     wrap = s->block_wrap[4];
1962     xy = s->mb_x + 1 + (s->mb_y + 1) * wrap;
1963     s->dc_val[1][xy] =
1964     s->dc_val[2][xy] = 1024;
1965     /* ac pred */
1966     memset(s->ac_val[1][xy], 0, 16 * sizeof(INT16));
1967     memset(s->ac_val[2][xy], 0, 16 * sizeof(INT16));
1968     
1969     s->mbintra_table[s->mb_x + s->mb_y*s->mb_width]= 0;
1970 }
1971
1972 /* generic function called after a macroblock has been parsed by the
1973    decoder or after it has been encoded by the encoder.
1974
1975    Important variables used:
1976    s->mb_intra : true if intra macroblock
1977    s->mv_dir   : motion vector direction
1978    s->mv_type  : motion vector type
1979    s->mv       : motion vector
1980    s->interlaced_dct : true if interlaced dct used (mpeg2)
1981  */
1982 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
1983 {
1984     int mb_x, mb_y;
1985     const int mb_xy = s->mb_y * s->mb_width + s->mb_x;
1986
1987     mb_x = s->mb_x;
1988     mb_y = s->mb_y;
1989
1990     s->current_picture.qscale_table[mb_xy]= s->qscale;
1991
1992     /* update DC predictors for P macroblocks */
1993     if (!s->mb_intra) {
1994         if (s->h263_pred || s->h263_aic) {
1995             if(s->mbintra_table[mb_xy])
1996                 ff_clean_intra_table_entries(s);
1997         } else {
1998             s->last_dc[0] =
1999             s->last_dc[1] =
2000             s->last_dc[2] = 128 << s->intra_dc_precision;
2001         }
2002     }
2003     else if (s->h263_pred || s->h263_aic)
2004         s->mbintra_table[mb_xy]=1;
2005
2006     /* update motion predictor, not for B-frames as they need the motion_val from the last P/S-Frame */
2007     if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE) { //FIXME move into h263.c if possible, format specific stuff shouldn't be here
2008         //FIXME a lot of that is only needed for !low_delay
2009         const int wrap = s->block_wrap[0];
2010         const int xy = s->block_index[0];
2011         const int mb_index= s->mb_x + s->mb_y*s->mb_width;
2012         if(s->mv_type == MV_TYPE_8X8){
2013             s->co_located_type_table[mb_index]= CO_LOCATED_TYPE_4MV;
2014         } else {
2015             int motion_x, motion_y;
2016             if (s->mb_intra) {
2017                 motion_x = 0;
2018                 motion_y = 0;
2019                 if(s->co_located_type_table)
2020                     s->co_located_type_table[mb_index]= 0;
2021             } else if (s->mv_type == MV_TYPE_16X16) {
2022                 motion_x = s->mv[0][0][0];
2023                 motion_y = s->mv[0][0][1];
2024                 if(s->co_located_type_table)
2025                     s->co_located_type_table[mb_index]= 0;
2026             } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
2027                 int i;
2028                 motion_x = s->mv[0][0][0] + s->mv[0][1][0];
2029                 motion_y = s->mv[0][0][1] + s->mv[0][1][1];
2030                 motion_x = (motion_x>>1) | (motion_x&1);
2031                 for(i=0; i<2; i++){
2032                     s->field_mv_table[mb_index][i][0]= s->mv[0][i][0];
2033                     s->field_mv_table[mb_index][i][1]= s->mv[0][i][1];
2034                     s->field_select_table[mb_index][i]= s->field_select[0][i];
2035                 }
2036                 s->co_located_type_table[mb_index]= CO_LOCATED_TYPE_FIELDMV;
2037             }
2038             /* no update if 8X8 because it has been done during parsing */
2039             s->motion_val[xy][0] = motion_x;
2040             s->motion_val[xy][1] = motion_y;
2041             s->motion_val[xy + 1][0] = motion_x;
2042             s->motion_val[xy + 1][1] = motion_y;
2043             s->motion_val[xy + wrap][0] = motion_x;
2044             s->motion_val[xy + wrap][1] = motion_y;
2045             s->motion_val[xy + 1 + wrap][0] = motion_x;
2046             s->motion_val[xy + 1 + wrap][1] = motion_y;
2047         }
2048     }
2049     
2050     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
2051         UINT8 *dest_y, *dest_cb, *dest_cr;
2052         int dct_linesize, dct_offset;
2053         op_pixels_func (*op_pix)[4];
2054         qpel_mc_func (*op_qpix)[16];
2055
2056         /* avoid copy if macroblock skipped in last frame too */
2057         if (s->pict_type != B_TYPE) {
2058             s->current_picture.mbskip_table[mb_xy]= s->mb_skiped;
2059         }
2060
2061         /* only skip during decoding, as during encoding we might trash the buffers a bit */
2062         if(!s->encoding){
2063             UINT8 *mbskip_ptr = &s->mbskip_table[mb_xy];
2064             const int age= s->current_picture.age;
2065
2066             assert(age);
2067
2068             if (s->mb_skiped) {
2069                 s->mb_skiped= 0;
2070                 assert(s->pict_type!=I_TYPE);
2071  
2072                 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2073                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2074
2075                 /* if previous was skipped too, then nothing to do !  */
2076                 if (*mbskip_ptr >= age && s->current_picture.reference){
2077                     return;
2078                 }
2079             } else if(!s->current_picture.reference){
2080                 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2081                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2082             } else{
2083                 *mbskip_ptr = 0; /* not skipped */
2084             }
2085         }else
2086             s->mb_skiped= 0;
2087
2088         if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band){
2089             dest_y  = s->current_picture.data[0] + mb_x * 16;
2090             dest_cb = s->current_picture.data[1] + mb_x * 8;
2091             dest_cr = s->current_picture.data[2] + mb_x * 8;
2092         }else{
2093             dest_y  = s->current_picture.data[0] + (mb_y * 16* s->linesize  ) + mb_x * 16;
2094             dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
2095             dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
2096         }
2097
2098         if (s->interlaced_dct) {
2099             dct_linesize = s->linesize * 2;
2100             dct_offset = s->linesize;
2101         } else {
2102             dct_linesize = s->linesize;
2103             dct_offset = s->linesize * 8;
2104         }
2105
2106         if (!s->mb_intra) {
2107             /* motion handling */
2108             /* decoding or more than one mb_type (MC was already done otherwise) */
2109             if((!s->encoding) || (s->mb_type[mb_xy]&(s->mb_type[mb_xy]-1))){
2110                 if ((!s->no_rounding) || s->pict_type==B_TYPE){                
2111                     op_pix = s->dsp.put_pixels_tab;
2112                     op_qpix= s->dsp.put_qpel_pixels_tab;
2113                 }else{
2114                     op_pix = s->dsp.put_no_rnd_pixels_tab;
2115                     op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
2116                 }
2117
2118                 if (s->mv_dir & MV_DIR_FORWARD) {
2119                     MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2120                     op_pix = s->dsp.avg_pixels_tab;
2121                     op_qpix= s->dsp.avg_qpel_pixels_tab;
2122                 }
2123                 if (s->mv_dir & MV_DIR_BACKWARD) {
2124                     MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2125                 }
2126             }
2127
2128             /* skip dequant / idct if we are really late ;) */
2129             if(s->hurry_up>1) return;
2130
2131             /* add dct residue */
2132             if(s->encoding || !(   s->mpeg2 || s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO 
2133                                 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2134                 add_dequant_dct(s, block[0], 0, dest_y, dct_linesize);
2135                 add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2136                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2137                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2138
2139                 if(!(s->flags&CODEC_FLAG_GRAY)){
2140                     add_dequant_dct(s, block[4], 4, dest_cb, s->uvlinesize);
2141                     add_dequant_dct(s, block[5], 5, dest_cr, s->uvlinesize);
2142                 }
2143             } else if(s->codec_id != CODEC_ID_WMV2){
2144                 add_dct(s, block[0], 0, dest_y, dct_linesize);
2145                 add_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2146                 add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2147                 add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2148
2149                 if(!(s->flags&CODEC_FLAG_GRAY)){
2150                     add_dct(s, block[4], 4, dest_cb, s->uvlinesize);
2151                     add_dct(s, block[5], 5, dest_cr, s->uvlinesize);
2152                 }
2153             } 
2154 #ifdef CONFIG_RISKY
2155             else{
2156                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2157             }
2158 #endif
2159         } else {
2160             /* dct only in intra block */
2161             if(s->encoding || !(s->mpeg2 || s->codec_id==CODEC_ID_MPEG1VIDEO)){
2162                 put_dct(s, block[0], 0, dest_y, dct_linesize);
2163                 put_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2164                 put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2165                 put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2166
2167                 if(!(s->flags&CODEC_FLAG_GRAY)){
2168                     put_dct(s, block[4], 4, dest_cb, s->uvlinesize);
2169                     put_dct(s, block[5], 5, dest_cr, s->uvlinesize);
2170                 }
2171             }else{
2172                 s->idct_put(dest_y                 , dct_linesize, block[0]);
2173                 s->idct_put(dest_y              + 8, dct_linesize, block[1]);
2174                 s->idct_put(dest_y + dct_offset    , dct_linesize, block[2]);
2175                 s->idct_put(dest_y + dct_offset + 8, dct_linesize, block[3]);
2176
2177                 if(!(s->flags&CODEC_FLAG_GRAY)){
2178                     s->idct_put(dest_cb, s->uvlinesize, block[4]);
2179                     s->idct_put(dest_cr, s->uvlinesize, block[5]);
2180                 }
2181             }
2182         }
2183     }
2184 }
2185
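/* Heuristic elimination of nearly-empty blocks after quantization: the block
 * is scanned in coding order; any coefficient with |level| > 1 keeps the block
 * as is, while each +-1 coefficient adds tab[run] to a score (run = number of
 * zeros since the previous nonzero, so +-1s near the start of the scan cost
 * more).  If the total score stays below the threshold, the considered
 * coefficients are all zeroed.  A non-negative threshold leaves the DC
 * coefficient alone; a negative threshold (its absolute value is used)
 * includes it.  Example: a block whose only nonzero value is a single +-1 late
 * in the scan (run >= 24, so tab[run] == 0) scores 0 and is eliminated. */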
2186 static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
2187 {
2188     static const char tab[64]=
2189         {3,2,2,1,1,1,1,1,
2190          1,1,1,1,1,1,1,1,
2191          1,1,1,1,1,1,1,1,
2192          0,0,0,0,0,0,0,0,
2193          0,0,0,0,0,0,0,0,
2194          0,0,0,0,0,0,0,0,
2195          0,0,0,0,0,0,0,0,
2196          0,0,0,0,0,0,0,0};
2197     int score=0;
2198     int run=0;
2199     int i;
2200     DCTELEM *block= s->block[n];
2201     const int last_index= s->block_last_index[n];
2202     int skip_dc;
2203
2204     if(threshold<0){
2205         skip_dc=0;
2206         threshold= -threshold;
2207     }else
2208         skip_dc=1;
2209
2210     /* are all the coefficients which we could set to zero already zero? */
2211     if(last_index<=skip_dc - 1) return;
2212
2213     for(i=0; i<=last_index; i++){
2214         const int j = s->intra_scantable.permutated[i];
2215         const int level = ABS(block[j]);
2216         if(level==1){
2217             if(skip_dc && i==0) continue;
2218             score+= tab[run];
2219             run=0;
2220         }else if(level>1){
2221             return;
2222         }else{
2223             run++;
2224         }
2225     }
2226     if(score >= threshold) return;
2227     for(i=skip_dc; i<=last_index; i++){
2228         const int j = s->intra_scantable.permutated[i];
2229         block[j]=0;
2230     }
2231     if(block[0]) s->block_last_index[n]= 0;
2232     else         s->block_last_index[n]= -1;
2233 }
2234
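/* clamps the quantized coefficients (except the intra DC) to
 * [s->min_qcoeff, s->max_qcoeff], the level range the entropy coder of the
 * current codec can represent */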
2235 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index)
2236 {
2237     int i;
2238     const int maxlevel= s->max_qcoeff;
2239     const int minlevel= s->min_qcoeff;
2240     
2241     if(s->mb_intra){
2242         i=1; //skip clipping of intra dc
2243     }else
2244         i=0;
2245     
2246     for(;i<=last_index; i++){
2247         const int j= s->intra_scantable.permutated[i];
2248         int level = block[j];
2249        
2250         if     (level>maxlevel) level=maxlevel;
2251         else if(level<minlevel) level=minlevel;
2252         block[j]= level;
2253     }
2254 }
2255
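/* rescales already-quantized coefficients from quantizer oldq to newq using
 * ROUNDED_DIV(level*oldq, newq), then updates block_last_index[n] to the last
 * remaining nonzero coefficient (the intra DC is left untouched) */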
2256 static inline void requantize_coeffs(MpegEncContext *s, DCTELEM block[64], int oldq, int newq, int n)
2257 {
2258     int i;
2259
2260     if(s->mb_intra){
2261         i=1; //skip clipping of intra dc
2262          //FIXME requantize, note (mpeg1/h263/h263p-aic don't need it,...)
2263     }else
2264         i=0;
2265     
2266     for(;i<=s->block_last_index[n]; i++){
2267         const int j = s->intra_scantable.permutated[i];
2268         int level = block[j];
2269         
2270         block[j]= ROUNDED_DIV(level*oldq, newq);
2271     }
2272
2273     for(i=s->block_last_index[n]; i>=0; i--){
2274         const int j = s->intra_scantable.permutated[i];
2275         if(block[j]) break;
2276     }
2277     s->block_last_index[n]= i;
2278 }
2279
2280 static inline void auto_requantize_coeffs(MpegEncContext *s, DCTELEM block[6][64])
2281 {
2282     int i,n, newq;
2283     const int maxlevel= s->max_qcoeff;
2284     const int minlevel= s->min_qcoeff;
2285     int largest=0, smallest=0;
2286
2287     assert(s->adaptive_quant);
2288     
2289     for(n=0; n<6; n++){
2290         if(s->mb_intra){
2291             i=1; //skip clipping of intra dc
2292              //FIXME requantize, note (mpeg1/h263/h263p-aic don't need it,...)
2293         }else
2294             i=0;
2295
2296         for(;i<=s->block_last_index[n]; i++){
2297             const int j = s->intra_scantable.permutated[i];
2298             int level = block[n][j];
2299             if(largest  < level) largest = level;
2300             if(smallest > level) smallest= level;
2301         }
2302     }
2303     
2304     for(newq=s->qscale+1; newq<32; newq++){
2305         if(   ROUNDED_DIV(smallest*s->qscale, newq) >= minlevel
2306            && ROUNDED_DIV(largest *s->qscale, newq) <= maxlevel) 
2307             break;
2308     }
2309         
2310     if(s->out_format==FMT_H263){
2311         /* h263-like formats cannot easily change qscale by more than 2 */
2312         if(s->avctx->qmin + 2 < newq)
2313             newq= s->avctx->qmin + 2;
2314     }
2315
2316     for(n=0; n<6; n++){
2317         requantize_coeffs(s, block[n], s->qscale, newq, n);
2318         clip_coeffs(s, block[n], s->block_last_index[n]);
2319     }
2320      
2321     s->dquant+= newq - s->qscale;
2322     s->qscale= newq;
2323 }
2324 #if 0
2325 static int pix_vcmp16x8(UINT8 *s, int stride){ //FIXME move to dsputil & optimize
2326     int score=0;
2327     int x,y;
2328     
2329     for(y=0; y<7; y++){
2330         for(x=0; x<16; x+=4){
2331             score+= ABS(s[x  ] - s[x  +stride]) + ABS(s[x+1] - s[x+1+stride]) 
2332                    +ABS(s[x+2] - s[x+2+stride]) + ABS(s[x+3] - s[x+3+stride]);
2333         }
2334         s+= stride;
2335     }
2336     
2337     return score;
2338 }
2339
2340 static int pix_diff_vcmp16x8(UINT8 *s1, UINT8*s2, int stride){ //FIXME move to dsputil & optimize
2341     int score=0;
2342     int x,y;
2343     
2344     for(y=0; y<7; y++){
2345         for(x=0; x<16; x++){
2346             score+= ABS(s1[x  ] - s2[x ] - s1[x  +stride] + s2[x +stride]);
2347         }
2348         s1+= stride;
2349         s2+= stride;
2350     }
2351     
2352     return score;
2353 }
2354 #else
2355 #define SQ(a) ((a)*(a))
2356
2357 static int pix_vcmp16x8(UINT8 *s, int stride){ //FIXME move to dsputil & optimize
2358     int score=0;
2359     int x,y;
2360     
2361     for(y=0; y<7; y++){
2362         for(x=0; x<16; x+=4){
2363             score+= SQ(s[x  ] - s[x  +stride]) + SQ(s[x+1] - s[x+1+stride]) 
2364                    +SQ(s[x+2] - s[x+2+stride]) + SQ(s[x+3] - s[x+3+stride]);
2365         }
2366         s+= stride;
2367     }
2368     
2369     return score;
2370 }
2371
2372 static int pix_diff_vcmp16x8(UINT8 *s1, UINT8*s2, int stride){ //FIXME move to dsputil & optimize
2373     int score=0;
2374     int x,y;
2375     
2376     for(y=0; y<7; y++){
2377         for(x=0; x<16; x++){
2378             score+= SQ(s1[x  ] - s2[x ] - s1[x  +stride] + s2[x +stride]);
2379         }
2380         s1+= stride;
2381         s2+= stride;
2382     }
2383     
2384     return score;
2385 }
2386
2387 #endif
2388
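/* Hands the just-completed 16-line stripe to the user's draw_horiz_band()
 * callback.  For B-frames and low_delay the stripe comes from the current
 * picture, otherwise from the last (already reconstructed) picture; in the
 * B-frame case offset is 0 because MPV_decode_mb() writes those macroblocks to
 * the start of the buffer when draw_horiz_band is set.  The chroma planes use
 * offset>>2, i.e. (y/2) * (linesize/2) for 4:2:0. */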
2389 void ff_draw_horiz_band(MpegEncContext *s){
2390     if (    s->avctx->draw_horiz_band 
2391         && (s->last_picture.data[0] || s->low_delay) ) {
2392         UINT8 *src_ptr[3];
2393         int y, h, offset;
2394         y = s->mb_y * 16;
2395         h = s->height - y;
2396         if (h > 16)
2397             h = 16;
2398
2399         if(s->pict_type==B_TYPE)
2400             offset = 0;
2401         else
2402             offset = y * s->linesize;
2403
2404         if(s->pict_type==B_TYPE || s->low_delay){
2405             src_ptr[0] = s->current_picture.data[0] + offset;
2406             src_ptr[1] = s->current_picture.data[1] + (offset >> 2);
2407             src_ptr[2] = s->current_picture.data[2] + (offset >> 2);
2408         } else {
2409             src_ptr[0] = s->last_picture.data[0] + offset;
2410             src_ptr[1] = s->last_picture.data[1] + (offset >> 2);
2411             src_ptr[2] = s->last_picture.data[2] + (offset >> 2);
2412         }
2413         emms_c();
2414
2415         s->avctx->draw_horiz_band(s->avctx, src_ptr, s->linesize,
2416                                y, s->width, h);
2417     }
2418 }
2419
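/* Encodes one macroblock: adjusts qscale if adaptive quantization is on,
 * fetches the source pixels (intra) or the prediction error after running
 * MPV_motion() against the reference (inter) into block[0..5], optionally
 * switches to interlaced DCT when the field-separated rows score better,
 * skips blocks whose prediction error is already tiny, then forward-DCTs and
 * quantizes each block and calls the codec-specific macroblock entropy
 * coder. */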
2420 static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2421 {
2422     const int mb_x= s->mb_x;
2423     const int mb_y= s->mb_y;
2424     int i;
2425     int skip_dct[6];
2426     int dct_offset   = s->linesize*8; //default for progressive frames
2427     
2428     for(i=0; i<6; i++) skip_dct[i]=0;
2429     
2430     if(s->adaptive_quant){
2431         s->dquant= s->current_picture.qscale_table[mb_x + mb_y*s->mb_width] - s->qscale;
2432
2433         if(s->out_format==FMT_H263){
2434             if     (s->dquant> 2) s->dquant= 2;
2435             else if(s->dquant<-2) s->dquant=-2;
2436         }
2437             
2438         if(s->codec_id==CODEC_ID_MPEG4){        
2439             if(!s->mb_intra){
2440                 assert(s->dquant==0 || s->mv_type!=MV_TYPE_8X8);
2441
2442                 if(s->mv_dir&MV_DIRECT)
2443                     s->dquant=0;
2444             }
2445         }
2446         s->qscale+= s->dquant;
2447         s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
2448         s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
2449     }
2450
2451     if (s->mb_intra) {
2452         UINT8 *ptr;
2453         int wrap_y;
2454         int emu=0;
2455
2456         wrap_y = s->linesize;
2457         ptr = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
2458
2459         if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2460             ff_emulated_edge_mc(s, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2461             ptr= s->edge_emu_buffer;
2462             emu=1;
2463         }
2464         
2465         if(s->flags&CODEC_FLAG_INTERLACED_DCT){
2466             int progressive_score, interlaced_score;
2467             
2468             progressive_score= pix_vcmp16x8(ptr, wrap_y  ) + pix_vcmp16x8(ptr + wrap_y*8, wrap_y );
2469             interlaced_score = pix_vcmp16x8(ptr, wrap_y*2) + pix_vcmp16x8(ptr + wrap_y  , wrap_y*2);
2470             
2471             if(progressive_score > interlaced_score + 100){
2472                 s->interlaced_dct=1;
2473             
2474                 dct_offset= wrap_y;
2475                 wrap_y<<=1;
2476             }else
2477                 s->interlaced_dct=0;
2478         }
2479         
2480         s->dsp.get_pixels(s->block[0], ptr                 , wrap_y);
2481         s->dsp.get_pixels(s->block[1], ptr              + 8, wrap_y);
2482         s->dsp.get_pixels(s->block[2], ptr + dct_offset    , wrap_y);
2483         s->dsp.get_pixels(s->block[3], ptr + dct_offset + 8, wrap_y);
2484
2485         if(s->flags&CODEC_FLAG_GRAY){
2486             skip_dct[4]= 1;
2487             skip_dct[5]= 1;
2488         }else{
2489             int wrap_c = s->uvlinesize;
2490             ptr = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2491             if(emu){
2492                 ff_emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2493                 ptr= s->edge_emu_buffer;
2494             }
2495             s->dsp.get_pixels(s->block[4], ptr, wrap_c);
2496
2497             ptr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
2498             if(emu){
2499                 ff_emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2500                 ptr= s->edge_emu_buffer;
2501             }
2502             s->dsp.get_pixels(s->block[5], ptr, wrap_c);
2503         }
2504     }else{
2505         op_pixels_func (*op_pix)[4];
2506         qpel_mc_func (*op_qpix)[16];
2507         UINT8 *dest_y, *dest_cb, *dest_cr;
2508         UINT8 *ptr_y, *ptr_cb, *ptr_cr;
2509         int wrap_y, wrap_c;
2510         int emu=0;
2511
2512         dest_y  = s->current_picture.data[0] + (mb_y * 16 * s->linesize    ) + mb_x * 16;
2513         dest_cb = s->current_picture.data[1] + (mb_y * 8  * (s->uvlinesize)) + mb_x * 8;
2514         dest_cr = s->current_picture.data[2] + (mb_y * 8  * (s->uvlinesize)) + mb_x * 8;
2515         wrap_y = s->linesize;
2516         wrap_c = s->uvlinesize;
2517         ptr_y  = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
2518         ptr_cb = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2519         ptr_cr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
2520
2521         if ((!s->no_rounding) || s->pict_type==B_TYPE){
2522             op_pix = s->dsp.put_pixels_tab;
2523             op_qpix= s->dsp.put_qpel_pixels_tab;
2524         }else{
2525             op_pix = s->dsp.put_no_rnd_pixels_tab;
2526             op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
2527         }
2528
2529         if (s->mv_dir & MV_DIR_FORWARD) {
2530             MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2531             op_pix = s->dsp.avg_pixels_tab;
2532             op_qpix= s->dsp.avg_qpel_pixels_tab;
2533         }
2534         if (s->mv_dir & MV_DIR_BACKWARD) {
2535             MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2536         }
2537
2538         if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2539             ff_emulated_edge_mc(s, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2540             ptr_y= s->edge_emu_buffer;
2541             emu=1;
2542         }
2543         
2544         if(s->flags&CODEC_FLAG_INTERLACED_DCT){
2545             int progressive_score, interlaced_score;
2546             
2547             progressive_score= pix_diff_vcmp16x8(ptr_y           , dest_y           , wrap_y  ) 
2548                              + pix_diff_vcmp16x8(ptr_y + wrap_y*8, dest_y + wrap_y*8, wrap_y  );
2549             interlaced_score = pix_diff_vcmp16x8(ptr_y           , dest_y           , wrap_y*2)
2550                              + pix_diff_vcmp16x8(ptr_y + wrap_y  , dest_y + wrap_y  , wrap_y*2);
2551             
2552             if(progressive_score > interlaced_score + 600){
2553                 s->interlaced_dct=1;
2554             
2555                 dct_offset= wrap_y;
2556                 wrap_y<<=1;
2557             }else
2558                 s->interlaced_dct=0;
2559         }
2560         
2561         s->dsp.diff_pixels(s->block[0], ptr_y                 , dest_y                 , wrap_y);
2562         s->dsp.diff_pixels(s->block[1], ptr_y              + 8, dest_y              + 8, wrap_y);
2563         s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset    , dest_y + dct_offset    , wrap_y);
2564         s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y);
2565         
2566         if(s->flags&CODEC_FLAG_GRAY){
2567             skip_dct[4]= 1;
2568             skip_dct[5]= 1;
2569         }else{
2570             if(emu){
2571                 ff_emulated_edge_mc(s, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2572                 ptr_cb= s->edge_emu_buffer;
2573             }
2574             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2575             if(emu){
2576                 ff_emulated_edge_mc(s, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2577                 ptr_cr= s->edge_emu_buffer;
2578             }
2579             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2580         }
2581         /* pre quantization */         
2582         if(s->current_picture.mc_mb_var[s->mb_width*mb_y+ mb_x]<2*s->qscale*s->qscale){
2583             //FIXME optimize
2584             if(s->dsp.pix_abs8x8(ptr_y               , dest_y               , wrap_y) < 20*s->qscale) skip_dct[0]= 1;
2585             if(s->dsp.pix_abs8x8(ptr_y            + 8, dest_y            + 8, wrap_y) < 20*s->qscale) skip_dct[1]= 1;
2586             if(s->dsp.pix_abs8x8(ptr_y +dct_offset   , dest_y +dct_offset   , wrap_y) < 20*s->qscale) skip_dct[2]= 1;
2587             if(s->dsp.pix_abs8x8(ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y) < 20*s->qscale) skip_dct[3]= 1;
2588             if(s->dsp.pix_abs8x8(ptr_cb              , dest_cb              , wrap_c) < 20*s->qscale) skip_dct[4]= 1;
2589             if(s->dsp.pix_abs8x8(ptr_cr              , dest_cr              , wrap_c) < 20*s->qscale) skip_dct[5]= 1;
2590 #if 0
2591 {
2592  static int stat[7];
2593  int num=0;
2594  for(i=0; i<6; i++)
2595   if(skip_dct[i]) num++;
2596  stat[num]++;
2597  
2598  if(s->mb_x==0 && s->mb_y==0){
2599   for(i=0; i<7; i++){
2600    printf("%6d %1d\n", stat[i], i);
2601   }
2602  }
2603 }
2604 #endif
2605         }
2606
2607     }
2608             
2609 #if 0
2610             {
2611                 float adap_parm;
2612                 
2613                 adap_parm = ((s->avg_mb_var << 1) + s->mb_var[s->mb_width*mb_y+mb_x] + 1.0) /
2614                             ((s->mb_var[s->mb_width*mb_y+mb_x] << 1) + s->avg_mb_var + 1.0);
2615             
2616                 printf("\ntype=%c qscale=%2d adap=%0.2f dquant=%4.2f var=%4d avgvar=%4d", 
2617                         (s->mb_type[s->mb_width*mb_y+mb_x] > 0) ? 'I' : 'P', 
2618                         s->qscale, adap_parm, s->qscale*adap_parm,
2619                         s->mb_var[s->mb_width*mb_y+mb_x], s->avg_mb_var);
2620             }
2621 #endif
2622     /* DCT & quantize */
2623     if(s->out_format==FMT_MJPEG){
2624         for(i=0;i<6;i++) {
2625             int overflow;
2626             s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, 8, &overflow);
2627             if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
2628         }
2629     }else{
2630         for(i=0;i<6;i++) {
2631             if(!skip_dct[i]){
2632                 int overflow;
2633                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2634             // FIXME we could decide to change to quantizer instead of clipping
2635             // JS: I don't think that would be a good idea, it could lower quality instead
2636             //     of improve it. Just INTRADC clipping deserves changes in quantizer
2637                 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
2638             }else
2639                 s->block_last_index[i]= -1;
2640         }
2641         if(s->luma_elim_threshold && !s->mb_intra)
2642             for(i=0; i<4; i++)
2643                 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2644         if(s->chroma_elim_threshold && !s->mb_intra)
2645             for(i=4; i<6; i++)
2646                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2647     }
2648
2649     if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
2650         s->block_last_index[4]=
2651         s->block_last_index[5]= 0;
2652         s->block[4][0]=
2653         s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale;
2654     }
2655
2656 #ifdef CONFIG_ENCODERS
2657     /* huffman encode */
2658     switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2659     case CODEC_ID_MPEG1VIDEO:
2660         mpeg1_encode_mb(s, s->block, motion_x, motion_y); break;
2661 #ifdef CONFIG_RISKY
2662     case CODEC_ID_MPEG4:
2663         mpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
2664     case CODEC_ID_MSMPEG4V2:
2665     case CODEC_ID_MSMPEG4V3:
2666     case CODEC_ID_WMV1:
2667         msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
2668     case CODEC_ID_WMV2:
2669          ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break;
2670     case CODEC_ID_H263:
2671     case CODEC_ID_H263P:
2672     case CODEC_ID_RV10:
2673         h263_encode_mb(s, s->block, motion_x, motion_y); break;
2674 #endif
2675     case CODEC_ID_MJPEG:
2676         mjpeg_encode_mb(s, s->block); break;
2677     default:
2678         assert(0);
2679     }
2680 #endif
2681 }
2682
2683 /**
2684  * combines the (truncated) bitstream into a complete frame
2685  * @returns -1 if no complete frame could be created
2686  */
2687 int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size){
2688     ParseContext *pc= &s->parse_context;
2689         
2690     pc->last_index= pc->index;
2691
2692     if(next==-1){
2693         pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
2694
2695         memcpy(&pc->buffer[pc->index], *buf, *buf_size);
2696         pc->index += *buf_size;
2697         return -1;
2698     }
2699
2700     if(pc->index){
2701         pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
2702
2703         memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE );
2704         pc->index = 0;
2705         *buf= pc->buffer;
2706         *buf_size= pc->last_index + next;
2707     }
2708
2709     return 0;
2710 }
2711
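/* appends 'length' bits from src to the PutBitContext: whole 16-bit words
 * first, then the remaining length&15 bits taken from the top of the next
 * word.  E.g. length == 37 results in two 16-bit put_bits() calls followed by
 * one 5-bit call. */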
2712 void ff_copy_bits(PutBitContext *pb, UINT8 *src, int length)
2713 {
2714     int bytes= length>>4;
2715     int bits= length&15;
2716     int i;
2717
2718     if(length==0) return;
2719
2720     for(i=0; i<bytes; i++) put_bits(pb, 16, be2me_16(((uint16_t*)src)[i]));
2721     put_bits(pb, bits, be2me_16(((uint16_t*)src)[i])>>(16-bits));
2722 }
2723
2724 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2725     int i;
2726
2727     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2728
2729     /* mpeg1 */
2730     d->mb_incr= s->mb_incr;
2731     for(i=0; i<3; i++)
2732         d->last_dc[i]= s->last_dc[i];
2733     
2734     /* statistics */
2735     d->mv_bits= s->mv_bits;
2736     d->i_tex_bits= s->i_tex_bits;
2737     d->p_tex_bits= s->p_tex_bits;
2738     d->i_count= s->i_count;
2739     d->f_count= s->f_count;
2740     d->b_count= s->b_count;
2741     d->skip_count= s->skip_count;
2742     d->misc_bits= s->misc_bits;
2743     d->last_bits= 0;
2744
2745     d->mb_skiped= s->mb_skiped;
2746     d->qscale= s->qscale;
2747 }
2748
2749 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2750     int i;
2751
2752     memcpy(d->mv, s->mv, 2*4*2*sizeof(int)); 
2753     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2754     
2755     /* mpeg1 */
2756     d->mb_incr= s->mb_incr;
2757     for(i=0; i<3; i++)
2758         d->last_dc[i]= s->last_dc[i];
2759     
2760     /* statistics */
2761     d->mv_bits= s->mv_bits;
2762     d->i_tex_bits= s->i_tex_bits;
2763     d->p_tex_bits= s->p_tex_bits;
2764     d->i_count= s->i_count;
2765     d->f_count= s->f_count;
2766     d->b_count= s->b_count;
2767     d->skip_count= s->skip_count;
2768     d->misc_bits= s->misc_bits;
2769
2770     d->mb_intra= s->mb_intra;
2771     d->mb_skiped= s->mb_skiped;
2772     d->mv_type= s->mv_type;
2773     d->mv_dir= s->mv_dir;
2774     d->pb= s->pb;
2775     if(s->data_partitioning){
2776         d->pb2= s->pb2;
2777         d->tex_pb= s->tex_pb;
2778     }
2779     d->block= s->block;
2780     for(i=0; i<6; i++)
2781         d->block_last_index[i]= s->block_last_index[i];
2782     d->interlaced_dct= s->interlaced_dct;
2783     d->qscale= s->qscale;
2784 }
2785
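/* Part of the macroblock mode decision: encodes the MB with the candidate
 * settings (already prepared in 'backup') into one of the two scratch
 * PutBitContexts, counts the bits used (including the pb2/tex_pb partitions
 * when data partitioning is on), and if this beats the best candidate so far
 * it records the bit count in *dmin, flips *next_block and snapshots the
 * relevant encoder state into 'best'. */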
2786 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, 
2787                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2788                            int *dmin, int *next_block, int motion_x, int motion_y)
2789 {
2790     int bits_count;
2791     
2792     copy_context_before_encode(s, backup, type);
2793
2794     s->block= s->blocks[*next_block];
2795     s->pb= pb[*next_block];
2796     if(s->data_partitioning){
2797         s->pb2   = pb2   [*next_block];
2798         s->tex_pb= tex_pb[*next_block];
2799     }
2800
2801     encode_mb(s, motion_x, motion_y);
2802
2803     bits_count= get_bit_count(&s->pb);
2804     if(s->data_partitioning){
2805         bits_count+= get_bit_count(&s->pb2);
2806         bits_count+= get_bit_count(&s->tex_pb);
2807     }
2808
2809     if(bits_count<*dmin){
2810         *dmin= bits_count;
2811         *next_block^=1;
2812
2813         copy_context_after_encode(best, s, type);
2814     }
2815 }
2816                 
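/* plain sum of squared errors between two w x h blocks; the common 16x16 and
 * 8x8 cases go through the optimized dsp.sse table, everything else through a
 * generic loop using the squareTbl lookup (offset by 256 so that negative
 * differences index correctly) */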
2817 static inline int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2818     uint32_t *sq = squareTbl + 256;
2819     int acc=0;
2820     int x,y;
2821     
2822     if(w==16 && h==16) 
2823         return s->dsp.sse[0](NULL, src1, src2, stride);
2824     else if(w==8 && h==8)
2825         return s->dsp.sse[1](NULL, src1, src2, stride);
2826     
2827     for(y=0; y<h; y++){
2828         for(x=0; x<w; x++){
2829             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2830         } 
2831     }
2832     
2833     assert(acc>=0);
2834     
2835     return acc;
2836 }
2837
2838 static void encode_picture(MpegEncContext *s, int picture_number)
2839 {
2840     int mb_x, mb_y, pdif = 0;
2841     int i;
2842     int bits;
2843     MpegEncContext best_s, backup_s;
2844     UINT8 bit_buf[2][3000];
2845     UINT8 bit_buf2[2][3000];
2846     UINT8 bit_buf_tex[2][3000];
2847     PutBitContext pb[2], pb2[2], tex_pb[2];
2848
2849     for(i=0; i<2; i++){
2850         init_put_bits(&pb    [i], bit_buf    [i], 3000, NULL, NULL);
2851         init_put_bits(&pb2   [i], bit_buf2   [i], 3000, NULL, NULL);
2852         init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000, NULL, NULL);
2853     }
2854
2855     s->picture_number = picture_number;
2856
2857     s->block_wrap[0]=
2858     s->block_wrap[1]=
2859     s->block_wrap[2]=
2860     s->block_wrap[3]= s->mb_width*2 + 2;
2861     s->block_wrap[4]=
2862     s->block_wrap[5]= s->mb_width + 2;
2863     
2864     /* Reset the average MB variance */
2865     s->current_picture.mb_var_sum = 0;
2866     s->current_picture.mc_mb_var_sum = 0;
2867
2868 #ifdef CONFIG_RISKY
2869     /* we need to initialize some time vars before we can encode b-frames */
2870     if (s->h263_pred && !s->h263_msmpeg4)
2871         ff_set_mpeg4_time(s, s->picture_number); 
2872 #endif
2873         
2874     s->scene_change_score=0;
2875     
2876     s->qscale= (int)(s->frame_qscale + 0.5); //FIXME qscale / ... stuff for ME rate distortion
2877     
2878     if(s->msmpeg4_version){
2879         if(s->pict_type==I_TYPE)
2880             s->no_rounding=1;
2881         else if(s->flipflop_rounding)
2882             s->no_rounding ^= 1;          
2883     }else if(s->out_format == FMT_H263){
2884         if(s->pict_type==I_TYPE)
2885             s->no_rounding=0;
2886         else if(s->pict_type!=B_TYPE)
2887             s->no_rounding ^= 1;          
2888     }
2889     /* Estimate motion for every MB */
2890     s->mb_intra=0; //for the rate distortion & bit compare functions
2891     if(s->pict_type != I_TYPE){
2892         if(s->pict_type != B_TYPE){
2893             if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
2894                 s->me.pre_pass=1;
2895                 s->me.dia_size= s->avctx->pre_dia_size;
2896
2897                 for(mb_y=s->mb_height-1; mb_y >=0 ; mb_y--) {
2898                     for(mb_x=s->mb_width-1; mb_x >=0 ; mb_x--) {
2899                         s->mb_x = mb_x;
2900                         s->mb_y = mb_y;
2901                         ff_pre_estimate_p_frame_motion(s, mb_x, mb_y);
2902                     }
2903                 }
2904                 s->me.pre_pass=0;
2905             }
2906         }
2907
2908         s->me.dia_size= s->avctx->dia_size;
2909         for(mb_y=0; mb_y < s->mb_height; mb_y++) {
2910             s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
2911             s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
2912             s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
2913             s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
2914             for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2915                 s->mb_x = mb_x;
2916                 s->mb_y = mb_y;
2917                 s->block_index[0]+=2;
2918                 s->block_index[1]+=2;
2919                 s->block_index[2]+=2;
2920                 s->block_index[3]+=2;
2921                 
2922                 /* compute motion vector & mb_type and store in context */
2923                 if(s->pict_type==B_TYPE)
2924                     ff_estimate_b_frame_motion(s, mb_x, mb_y);
2925                 else
2926                     ff_estimate_p_frame_motion(s, mb_x, mb_y);
2927             }
2928         }
2929     }else /* if(s->pict_type == I_TYPE) */{
2930         /* I-Frame */
2931         //FIXME do we need to zero them?
2932         memset(s->motion_val[0], 0, sizeof(INT16)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2);
2933         memset(s->p_mv_table   , 0, sizeof(INT16)*(s->mb_width+2)*(s->mb_height+2)*2);
2934         memset(s->mb_type      , MB_TYPE_INTRA, sizeof(UINT8)*s->mb_width*s->mb_height);
2935         
2936         if(!s->fixed_qscale){
2937             /* finding spatial complexity for I-frame rate control */
2938             for(mb_y=0; mb_y < s->mb_height; mb_y++) {
2939                 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2940                     int xx = mb_x * 16;
2941                     int yy = mb_y * 16;
2942                     uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
2943                     int varc;
2944                     int sum = s->dsp.pix_sum(pix, s->linesize);
2945     
2946                     varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
2947
2948                     s->current_picture.mb_var [s->mb_width * mb_y + mb_x] = varc;
2949                     s->current_picture.mb_mean[s->mb_width * mb_y + mb_x] = (sum+128)>>8;
2950                     s->current_picture.mb_var_sum    += varc;
2951                 }
2952             }
2953         }
2954     }
2955     emms_c();
2956
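         /* if motion estimation flagged a scene change (score > 0), code this
            frame as an I frame instead of a P frame */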
2957     if(s->scene_change_score > 0 && s->pict_type == P_TYPE){
2958         s->pict_type= I_TYPE;
2959         memset(s->mb_type   , MB_TYPE_INTRA, sizeof(UINT8)*s->mb_width*s->mb_height);
2960 //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
2961     }
2962
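         /* pick the smallest f_code/b_code that covers the estimated motion
            vectors, then fix up (clamp) vectors that still fall outside the
            range representable with that code */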
2963     if(s->pict_type==P_TYPE || s->pict_type==S_TYPE){
2964         s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER);
2965         ff_fix_long_p_mvs(s);
         }
2966     if(s->pict_type==B_TYPE){
2967         s->f_code= ff_get_best_fcode(s, s->b_forw_mv_table, MB_TYPE_FORWARD);
2968         s->b_code= ff_get_best_fcode(s, s->b_back_mv_table, MB_TYPE_BACKWARD);
2969
2970         ff_fix_long_b_mvs(s, s->b_forw_mv_table, s->f_code, MB_TYPE_FORWARD);
2971         ff_fix_long_b_mvs(s, s->b_back_mv_table, s->b_code, MB_TYPE_BACKWARD);
2972         ff_fix_long_b_mvs(s, s->b_bidir_forw_mv_table, s->f_code, MB_TYPE_BIDIR);
2973         ff_fix_long_b_mvs(s, s->b_bidir_back_mv_table, s->b_code, MB_TYPE_BIDIR);
2974     }
2975     
2976     if (s->fixed_qscale) 
2977         s->frame_qscale = s->current_picture.quality;
2978     else
2979         s->frame_qscale = ff_rate_estimate_qscale(s);
2980
2981     if(s->adaptive_quant){
2982 #ifdef CONFIG_RISKY
2983         switch(s->codec_id){
2984         case CODEC_ID_MPEG4:
2985             ff_clean_mpeg4_qscales(s);
2986             break;
2987         case CODEC_ID_H263:
2988         case CODEC_ID_H263P:
2989             ff_clean_h263_qscales(s);
2990             break;
2991         }
2992 #endif
2993
2994         s->qscale= s->current_picture.qscale_table[0];
2995     }else
2996         s->qscale= (int)(s->frame_qscale + 0.5);
2997         
2998     if (s->out_format == FMT_MJPEG) {
2999         /* for mjpeg, we do include qscale in the matrix */
3000         s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
3001         for(i=1;i<64;i++){
3002             int j= s->idct_permutation[i];
3003
3004             s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3005         }
3006         convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, 
3007                        s->q_intra_matrix16_bias, s->intra_matrix, s->intra_quant_bias, 8, 8);
3008     }
3009     
3010     //FIXME var duplication
3011     s->current_picture.key_frame= s->pict_type == I_TYPE;
3012     s->current_picture.pict_type= s->pict_type;
3013
3014     if(s->current_picture.key_frame)
3015         s->picture_in_gop_number=0;
3016
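         /* write the picture header for the output format and account the
            bits it used as header_bits */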
3017     s->last_bits= get_bit_count(&s->pb);
3018     switch(s->out_format) {
3019     case FMT_MJPEG:
3020         mjpeg_picture_header(s);
3021         break;
3022 #ifdef CONFIG_RISKY
3023     case FMT_H263:
3024         if (s->codec_id == CODEC_ID_WMV2) 
3025             ff_wmv2_encode_picture_header(s, picture_number);
3026         else if (s->h263_msmpeg4) 
3027             msmpeg4_encode_picture_header(s, picture_number);
3028         else if (s->h263_pred)
3029             mpeg4_encode_picture_header(s, picture_number);
3030         else if (s->h263_rv10) 
3031             rv10_encode_picture_header(s, picture_number);
3032         else
3033             h263_encode_picture_header(s, picture_number);
3034         break;
3035 #endif
3036     case FMT_MPEG1:
3037         mpeg1_encode_picture_header(s, picture_number);
3038         break;
3039     }
3040     bits= get_bit_count(&s->pb);
3041     s->header_bits= bits - s->last_bits;
3042     s->last_bits= bits;
3043     s->mv_bits=0;
3044     s->misc_bits=0;
3045     s->i_tex_bits=0;
3046     s->p_tex_bits=0;
3047     s->i_count=0;
3048     s->f_count=0;
3049     s->b_count=0;
3050     s->skip_count=0;
3051
3052     for(i=0; i<3; i++){
3053         /* init last dc values */
3054         /* note: quant matrix value (8) is implied here */
3055         s->last_dc[i] = 128;
3056         
3057         s->current_picture.error[i] = 0;
3058     }
3059     s->mb_incr = 1;
3060     s->last_mv[0][0][0] = 0;
3061     s->last_mv[0][0][1] = 0;
3062
3063 #ifdef CONFIG_RISKY
3064     if (s->codec_id==CODEC_ID_H263 || s->codec_id==CODEC_ID_H263P)
3065         s->gob_index = ff_h263_get_gob_height(s);
3066
3067     if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
3068         ff_mpeg4_init_partitions(s);
3069 #endif
3070
3071     s->resync_mb_x=0;
3072     s->resync_mb_y=0;
3073     s->first_slice_line = 1;
3074     s->ptr_lastgob = s->pb.buf;
3075     s->ptr_last_mb_line = s->pb.buf;
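         /* main encoding loop: code all macroblocks in scan order, emitting
            GOB / video packet headers on the way when rtp_mode is set */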
3076     for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3077         s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
3078         s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
3079         
3080         s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
3081         s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
3082         s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
3083         s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
3084         s->block_index[4]= s->block_wrap[4]*(mb_y + 1)                    + s->block_wrap[0]*(s->mb_height*2 + 2);
3085         s->block_index[5]= s->block_wrap[4]*(mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2);
3086         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3087             int mb_type= s->mb_type[mb_y * s->mb_width + mb_x];
3088             const int xy= (mb_y+1) * (s->mb_width+2) + mb_x + 1;
3089 //            int d;
3090             int dmin=10000000;
3091
3092             s->mb_x = mb_x;
3093             s->mb_y = mb_y;
3094             s->block_index[0]+=2;
3095             s->block_index[1]+=2;
3096             s->block_index[2]+=2;
3097             s->block_index[3]+=2;
3098             s->block_index[4]++;
3099             s->block_index[5]++;
3100
3101             /* write GOB / video packet header */
3102 #ifdef CONFIG_RISKY
3103             if(s->rtp_mode){
3104                 int current_packet_size, is_gob_start;
3105                 
3106                 current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob;
3107                 is_gob_start=0;
3108                 
3109                 if(s->codec_id==CODEC_ID_MPEG4){
3110                     if(current_packet_size + s->mb_line_avgsize/s->mb_width >= s->rtp_payload_size
3111                        && s->mb_y + s->mb_x>0){
3112
3113                         if(s->partitioned_frame){
3114                             ff_mpeg4_merge_partitions(s);
3115                             ff_mpeg4_init_partitions(s);
3116                         }
3117                         ff_mpeg4_encode_video_packet_header(s);
3118
3119                         if(s->flags&CODEC_FLAG_PASS1){
3120                             int bits= get_bit_count(&s->pb);
3121                             s->misc_bits+= bits - s->last_bits;
3122                             s->last_bits= bits;
3123                         }
3124                         ff_mpeg4_clean_buffers(s);
3125                         is_gob_start=1;
3126                     }
3127                 }else{
3128                     if(current_packet_size + s->mb_line_avgsize*s->gob_index >= s->rtp_payload_size
3129                        && s->mb_x==0 && s->mb_y>0 && s->mb_y%s->gob_index==0){
3130                        
3131                         h263_encode_gob_header(s, mb_y);                       
3132                         is_gob_start=1;
3133                     }
3134                 }
3135
3136                 if(is_gob_start){
3137                     s->ptr_lastgob = pbBufPtr(&s->pb);
3138                     s->first_slice_line=1;
3139                     s->resync_mb_x=mb_x;
3140                     s->resync_mb_y=mb_y;
3141                 }
3142             }
3143 #endif
3144
3145             if(  (s->resync_mb_x   == s->mb_x)
3146                && s->resync_mb_y+1 == s->mb_y){
3147                 s->first_slice_line=0; 
3148             }
3149
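                     /* more than one coding is possible for this MB: try each
                        candidate type with encode_mb_hq() into the scratch bit
                        buffers and keep the one that costs the fewest bits */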
3150             if(mb_type & (mb_type-1)){ // more than 1 MB type possible
3151                 int next_block=0;
3152                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3153
3154                 copy_context_before_encode(&backup_s, s, -1);
3155                 backup_s.pb= s->pb;
3156                 best_s.data_partitioning= s->data_partitioning;
3157                 best_s.partitioned_frame= s->partitioned_frame;
3158                 if(s->data_partitioning){
3159                     backup_s.pb2= s->pb2;
3160                     backup_s.tex_pb= s->tex_pb;
3161                 }
3162
3163                 if(mb_type&MB_TYPE_INTER){
3164                     s->mv_dir = MV_DIR_FORWARD;
3165                     s->mv_type = MV_TYPE_16X16;
3166                     s->mb_intra= 0;
3167                     s->mv[0][0][0] = s->p_mv_table[xy][0];
3168                     s->mv[0][0][1] = s->p_mv_table[xy][1];
3169                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER, pb, pb2, tex_pb, 
3170                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3171                 }
3172                 if(mb_type&MB_TYPE_INTER4V){                 
3173                     s->mv_dir = MV_DIR_FORWARD;
3174                     s->mv_type = MV_TYPE_8X8;
3175                     s->mb_intra= 0;
3176                     for(i=0; i<4; i++){
3177                         s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3178                         s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3179                     }
3180                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER4V, pb, pb2, tex_pb, 
3181                                  &dmin, &next_block, 0, 0);
3182                 }
3183                 if(mb_type&MB_TYPE_FORWARD){
3184                     s->mv_dir = MV_DIR_FORWARD;
3185                     s->mv_type = MV_TYPE_16X16;
3186                     s->mb_intra= 0;
3187                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3188                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3189                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_FORWARD, pb, pb2, tex_pb, 
3190                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3191                 }
3192                 if(mb_type&MB_TYPE_BACKWARD){
3193                     s->mv_dir = MV_DIR_BACKWARD;
3194                     s->mv_type = MV_TYPE_16X16;
3195                     s->mb_intra= 0;
3196                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3197                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3198                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BACKWARD, pb, pb2, tex_pb, 
3199                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3200                 }
3201                 if(mb_type&MB_TYPE_BIDIR){
3202                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3203                     s->mv_type = MV_TYPE_16X16;
3204                     s->mb_intra= 0;
3205                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3206                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3207                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3208                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3209                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BIDIR, pb, pb2, tex_pb, 
3210                                  &dmin, &next_block, 0, 0);
3211                 }
3212                 if(mb_type&MB_TYPE_DIRECT){
3213                     int mx= s->b_direct_mv_table[xy][0];
3214                     int my= s->b_direct_mv_table[xy][1];
3215                     
3216                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3217                     s->mb_intra= 0;
3218 #ifdef CONFIG_RISKY
3219                     ff_mpeg4_set_direct_mv(s, mx, my);
3220 #endif
3221                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_DIRECT, pb, pb2, tex_pb, 
3222                                  &dmin, &next_block, mx, my);
3223                 }
3224                 if(mb_type&MB_TYPE_INTRA){
3225                     s->mv_dir = MV_DIR_FORWARD;
3226                     s->mv_type = MV_TYPE_16X16;
3227                     s->mb_intra= 1;
3228                     s->mv[0][0][0] = 0;
3229                     s->mv[0][0][1] = 0;
3230                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTRA, pb, pb2, tex_pb, 
3231                                  &dmin, &next_block, 0, 0);
3232                     /* force cleaning of the AC/DC prediction state if needed ... */
3233                     if(s->h263_pred || s->h263_aic)
3234                         s->mbintra_table[mb_x + mb_y*s->mb_width]=1;
3235                 }
3236                 copy_context_after_encode(s, &best_s, -1);
3237                 
3238                 pb_bits_count= get_bit_count(&s->pb);
3239                 flush_put_bits(&s->pb);
3240                 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3241                 s->pb= backup_s.pb;
3242                 
3243                 if(s->data_partitioning){
3244                     pb2_bits_count= get_bit_count(&s->pb2);
3245                     flush_put_bits(&s->pb2);
3246                     ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3247                     s->pb2= backup_s.pb2;
3248                     
3249                     tex_pb_bits_count= get_bit_count(&s->tex_pb);
3250                     flush_put_bits(&s->tex_pb);
3251                     ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3252                     s->tex_pb= backup_s.tex_pb;
3253                 }
3254                 s->last_bits= get_bit_count(&s->pb);
3255             } else {
3256                 int motion_x, motion_y;
3257                 int intra_score;
3258                 int inter_score= s->current_picture.mb_cmp_score[mb_x + mb_y*s->mb_width];
3259                 
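                       /* cheap intra/inter decision for P frames outside HQ
                          mode: compare the inter score from motion estimation
                          against the cost of approximating the MB by a flat
                          block at its mean value (a rough intra estimate),
                          plus a bias depending on the comparison function */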
3260               if(!(s->flags&CODEC_FLAG_HQ) && s->pict_type==P_TYPE){
3261                 /* get luma score */
3262                 if((s->avctx->mb_cmp&0xFF)==FF_CMP_SSE){
3263                     intra_score= (s->current_picture.mb_var[mb_x + mb_y*s->mb_width]<<8) - 500; //FIXME don't scale it down so we don't have to fix it
3264                 }else{
3265                     uint8_t *dest_y;
3266
3267                     int mean= s->current_picture.mb_mean[mb_x + mb_y*s->mb_width]; //FIXME
3268                     mean*= 0x01010101;
3269                     
3270                     dest_y  = s->new_picture.data[0] + (mb_y * 16 * s->linesize    ) + mb_x * 16;
3271                 
3272                     for(i=0; i<16; i++){
3273                         *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 0]) = mean;
3274                         *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 4]) = mean;
3275                         *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 8]) = mean;
3276                         *(uint32_t*)(&s->me.scratchpad[i*s->linesize+12]) = mean;
3277                     }
3278
3279                     s->mb_intra=1;
3280                     intra_score= s->dsp.mb_cmp[0](s, s->me.scratchpad, dest_y, s->linesize);
3281                                         
3282 /*                    printf("intra:%7d inter:%7d var:%7d mc_var.%7d\n", intra_score>>8, inter_score>>8, 
3283                         s->current_picture.mb_var[mb_x + mb_y*s->mb_width],
3284                         s->current_picture.mc_mb_var[mb_x + mb_y*s->mb_width]);*/
3285                 }
3286                 
3287                 /* get chroma score */
3288                 if(s->avctx->mb_cmp&FF_CMP_CHROMA){
3289                     int i, y;
3290                     
3291                     s->mb_intra=1;
3292                     for(i=1; i<3; i++){
3293                         uint8_t *dest_c;
3294                         int mean;
3295                         
3296                         if(s->out_format == FMT_H263){
3297                             mean= (s->dc_val[i][mb_x + (mb_y+1)*(s->mb_width+2)] + 4)>>3; //FIXME not exact but simple ;)
3298                         }else{
3299                             mean= (s->last_dc[i] + 4)>>3;
3300                         }
3301                         dest_c = s->new_picture.data[i] + (mb_y * 8  * (s->uvlinesize)) + mb_x * 8;
3302                         
3303                         mean*= 0x01010101;
3304                         for(y=0; y<8; y++){
3305                             *(uint32_t*)(&s->me.scratchpad[y*s->uvlinesize+ 0]) = mean;
3306                             *(uint32_t*)(&s->me.scratchpad[y*s->uvlinesize+ 4]) = mean;
3307                         }
3308                         
3309                         intra_score+= s->dsp.mb_cmp[1](s, s->me.scratchpad, dest_c, s->uvlinesize);
3310                     }                
3311                 }
3312
3313                 /* bias */
3314                 switch(s->avctx->mb_cmp&0xFF){
3315                 default:
3316                 case FF_CMP_SAD:
3317                     intra_score+= 32*s->qscale;
3318                     break;
3319                 case FF_CMP_SSE:
3320                     intra_score+= 24*s->qscale*s->qscale;
3321                     break;
3322                 case FF_CMP_SATD:
3323                     intra_score+= 96*s->qscale;
3324                     break;
3325                 case FF_CMP_DCT:
3326                     intra_score+= 48*s->qscale;
3327                     break;
3328                 case FF_CMP_BIT:
3329                     intra_score+= 16;
3330                     break;
3331                 case FF_CMP_PSNR:
3332                 case FF_CMP_RD:
3333                     intra_score+= (s->qscale*s->qscale*109*8 + 64)>>7;
3334                     break;
3335                 }
3336
3337                 if(intra_score < inter_score)
3338                     mb_type= MB_TYPE_INTRA;
3339               }  
3340                 
3341                 s->mv_type=MV_TYPE_16X16;
3342                 // only one MB-Type possible
3343                 
3344                 switch(mb_type){
3345                 case MB_TYPE_INTRA:
3346                     s->mv_dir = MV_DIR_FORWARD;
3347                     s->mb_intra= 1;
3348                     motion_x= s->mv[0][0][0] = 0;
3349                     motion_y= s->mv[0][0][1] = 0;
3350                     break;
3351                 case MB_TYPE_INTER:
3352                     s->mv_dir = MV_DIR_FORWARD;
3353                     s->mb_intra= 0;
3354                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3355                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3356                     break;
3357                 case MB_TYPE_INTER4V:
3358                     s->mv_dir = MV_DIR_FORWARD;
3359                     s->mv_type = MV_TYPE_8X8;
3360                     s->mb_intra= 0;
3361                     for(i=0; i<4; i++){
3362                         s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3363                         s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3364                     }
3365                     motion_x= motion_y= 0;
3366                     break;
3367                 case MB_TYPE_DIRECT:
3368                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3369                     s->mb_intra= 0;
3370                     motion_x=s->b_direct_mv_table[xy][0];
3371                     motion_y=s->b_direct_mv_table[xy][1];
3372 #ifdef CONFIG_RISKY
3373                     ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3374 #endif
3375                     break;
3376                 case MB_TYPE_BIDIR:
3377                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3378                     s->mb_intra= 0;
3379                     motion_x=0;
3380                     motion_y=0;
3381                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3382                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3383                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3384                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3385                     break;
3386                 case MB_TYPE_BACKWARD:
3387                     s->mv_dir = MV_DIR_BACKWARD;
3388                     s->mb_intra= 0;
3389                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3390                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3391                     break;
3392                 case MB_TYPE_FORWARD:
3393                     s->mv_dir = MV_DIR_FORWARD;
3394                     s->mb_intra= 0;
3395                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3396                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3397 //                    printf(" %d %d ", motion_x, motion_y);
3398                     break;
3399                 default:
3400                     motion_x=motion_y=0; //gcc warning fix
3401                     fprintf(stderr, "illegal MB type\n");
3402                 }
3403                 encode_mb(s, motion_x, motion_y);
3404             }
3405             /* clean the MV table in I/P/S frames for direct mode in B frames */
3406             if(s->mb_intra /* && I,P,S_TYPE */){
3407                 s->p_mv_table[xy][0]=0;
3408                 s->p_mv_table[xy][1]=0;
3409             }
3410
3411             MPV_decode_mb(s, s->block);
3412             
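             /* accumulate per plane squared error against the source picture
                for PSNR reporting; MBs at the right/bottom edge are clipped
                to the visible picture size */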
3413             if(s->flags&CODEC_FLAG_PSNR){
3414                 int w= 16;
3415                 int h= 16;
3416
3417                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3418                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3419
3420                 s->current_picture.error[0] += sse(
3421                     s,
3422                     s->new_picture    .data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3423                     s->current_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3424                     w, h, s->linesize);
3425                 s->current_picture.error[1] += sse(
3426                     s,
3427                     s->new_picture    .data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
3428                     s->current_picture.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
3429                     w>>1, h>>1, s->uvlinesize);
3430                 s->current_picture.error[2] += sse(
3431                     s,
3432                     s->new_picture    .data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
3433                     s->current_picture.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
3434                     w>>1, h>>1, s->uvlinesize);
3435             }
3436 //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_width, get_bit_count(&s->pb));
3437         }
3438
3439
3440         /* Obtain average mb_row size for RTP */
3441         if (s->rtp_mode) {
3442             if (mb_y==0)
3443                 s->mb_line_avgsize = pbBufPtr(&s->pb) - s->ptr_last_mb_line;
3444             else {    
3445                 s->mb_line_avgsize = (s->mb_line_avgsize + pbBufPtr(&s->pb) - s->ptr_last_mb_line) >> 1;
3446             }
3447             s->ptr_last_mb_line = pbBufPtr(&s->pb);
3448         }
3449     }
3450     emms_c();
3451
3452 #ifdef CONFIG_RISKY
3453     if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
3454         ff_mpeg4_merge_partitions(s);
3455
3456     if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
3457         msmpeg4_encode_ext_header(s);
3458
3459     if(s->codec_id==CODEC_ID_MPEG4) 
3460         ff_mpeg4_stuffing(&s->pb);
3461 #endif
3462
3463     //if (s->gob_number)
3464     //    fprintf(stderr,"\nNumber of GOB: %d", s->gob_number);
3465     
3466     /* Send the last GOB if RTP */    
3467     if (s->rtp_mode) {
3468         flush_put_bits(&s->pb);
3469         pdif = pbBufPtr(&s->pb) - s->ptr_lastgob;
3470         /* Call the RTP callback to send the last GOB */
3471         if (s->rtp_callback)
3472             s->rtp_callback(s->ptr_lastgob, pdif, s->gob_number);
3473         s->ptr_lastgob = pbBufPtr(&s->pb);
3474         //fprintf(stderr,"\nGOB: %2d size: %d (last)", s->gob_number, pdif);
3475     }
3476 }
3477
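     /* trellis based quantization of one 8x8 block: a slower alternative to
        dct_quantize_c that searches for a coefficient coding with a better
        rate/distortion trade-off */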
3478 static int dct_quantize_trellis_c(MpegEncContext *s, 
3479                         DCTELEM *block, int n,
3480                         int qscale, int *overflow){
3481     const int *qmat;
3482     const UINT8 *scantable= s->intra_scantable.scantable;
3483     int max=0;