1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard.
4  *
5  * This library is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU Lesser General Public
7  * License as published by the Free Software Foundation; either
8  * version 2 of the License, or (at your option) any later version.
9  *
10  * This library is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * Lesser General Public License for more details.
14  *
15  * You should have received a copy of the GNU Lesser General Public
16  * License along with this library; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
18  *
19  * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
20  */
21  
22 /**
23  * @file mpegvideo.c
24  * The simplest mpeg encoder (well, it was the simplest!).
25  */ 
26  
27 #include <ctype.h>
28 #include <limits.h>
29 #include "avcodec.h"
30 #include "dsputil.h"
31 #include "mpegvideo.h"
32
33 #ifdef USE_FASTMEMCPY
34 #include "fastmemcpy.h"
35 #endif
36
37 //#undef NDEBUG
38 //#include <assert.h>
39
40 #ifdef CONFIG_ENCODERS
41 static void encode_picture(MpegEncContext *s, int picture_number);
42 #endif //CONFIG_ENCODERS
43 static void dct_unquantize_mpeg1_c(MpegEncContext *s, 
44                                    DCTELEM *block, int n, int qscale);
45 static void dct_unquantize_mpeg2_c(MpegEncContext *s,
46                                    DCTELEM *block, int n, int qscale);
47 static void dct_unquantize_h263_c(MpegEncContext *s, 
48                                   DCTELEM *block, int n, int qscale);
49 static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
50 #ifdef CONFIG_ENCODERS
51 static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
52 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
53 static int sse_mb(MpegEncContext *s);
54 #endif //CONFIG_ENCODERS
55
56 #ifdef HAVE_XVMC
57 extern int  XVMC_field_start(MpegEncContext*s, AVCodecContext *avctx);
58 extern void XVMC_field_end(MpegEncContext *s);
59 extern void XVMC_decode_mb(MpegEncContext *s, DCTELEM block[6][64]);
60 #endif
61
62 void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;
63
64
65 /* enable all paranoid tests for rounding, overflows, etc... */
66 //#define PARANOID
67
68 //#define DEBUG
69
70
71 /* for jpeg fast DCT */
72 #define CONST_BITS 14
73
74 static const uint16_t aanscales[64] = {
75     /* precomputed values scaled up by 14 bits */
76     16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
77     22725, 31521, 29692, 26722, 22725, 17855, 12299,  6270,
78     21407, 29692, 27969, 25172, 21407, 16819, 11585,  5906,
79     19266, 26722, 25172, 22654, 19266, 15137, 10426,  5315,
80     16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
81     12873, 17855, 16819, 15137, 12873, 10114,  6967,  3552,
82     8867 , 12299, 11585, 10426,  8867,  6967,  4799,  2446,
83     4520 ,  6270,  5906,  5315,  4520,  3552,  2446,  1247
84 };
85
86 static const uint8_t h263_chroma_roundtab[16] = {
87 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
88     0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
89 };
90
91 #ifdef CONFIG_ENCODERS
92 static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL;
93 static uint8_t default_fcode_tab[MAX_MV*2+1];
94
95 enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
96
97 static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[64], uint16_t (*qmat16_bias)[64],
98                            const uint16_t *quant_matrix, int bias, int qmin, int qmax)
99 {
100     int qscale;
101
102     for(qscale=qmin; qscale<=qmax; qscale++){
103         int i;
104         if (s->dsp.fdct == ff_jpeg_fdct_islow) {
105             for(i=0;i<64;i++) {
106                 const int j= s->dsp.idct_permutation[i];
107                 /* 16 <= qscale * quant_matrix[i] <= 7905 */
108                 /* (1<<QMAT_SHIFT)/16 >= (1<<QMAT_SHIFT)/(qscale * quant_matrix[i]) >= (1<<QMAT_SHIFT)/7905 */
111                 
112                 qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / 
113                                 (qscale * quant_matrix[j]));
114             }
115         } else if (s->dsp.fdct == fdct_ifast) {
116             for(i=0;i<64;i++) {
117                 const int j= s->dsp.idct_permutation[i];
118                 /* 16 <= qscale * quant_matrix[i] <= 7905 */
119                 /* 19952         <= aanscales[i] * qscale * quant_matrix[i]           <= 249205026 */
120                 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
121                 /* 3444240       >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
122                 
123                 qmat[qscale][i] = (int)((uint64_t_C(1) << (QMAT_SHIFT + 14)) / 
124                                 (aanscales[i] * qscale * quant_matrix[j]));
125             }
126         } else {
127             for(i=0;i<64;i++) {
128                 const int j= s->dsp.idct_permutation[i];
129                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
130                    So 16           <= qscale * quant_matrix[i]             <= 7905
131                    so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
132                    so 32768        >= (1<<19) / (qscale * quant_matrix[i]) >= 67
133                 */
134                 qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
135 //                qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
136                 qmat16[qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);
137
138                 if(qmat16[qscale][i]==0 || qmat16[qscale][i]==128*256) qmat16[qscale][i]=128*256-1;
139                 qmat16_bias[qscale][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][i]);
140             }
141         }
142     }
143 }
144 #endif //CONFIG_ENCODERS
145
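/**
 * initializes a ScanTable: stores the source scantable, its IDCT-permuted
 * version and, for each scan position, the highest permuted index seen so far (raster_end).
 */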
146 void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
147     int i;
148     int end;
149     
150     st->scantable= src_scantable;
151
152     for(i=0; i<64; i++){
153         int j;
154         j = src_scantable[i];
155         st->permutated[i] = permutation[j];
156 #ifdef ARCH_POWERPC
157         st->inverse[j] = i;
158 #endif
159     }
160     
161     end=-1;
162     for(i=0; i<64; i++){
163         int j;
164         j = st->permutated[i];
165         if(j>end) end=j;
166         st->raster_end[i]= end;
167     }
168 }
169
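/**
 * writes a custom quant matrix to the bitstream: a '1' flag followed by the
 * 64 entries in zigzag order, or a single '0' flag if no matrix is given.
 */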
170 void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix){
171     int i;
172
173     if(matrix){
174         put_bits(pb, 1, 1);
175         for(i=0;i<64;i++) {
176             put_bits(pb, 8, matrix[ ff_zigzag_direct[i] ]);
177         }
178     }else
179         put_bits(pb, 1, 0);
180 }
181
182 /* init common dct for both encoder and decoder */
183 int DCT_common_init(MpegEncContext *s)
184 {
185     s->dct_unquantize_h263 = dct_unquantize_h263_c;
186     s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_c;
187     s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_c;
188
189 #ifdef CONFIG_ENCODERS
190     s->dct_quantize= dct_quantize_c;
191 #endif
192         
193 #ifdef HAVE_MMX
194     MPV_common_init_mmx(s);
195 #endif
196 #ifdef ARCH_ALPHA
197     MPV_common_init_axp(s);
198 #endif
199 #ifdef HAVE_MLIB
200     MPV_common_init_mlib(s);
201 #endif
202 #ifdef HAVE_MMI
203     MPV_common_init_mmi(s);
204 #endif
205 #ifdef ARCH_ARMV4L
206     MPV_common_init_armv4l(s);
207 #endif
208 #ifdef ARCH_POWERPC
209     MPV_common_init_ppc(s);
210 #endif
211
212 #ifdef CONFIG_ENCODERS
213     s->fast_dct_quantize= s->dct_quantize;
214
215     if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
216         s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
217     }
218
219 #endif //CONFIG_ENCODERS
220
221     /* load & permute scantables
222        note: only wmv uses different ones 
223     */
224     ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
225     ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
226     ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
227     ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
228
229     s->picture_structure= PICT_FRAME;
230     
231     return 0;
232 }
233
234 /**
235  * allocates a Picture
236  * The pixels are allocated/set by calling get_buffer() if shared=0
237  */
238 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
239     const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesn't sig11
240     const int mb_array_size= s->mb_stride*s->mb_height;
241     int i;
242     
243     if(shared){
244         assert(pic->data[0]);
245         assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
246         pic->type= FF_BUFFER_TYPE_SHARED;
247     }else{
248         int r;
249         
250         assert(!pic->data[0]);
251         
252         r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
253         
254         if(r<0 || !pic->age || !pic->type || !pic->data[0]){
255             fprintf(stderr, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
256             return -1;
257         }
258
259         if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
260             fprintf(stderr, "get_buffer() failed (stride changed)\n");
261             return -1;
262         }
263
264         if(pic->linesize[1] != pic->linesize[2]){
265             fprintf(stderr, "get_buffer() failed (uv stride mismatch)\n");
266             return -1;
267         }
268
269         s->linesize  = pic->linesize[0];
270         s->uvlinesize= pic->linesize[1];
271     }
272     
273     if(pic->qscale_table==NULL){
274         if (s->encoding) {        
275             CHECKED_ALLOCZ(pic->mb_var   , mb_array_size * sizeof(int16_t))
276             CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
277             CHECKED_ALLOCZ(pic->mb_mean  , mb_array_size * sizeof(int8_t))
278             CHECKED_ALLOCZ(pic->mb_cmp_score, mb_array_size * sizeof(int32_t))
279         }
280
281         CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
282         CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
283         CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num    * sizeof(int))
284         pic->mb_type= pic->mb_type_base + s->mb_stride+1;
285         if(s->out_format == FMT_H264){
286             for(i=0; i<2; i++){
287                 CHECKED_ALLOCZ(pic->motion_val[i], 2 * 16 * s->mb_num * sizeof(uint16_t))
288                 CHECKED_ALLOCZ(pic->ref_index[i] , 4 * s->mb_num * sizeof(uint8_t))
289             }
290         }
291         pic->qstride= s->mb_stride;
292     }
293
294     //it might be nicer if the application would keep track of these but it would require an API change
295     memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
296     s->prev_pict_types[0]= s->pict_type;
297     if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
298         pic->age= INT_MAX; // skipped MBs in B-frames are quite rare in mpeg1/2 and it's a bit tricky to skip them anyway
299     
300     return 0;
301 fail: //for the CHECKED_ALLOCZ macro
302     return -1;
303 }
304
305 /**
306  * deallocates a picture
307  */
308 static void free_picture(MpegEncContext *s, Picture *pic){
309     int i;
310
311     if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
312         s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
313     }
314
315     av_freep(&pic->mb_var);
316     av_freep(&pic->mc_mb_var);
317     av_freep(&pic->mb_mean);
318     av_freep(&pic->mb_cmp_score);
319     av_freep(&pic->mbskip_table);
320     av_freep(&pic->qscale_table);
321     av_freep(&pic->mb_type_base);
322     pic->mb_type= NULL;
323     for(i=0; i<2; i++){
324         av_freep(&pic->motion_val[i]);
325         av_freep(&pic->ref_index[i]);
326     }
327     
328     if(pic->type == FF_BUFFER_TYPE_SHARED){
329         for(i=0; i<4; i++){
330             pic->base[i]=
331             pic->data[i]= NULL;
332         }
333         pic->type= 0;        
334     }
335 }
336
337 /* init common structure for both encoder and decoder */
338 int MPV_common_init(MpegEncContext *s)
339 {
340     int y_size, c_size, yc_size, i, mb_array_size, x, y;
341
342     dsputil_init(&s->dsp, s->avctx);
343     DCT_common_init(s);
344
345     s->flags= s->avctx->flags;
346
347     s->mb_width  = (s->width  + 15) / 16;
348     s->mb_height = (s->height + 15) / 16;
349     s->mb_stride = s->mb_width + 1;
350     mb_array_size= s->mb_height * s->mb_stride;
351
352     /* set default edge pos, will be overridden in decode_header if needed */
353     s->h_edge_pos= s->mb_width*16;
354     s->v_edge_pos= s->mb_height*16;
355
356     s->mb_num = s->mb_width * s->mb_height;
357     
358     s->block_wrap[0]=
359     s->block_wrap[1]=
360     s->block_wrap[2]=
361     s->block_wrap[3]= s->mb_width*2 + 2;
362     s->block_wrap[4]=
363     s->block_wrap[5]= s->mb_width + 2;
364
365     y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
366     c_size = (s->mb_width + 2) * (s->mb_height + 2);
367     yc_size = y_size + 2 * c_size;
368
369     /* convert fourcc to upper case */
370     s->avctx->codec_tag=   toupper( s->avctx->codec_tag     &0xFF)          
371                         + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
372                         + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16) 
373                         + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);
374
375     CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
376     s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17;
377
378     s->avctx->coded_frame= (AVFrame*)&s->current_picture;
379
380     CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error resilience code looks cleaner with this
381     for(y=0; y<s->mb_height; y++){
382         for(x=0; x<s->mb_width; x++){
383             s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
384         }
385     }
386     s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
387     
388     if (s->encoding) {
389         int mv_table_size= s->mb_stride * (s->mb_height+2) + 1;
390
391         /* Allocate MV tables */
392         CHECKED_ALLOCZ(s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t))
393         CHECKED_ALLOCZ(s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
394         CHECKED_ALLOCZ(s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
395         CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
396         CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
397         CHECKED_ALLOCZ(s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t))
398         s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
399         s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
400         s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
401         s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
402         s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
403         s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;
404
405         //FIXME should be linesize instead of s->width*2 but that isn't known before get_buffer()
406         CHECKED_ALLOCZ(s->me.scratchpad,  s->width*2*16*3*sizeof(uint8_t)) 
407         
408         CHECKED_ALLOCZ(s->me.map      , ME_MAP_SIZE*sizeof(uint32_t))
409         CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
410
411         if(s->codec_id==CODEC_ID_MPEG4){
412             CHECKED_ALLOCZ(s->tex_pb_buffer, PB_BUFFER_SIZE);
413             CHECKED_ALLOCZ(   s->pb2_buffer, PB_BUFFER_SIZE);
414         }
415         
416         if(s->msmpeg4_version){
417             CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
418         }
419         CHECKED_ALLOCZ(s->avctx->stats_out, 256);
420
421         /* Allocate MB type table */
422         CHECKED_ALLOCZ(s->mb_type  , mb_array_size * sizeof(uint8_t)) //needed for encoding
423     }
424         
425     CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))
426     
427     if (s->out_format == FMT_H263 || s->encoding) {
428         int size;
429
430         /* MV prediction */
431         size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
432         CHECKED_ALLOCZ(s->motion_val, size * 2 * sizeof(int16_t));
433     }
434
435     if(s->codec_id==CODEC_ID_MPEG4){
436         /* interlaced direct mode decoding tables */
437         CHECKED_ALLOCZ(s->field_mv_table, mb_array_size*2*2 * sizeof(int16_t))
438         CHECKED_ALLOCZ(s->field_select_table, mb_array_size*2* sizeof(int8_t))
439     }
440     if (s->out_format == FMT_H263) {
441         /* ac values */
442         CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(int16_t) * 16);
443         s->ac_val[1] = s->ac_val[0] + y_size;
444         s->ac_val[2] = s->ac_val[1] + c_size;
445         
446         /* cbp values */
447         CHECKED_ALLOCZ(s->coded_block, y_size);
448         
449         /* divx501 bitstream reorder buffer */
450         CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
451
452         /* cbp, ac_pred, pred_dir */
453         CHECKED_ALLOCZ(s->cbp_table  , mb_array_size * sizeof(uint8_t))
454         CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
455     }
456     
457     if (s->h263_pred || s->h263_plus || !s->encoding) {
458         /* dc values */
459         //MN: we need these for error resilience of intra-frames
460         CHECKED_ALLOCZ(s->dc_val[0], yc_size * sizeof(int16_t));
461         s->dc_val[1] = s->dc_val[0] + y_size;
462         s->dc_val[2] = s->dc_val[1] + c_size;
463         for(i=0;i<yc_size;i++)
464             s->dc_val[0][i] = 1024;
465     }
466
467     /* which mb is an intra block */
468     CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
469     memset(s->mbintra_table, 1, mb_array_size);
470     
471     /* default structure is frame */
472     s->picture_structure = PICT_FRAME;
473     
474     /* init macroblock skip table */
475     CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
476     //Note the +2 is for a quicker mpeg4 slice_end detection
477     CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
478     
479     s->block= s->blocks[0];
480
481     s->parse_context.state= -1;
482
483     s->context_initialized = 1;
484     return 0;
485  fail:
486     MPV_common_end(s);
487     return -1;
488 }
489
490
491 //extern int sads;
492
493 /* free common structure for both encoder and decoder */
494 void MPV_common_end(MpegEncContext *s)
495 {
496     int i;
497
498     av_freep(&s->parse_context.buffer);
499     s->parse_context.buffer_size=0;
500
501     av_freep(&s->mb_type);
502     av_freep(&s->p_mv_table_base);
503     av_freep(&s->b_forw_mv_table_base);
504     av_freep(&s->b_back_mv_table_base);
505     av_freep(&s->b_bidir_forw_mv_table_base);
506     av_freep(&s->b_bidir_back_mv_table_base);
507     av_freep(&s->b_direct_mv_table_base);
508     s->p_mv_table= NULL;
509     s->b_forw_mv_table= NULL;
510     s->b_back_mv_table= NULL;
511     s->b_bidir_forw_mv_table= NULL;
512     s->b_bidir_back_mv_table= NULL;
513     s->b_direct_mv_table= NULL;
514     
515     av_freep(&s->motion_val);
516     av_freep(&s->dc_val[0]);
517     av_freep(&s->ac_val[0]);
518     av_freep(&s->coded_block);
519     av_freep(&s->mbintra_table);
520     av_freep(&s->cbp_table);
521     av_freep(&s->pred_dir_table);
522     av_freep(&s->me.scratchpad);
523     av_freep(&s->me.map);
524     av_freep(&s->me.score_map);
525     
526     av_freep(&s->mbskip_table);
527     av_freep(&s->prev_pict_types);
528     av_freep(&s->bitstream_buffer);
529     av_freep(&s->tex_pb_buffer);
530     av_freep(&s->pb2_buffer);
531     av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
532     av_freep(&s->field_mv_table);
533     av_freep(&s->field_select_table);
534     av_freep(&s->avctx->stats_out);
535     av_freep(&s->ac_stats);
536     av_freep(&s->error_status_table);
537     av_freep(&s->mb_index2xy);
538
539     for(i=0; i<MAX_PICTURE_COUNT; i++){
540         free_picture(s, &s->picture[i]);
541     }
542     avcodec_default_free_buffers(s->avctx);
543     s->context_initialized = 0;
544 }
545
546 #ifdef CONFIG_ENCODERS
547
548 /* init video encoder */
549 int MPV_encode_init(AVCodecContext *avctx)
550 {
551     MpegEncContext *s = avctx->priv_data;
552     int i, dummy;
553     int chroma_h_shift, chroma_v_shift;
554
555     avctx->pix_fmt = PIX_FMT_YUV420P; // FIXME
556
557     s->bit_rate = avctx->bit_rate;
558     s->bit_rate_tolerance = avctx->bit_rate_tolerance;
559     s->width = avctx->width;
560     s->height = avctx->height;
561     if(avctx->gop_size > 600){
562         fprintf(stderr, "Warning: keyframe interval too large, reducing it ...\n");
563         avctx->gop_size=600;
564     }
565     s->gop_size = avctx->gop_size;
566     s->rtp_mode = avctx->rtp_mode;
567     s->rtp_payload_size = avctx->rtp_payload_size;
568     if (avctx->rtp_callback)
569         s->rtp_callback = avctx->rtp_callback;
570     s->max_qdiff= avctx->max_qdiff;
571     s->qcompress= avctx->qcompress;
572     s->qblur= avctx->qblur;
573     s->avctx = avctx;
574     s->flags= avctx->flags;
575     s->max_b_frames= avctx->max_b_frames;
576     s->b_frame_strategy= avctx->b_frame_strategy;
577     s->codec_id= avctx->codec->id;
578     s->luma_elim_threshold  = avctx->luma_elim_threshold;
579     s->chroma_elim_threshold= avctx->chroma_elim_threshold;
580     s->strict_std_compliance= avctx->strict_std_compliance;
581     s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
582     s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
583     s->mpeg_quant= avctx->mpeg_quant;
584
585     if (s->gop_size <= 1) {
586         s->intra_only = 1;
587         s->gop_size = 12;
588     } else {
589         s->intra_only = 0;
590     }
591
592     s->me_method = avctx->me_method;
593
594     /* Fixed QSCALE */
595     s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE);
596     
597     s->adaptive_quant= (   s->avctx->lumi_masking
598                         || s->avctx->dark_masking
599                         || s->avctx->temporal_cplx_masking 
600                         || s->avctx->spatial_cplx_masking
601                         || s->avctx->p_masking)
602                        && !s->fixed_qscale;
603     
604     s->progressive_sequence= !(avctx->flags & CODEC_FLAG_INTERLACED_DCT);
605
606     if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4){
607         fprintf(stderr, "4MV not supported by codec\n");
608         return -1;
609     }
610     
611     if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){
612         fprintf(stderr, "qpel not supported by codec\n");
613         return -1;
614     }
615
616     if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){
617         fprintf(stderr, "data partitioning not supported by codec\n");
618         return -1;
619     }
620     
621     if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO){
622         fprintf(stderr, "b frames not supported by codec\n");
623         return -1;
624     }
625     
626     if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
627         fprintf(stderr, "mpeg2 style quantization not supported by codec\n");
628         return -1;
629     }
630         
631     if(s->codec_id==CODEC_ID_MJPEG){
632         s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
633         s->inter_quant_bias= 0;
634     }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO){
635         s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
636         s->inter_quant_bias= 0;
637     }else{
638         s->intra_quant_bias=0;
639         s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x
640     }
641     
642     if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
643         s->intra_quant_bias= avctx->intra_quant_bias;
644     if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
645         s->inter_quant_bias= avctx->inter_quant_bias;
646         
647     avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
648
649     av_reduce(&s->time_increment_resolution, &dummy, s->avctx->frame_rate, s->avctx->frame_rate_base, (1<<16)-1);
650     s->time_increment_bits = av_log2(s->time_increment_resolution - 1) + 1;
651
652     switch(avctx->codec->id) {
653     case CODEC_ID_MPEG1VIDEO:
654         s->out_format = FMT_MPEG1;
655         s->low_delay= 0; //s->max_b_frames ? 0 : 1;
656         avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
657         break;
658     case CODEC_ID_MPEG2VIDEO:
659         s->out_format = FMT_MPEG1;
660         s->low_delay= 0; //s->max_b_frames ? 0 : 1;
661         avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
662         s->rtp_mode= 1; // mpeg2 must have slices
663         if(s->rtp_payload_size == 0) s->rtp_payload_size= 256*256*256;
664         break;
665     case CODEC_ID_LJPEG:
666     case CODEC_ID_MJPEG:
667         s->out_format = FMT_MJPEG;
668         s->intra_only = 1; /* force intra only for jpeg */
669         s->mjpeg_write_tables = 1; /* write all tables */
670         s->mjpeg_data_only_frames = 0; /* write all the needed headers */
671         s->mjpeg_vsample[0] = 1<<chroma_v_shift;
672         s->mjpeg_vsample[1] = 1;
673         s->mjpeg_vsample[2] = 1; 
674         s->mjpeg_hsample[0] = 1<<chroma_h_shift;
675         s->mjpeg_hsample[1] = 1; 
676         s->mjpeg_hsample[2] = 1; 
677         if (mjpeg_init(s) < 0)
678             return -1;
679         avctx->delay=0;
680         s->low_delay=1;
681         break;
682 #ifdef CONFIG_RISKY
683     case CODEC_ID_H263:
684         if (h263_get_picture_format(s->width, s->height) == 7) {
685             printf("Input picture size isn't suitable for h263 codec! try h263+\n");
686             return -1;
687         }
688         s->out_format = FMT_H263;
689         avctx->delay=0;
690         s->low_delay=1;
691         break;
692     case CODEC_ID_H263P:
693         s->out_format = FMT_H263;
694         s->h263_plus = 1;
695         /* Fx */
696         s->unrestricted_mv=(avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0;
697         s->h263_aic= (avctx->flags & CODEC_FLAG_H263P_AIC) ? 1:0;
698         /* /Fx */
699         /* These are just to be sure */
700         s->umvplus = 1;
701         avctx->delay=0;
702         s->low_delay=1;
703         break;
704     case CODEC_ID_FLV1:
705         s->out_format = FMT_H263;
706         s->h263_flv = 2; /* format = 1; 11-bit codes */
707         s->unrestricted_mv = 1;
708         s->rtp_mode=0; /* don't allow GOB */
709         avctx->delay=0;
710         s->low_delay=1;
711         break;
712     case CODEC_ID_RV10:
713         s->out_format = FMT_H263;
714         s->h263_rv10 = 1;
715         avctx->delay=0;
716         s->low_delay=1;
717         break;
718     case CODEC_ID_MPEG4:
719         s->out_format = FMT_H263;
720         s->h263_pred = 1;
721         s->unrestricted_mv = 1;
722         s->low_delay= s->max_b_frames ? 0 : 1;
723         avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
724         break;
725     case CODEC_ID_MSMPEG4V1:
726         s->out_format = FMT_H263;
727         s->h263_msmpeg4 = 1;
728         s->h263_pred = 1;
729         s->unrestricted_mv = 1;
730         s->msmpeg4_version= 1;
731         avctx->delay=0;
732         s->low_delay=1;
733         break;
734     case CODEC_ID_MSMPEG4V2:
735         s->out_format = FMT_H263;
736         s->h263_msmpeg4 = 1;
737         s->h263_pred = 1;
738         s->unrestricted_mv = 1;
739         s->msmpeg4_version= 2;
740         avctx->delay=0;
741         s->low_delay=1;
742         break;
743     case CODEC_ID_MSMPEG4V3:
744         s->out_format = FMT_H263;
745         s->h263_msmpeg4 = 1;
746         s->h263_pred = 1;
747         s->unrestricted_mv = 1;
748         s->msmpeg4_version= 3;
749         s->flipflop_rounding=1;
750         avctx->delay=0;
751         s->low_delay=1;
752         break;
753     case CODEC_ID_WMV1:
754         s->out_format = FMT_H263;
755         s->h263_msmpeg4 = 1;
756         s->h263_pred = 1;
757         s->unrestricted_mv = 1;
758         s->msmpeg4_version= 4;
759         s->flipflop_rounding=1;
760         avctx->delay=0;
761         s->low_delay=1;
762         break;
763     case CODEC_ID_WMV2:
764         s->out_format = FMT_H263;
765         s->h263_msmpeg4 = 1;
766         s->h263_pred = 1;
767         s->unrestricted_mv = 1;
768         s->msmpeg4_version= 5;
769         s->flipflop_rounding=1;
770         avctx->delay=0;
771         s->low_delay=1;
772         break;
773 #endif
774     default:
775         return -1;
776     }
777     
778     { /* set up some safe defaults, some codecs might override them later */
779         static int done=0;
780         if(!done){
781             int i;
782             done=1;
783
784             default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
785             memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
786             memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));
787
788             for(i=-16; i<16; i++){
789                 default_fcode_tab[i + MAX_MV]= 1;
790             }
791         }
792     }
793     s->me.mv_penalty= default_mv_penalty;
794     s->fcode_tab= default_fcode_tab;
795     s->y_dc_scale_table=
796     s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
797  
798     /* don't use the mv_penalty table for the crude pre-EPZS ME methods as it would confuse them */
799     //FIXME remove after fixing / removing old ME
800     if (s->me_method < ME_EPZS) s->me.mv_penalty = default_mv_penalty;
801
802     s->encoding = 1;
803
804     /* init */
805     if (MPV_common_init(s) < 0)
806         return -1;
807     
808     ff_init_me(s);
809
810 #ifdef CONFIG_ENCODERS
811 #ifdef CONFIG_RISKY
812     if (s->out_format == FMT_H263)
813         h263_encode_init(s);
814     if(s->msmpeg4_version)
815         ff_msmpeg4_encode_init(s);
816 #endif
817     if (s->out_format == FMT_MPEG1)
818         ff_mpeg1_encode_init(s);
819 #endif
820
821     /* init default q matrix */
822     for(i=0;i<64;i++) {
823         int j= s->dsp.idct_permutation[i];
824 #ifdef CONFIG_RISKY
825         if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
826             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
827             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
828         }else if(s->out_format == FMT_H263){
829             s->intra_matrix[j] =
830             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
831         }else
832 #endif
833         { /* mpeg1/2 */
834             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
835             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
836         }
837         if(s->avctx->intra_matrix)
838             s->intra_matrix[j] = s->avctx->intra_matrix[i];
839         if(s->avctx->inter_matrix)
840             s->inter_matrix[j] = s->avctx->inter_matrix[i];
841     }
842
843     /* precompute matrix */
844     /* for mjpeg, we do include qscale in the matrix */
845     if (s->out_format != FMT_MJPEG) {
846         convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, s->q_intra_matrix16_bias, 
847                        s->intra_matrix, s->intra_quant_bias, 1, 31);
848         convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16, s->q_inter_matrix16_bias, 
849                        s->inter_matrix, s->inter_quant_bias, 1, 31);
850     }
851
852     if(ff_rate_control_init(s) < 0)
853         return -1;
854
855     s->picture_number = 0;
856     s->picture_in_gop_number = 0;
857     s->fake_picture_number = 0;
858     /* motion detector init */
859     s->f_code = 1;
860     s->b_code = 1;
861
862     return 0;
863 }
864
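/* free the video encoder's internal state */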
865 int MPV_encode_end(AVCodecContext *avctx)
866 {
867     MpegEncContext *s = avctx->priv_data;
868
869 #ifdef STATS
870     print_stats();
871 #endif
872
873     ff_rate_control_uninit(s);
874
875     MPV_common_end(s);
876     if (s->out_format == FMT_MJPEG)
877         mjpeg_close(s);
878         
879     av_freep(&avctx->extradata);
880       
881     return 0;
882 }
883
884 #endif //CONFIG_ENCODERS
885
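/**
 * computes the max_level[], max_run[] and index_run[] helper tables of an
 * RLTable from its run/level tables, separately for the last==0 and last!=0 coefficients.
 */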
886 void init_rl(RLTable *rl)
887 {
888     int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
889     uint8_t index_run[MAX_RUN+1];
890     int last, run, level, start, end, i;
891
892     /* compute max_level[], max_run[] and index_run[] */
893     for(last=0;last<2;last++) {
894         if (last == 0) {
895             start = 0;
896             end = rl->last;
897         } else {
898             start = rl->last;
899             end = rl->n;
900         }
901
902         memset(max_level, 0, MAX_RUN + 1);
903         memset(max_run, 0, MAX_LEVEL + 1);
904         memset(index_run, rl->n, MAX_RUN + 1);
905         for(i=start;i<end;i++) {
906             run = rl->table_run[i];
907             level = rl->table_level[i];
908             if (index_run[run] == rl->n)
909                 index_run[run] = i;
910             if (level > max_level[run])
911                 max_level[run] = level;
912             if (run > max_run[level])
913                 max_run[level] = run;
914         }
915         rl->max_level[last] = av_malloc(MAX_RUN + 1);
916         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
917         rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
918         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
919         rl->index_run[last] = av_malloc(MAX_RUN + 1);
920         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
921     }
922 }
923
924 /* draw the edges of width 'w' of an image of size width, height */
925 //FIXME check that this is ok for mpeg4 interlaced
926 static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
927 {
928     uint8_t *ptr, *last_line;
929     int i;
930
931     last_line = buf + (height - 1) * wrap;
932     for(i=0;i<w;i++) {
933         /* top and bottom */
934         memcpy(buf - (i + 1) * wrap, buf, width);
935         memcpy(last_line + (i + 1) * wrap, last_line, width);
936     }
937     /* left and right */
938     ptr = buf;
939     for(i=0;i<height;i++) {
940         memset(ptr - w, ptr[0], w);
941         memset(ptr + width, ptr[width-1], w);
942         ptr += wrap;
943     }
944     /* corners */
945     for(i=0;i<w;i++) {
946         memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
947         memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
948         memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
949         memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
950     }
951 }
952
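/**
 * finds an unused slot in the picture array; for shared pictures only completely
 * empty slots are taken, otherwise previously used internal slots are preferred.
 */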
953 static int find_unused_picture(MpegEncContext *s, int shared){
954     int i;
955     
956     if(shared){
957         for(i=0; i<MAX_PICTURE_COUNT; i++){
958             if(s->picture[i].data[0]==NULL && s->picture[i].type==0) break;
959         }
960     }else{
961         for(i=0; i<MAX_PICTURE_COUNT; i++){
962             if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) break; //FIXME
963         }
964         for(i=0; i<MAX_PICTURE_COUNT; i++){
965             if(s->picture[i].data[0]==NULL) break;
966         }
967     }
968
969     assert(i<MAX_PICTURE_COUNT);
970     return i;
971 }
972
973 /* generic function for encode/decode called before a frame is coded/decoded */
974 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
975 {
976     int i;
977     AVFrame *pic;
978
979     s->mb_skiped = 0;
980
981     assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
982
983     /* mark&release old frames */
984     if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr->data[0]) {
985         avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);
986
987         /* release forgotten pictures */
988         /* if(mpeg124/h263) */
989         if(!s->encoding){
990             for(i=0; i<MAX_PICTURE_COUNT; i++){
991                 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
992                     fprintf(stderr, "releasing zombie picture\n");
993                     avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);                
994                 }
995             }
996         }
997     }
998 alloc:
999     if(!s->encoding){
1000         /* release non-reference frames */
1001         for(i=0; i<MAX_PICTURE_COUNT; i++){
1002             if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1003                 s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
1004             }
1005         }
1006
1007         i= find_unused_picture(s, 0);
1008     
1009         pic= (AVFrame*)&s->picture[i];
1010         pic->reference= s->pict_type != B_TYPE ? 3 : 0;
1011
1012         if(s->current_picture_ptr)
1013             pic->coded_picture_number= s->current_picture_ptr->coded_picture_number+1;
1014         
1015         if( alloc_picture(s, (Picture*)pic, 0) < 0)
1016             return -1;
1017
1018         s->current_picture_ptr= &s->picture[i];
1019     }
1020
1021     s->current_picture_ptr->pict_type= s->pict_type;
1022     s->current_picture_ptr->quality= s->qscale;
1023     s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;
1024
1025     s->current_picture= *s->current_picture_ptr;
1026   
1027   if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1028     if (s->pict_type != B_TYPE) {
1029         s->last_picture_ptr= s->next_picture_ptr;
1030         s->next_picture_ptr= s->current_picture_ptr;
1031     }
1032     
1033     if(s->last_picture_ptr) s->last_picture= *s->last_picture_ptr;
1034     if(s->next_picture_ptr) s->next_picture= *s->next_picture_ptr;
1035     if(s->new_picture_ptr ) s->new_picture = *s->new_picture_ptr;
1036     
1037     if(s->pict_type != I_TYPE && s->last_picture_ptr==NULL){
1038         fprintf(stderr, "warning: first frame is not a keyframe\n");
1039         assert(s->pict_type != B_TYPE); //these should have been dropped if we don't have a reference
1040         goto alloc;
1041     }
1042
1043     assert(s->pict_type == I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
1044
1045     if(s->picture_structure!=PICT_FRAME){
1046         int i;
1047         for(i=0; i<4; i++){
1048             if(s->picture_structure == PICT_BOTTOM_FIELD){
1049                  s->current_picture.data[i] += s->current_picture.linesize[i];
1050             } 
1051             s->current_picture.linesize[i] *= 2;
1052             s->last_picture.linesize[i] *=2;
1053             s->next_picture.linesize[i] *=2;
1054         }
1055     }
1056   }
1057    
1058     s->hurry_up= s->avctx->hurry_up;
1059     s->error_resilience= avctx->error_resilience;
1060
1061     /* set dequantizer, we can't do it during init as it might change for mpeg4
1062        and we can't do it in the header decode as init isn't called for mpeg4 there yet */
1063     if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) 
1064         s->dct_unquantize = s->dct_unquantize_mpeg2;
1065     else if(s->out_format == FMT_H263)
1066         s->dct_unquantize = s->dct_unquantize_h263;
1067     else 
1068         s->dct_unquantize = s->dct_unquantize_mpeg1;
1069
1070 #ifdef HAVE_XVMC
1071     if(s->avctx->xvmc_acceleration)
1072         return XVMC_field_start(s, avctx);
1073 #endif
1074     return 0;
1075 }
1076
1077 /* generic function for encode/decode called after a frame has been coded/decoded */
1078 void MPV_frame_end(MpegEncContext *s)
1079 {
1080     int i;
1081     /* draw edge for correct motion prediction if outside */
1082 #ifdef HAVE_XVMC
1083 //just to make sure that all data is rendered.
1084     if(s->avctx->xvmc_acceleration){
1085         XVMC_field_end(s);
1086     }else
1087 #endif
1088     if(s->codec_id!=CODEC_ID_SVQ1 && s->out_format != FMT_MPEG1){
1089         if (s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1090             draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
1091             draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1092             draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1093         }
1094     }
1095     emms_c();
1096     
1097     s->last_pict_type    = s->pict_type;
1098     if(s->pict_type!=B_TYPE){
1099         s->last_non_b_pict_type= s->pict_type;
1100     }
1101 #if 0
1102         /* copy back current_picture variables */
1103     for(i=0; i<MAX_PICTURE_COUNT; i++){
1104         if(s->picture[i].data[0] == s->current_picture.data[0]){
1105             s->picture[i]= s->current_picture;
1106             break;
1107         }    
1108     }
1109     assert(i<MAX_PICTURE_COUNT);
1110 #endif    
1111
1112     if(s->encoding){
1113         /* release non-reference frames */
1114         for(i=0; i<MAX_PICTURE_COUNT; i++){
1115             if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1116                 s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
1117             }
1118         }
1119     }
1120     // clear copies, to avoid confusion
1121 #if 0
1122     memset(&s->last_picture, 0, sizeof(Picture));
1123     memset(&s->next_picture, 0, sizeof(Picture));
1124     memset(&s->current_picture, 0, sizeof(Picture));
1125 #endif
1126 }
1127
1128 /**
1129  * draws a line from (ex, ey) -> (sx, sy).
1130  * @param w width of the image
1131  * @param h height of the image
1132  * @param stride stride/linesize of the image
1133  * @param color color of the line
1134  */
1135 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1136     int t, x, y, f;
1137     
1138     sx= clip(sx, 0, w-1);
1139     sy= clip(sy, 0, h-1);
1140     ex= clip(ex, 0, w-1);
1141     ey= clip(ey, 0, h-1);
1142     
1143     buf[sy*stride + sx]+= color;
1144     
1145     if(ABS(ex - sx) > ABS(ey - sy)){
1146         if(sx > ex){
1147             t=sx; sx=ex; ex=t;
1148             t=sy; sy=ey; ey=t;
1149         }
1150         buf+= sx + sy*stride;
1151         ex-= sx;
1152         f= ((ey-sy)<<16)/ex;
1153         for(x= 0; x <= ex; x++){
1154             y= ((x*f) + (1<<15))>>16;
1155             buf[y*stride + x]+= color;
1156         }
1157     }else{
1158         if(sy > ey){
1159             t=sx; sx=ex; ex=t;
1160             t=sy; sy=ey; ey=t;
1161         }
1162         buf+= sx + sy*stride;
1163         ey-= sy;
1164         if(ey) f= ((ex-sx)<<16)/ey;
1165         else   f= 0;
1166         for(y= 0; y <= ey; y++){
1167             x= ((y*f) + (1<<15))>>16;
1168             buf[y*stride + x]+= color;
1169         }
1170     }
1171 }
1172
1173 /**
1174  * draws an arrow from (ex, ey) -> (sx, sy).
1175  * @param w width of the image
1176  * @param h height of the image
1177  * @param stride stride/linesize of the image
1178  * @param color color of the arrow
1179  */
1180 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){ 
1181     int dx,dy;
1182
1183     sx= clip(sx, -100, w+100);
1184     sy= clip(sy, -100, h+100);
1185     ex= clip(ex, -100, w+100);
1186     ey= clip(ey, -100, h+100);
1187     
1188     dx= ex - sx;
1189     dy= ey - sy;
1190     
1191     if(dx*dx + dy*dy > 3*3){
1192         int rx=  dx + dy;
1193         int ry= -dx + dy;
1194         int length= ff_sqrt((rx*rx + ry*ry)<<8);
1195         
1196         //FIXME subpixel accuracy
1197         rx= ROUNDED_DIV(rx*3<<4, length);
1198         ry= ROUNDED_DIV(ry*3<<4, length);
1199         
1200         draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1201         draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1202     }
1203     draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1204 }
1205
1206 /**
1207  * prints debugging info for the given picture.
1208  */
1209 void ff_print_debug_info(MpegEncContext *s, Picture *pict){
1210
1211     if(!pict || !pict->mb_type) return;
1212
1213     if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1214         int x,y;
1215
1216         for(y=0; y<s->mb_height; y++){
1217             for(x=0; x<s->mb_width; x++){
1218                 if(s->avctx->debug&FF_DEBUG_SKIP){
1219                     int count= s->mbskip_table[x + y*s->mb_stride];
1220                     if(count>9) count=9;
1221                     printf("%1d", count);
1222                 }
1223                 if(s->avctx->debug&FF_DEBUG_QP){
1224                     printf("%2d", pict->qscale_table[x + y*s->mb_stride]);
1225                 }
1226                 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1227                     int mb_type= pict->mb_type[x + y*s->mb_stride];
1228                     
1229                     //Type & MV direction
1230                     if(IS_PCM(mb_type))
1231                         printf("P");
1232                     else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1233                         printf("A");
1234                     else if(IS_INTRA4x4(mb_type))
1235                         printf("i");
1236                     else if(IS_INTRA16x16(mb_type))
1237                         printf("I");
1238                     else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1239                         printf("d");
1240                     else if(IS_DIRECT(mb_type))
1241                         printf("D");
1242                     else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1243                         printf("g");
1244                     else if(IS_GMC(mb_type))
1245                         printf("G");
1246                     else if(IS_SKIP(mb_type))
1247                         printf("S");
1248                     else if(!USES_LIST(mb_type, 1))
1249                         printf(">");
1250                     else if(!USES_LIST(mb_type, 0))
1251                         printf("<");
1252                     else{
1253                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1254                         printf("X");
1255                     }
1256                     
1257                     //segmentation
1258                     if(IS_8X8(mb_type))
1259                         printf("+");
1260                     else if(IS_16X8(mb_type))
1261                         printf("-");
1262                     else if(IS_8X16(mb_type))
1263                         printf("¦");
1264                     else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1265                         printf(" ");
1266                     else
1267                         printf("?");
1268                     
1269                         
1270                     if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
1271                         printf("=");
1272                     else
1273                         printf(" ");
1274                 }
1275 //                printf(" ");
1276             }
1277             printf("\n");
1278         }
1279     }
1280     
1281     if((s->avctx->debug&FF_DEBUG_VIS_MV) && s->motion_val){
1282         const int shift= 1 + s->quarter_sample;
1283         int mb_y;
1284         uint8_t *ptr= pict->data[0];
1285         s->low_delay=0; //needed to see the vectors without trashing the buffers
1286
1287         for(mb_y=0; mb_y<s->mb_height; mb_y++){
1288             int mb_x;
1289             for(mb_x=0; mb_x<s->mb_width; mb_x++){
1290                 const int mb_index= mb_x + mb_y*s->mb_stride;
1291                 if(IS_8X8(s->current_picture.mb_type[mb_index])){
1292                     int i;
1293                     for(i=0; i<4; i++){
1294                         int sx= mb_x*16 + 4 + 8*(i&1);
1295                         int sy= mb_y*16 + 4 + 8*(i>>1);
1296                         int xy= 1 + mb_x*2 + (i&1) + (mb_y*2 + 1 + (i>>1))*(s->mb_width*2 + 2);
1297                         int mx= (s->motion_val[xy][0]>>shift) + sx;
1298                         int my= (s->motion_val[xy][1]>>shift) + sy;
1299                         draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
1300                     }
1301                 }else{
1302                     int sx= mb_x*16 + 8;
1303                     int sy= mb_y*16 + 8;
1304                     int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2);
1305                     int mx= (s->motion_val[xy][0]>>shift) + sx;
1306                     int my= (s->motion_val[xy][1]>>shift) + sy;
1307                     draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
1308                 }
1309                 s->mbskip_table[mb_index]=0;
1310             }
1311         }
1312     }
1313 }
1314
1315 #ifdef CONFIG_ENCODERS
1316
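/**
 * returns the sum of absolute differences of a 16x16 block against a constant reference value.
 */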
1317 static int get_sae(uint8_t *src, int ref, int stride){
1318     int x,y;
1319     int acc=0;
1320     
1321     for(y=0; y<16; y++){
1322         for(x=0; x<16; x++){
1323             acc+= ABS(src[x+y*stride] - ref);
1324         }
1325     }
1326     
1327     return acc;
1328 }
1329
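/**
 * estimates how many 16x16 blocks would be cheaper to code as intra, by comparing
 * the inter SAD against ref with the SAE against the block mean.
 */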
1330 static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
1331     int x, y, w, h;
1332     int acc=0;
1333     
1334     w= s->width &~15;
1335     h= s->height&~15;
1336     
1337     for(y=0; y<h; y+=16){
1338         for(x=0; x<w; x+=16){
1339             int offset= x + y*stride;
1340             int sad = s->dsp.pix_abs16x16(src + offset, ref + offset, stride);
1341             int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
1342             int sae = get_sae(src + offset, mean, stride);
1343             
1344             acc+= sae + 500 < sad;
1345         }
1346     }
1347     return acc;
1348 }
1349
1350
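/**
 * queues a user supplied frame for encoding; the frame buffers are either
 * referenced directly or copied into an internal Picture, then the input
 * buffer entries are shifted by one to account for the encoding delay.
 */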
1351 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
1352     AVFrame *pic=NULL;
1353     int i;
1354     const int encoding_delay= s->max_b_frames;
1355     int direct=1;
1356     
1357   if(pic_arg){
1358     if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
1359     if(pic_arg->linesize[0] != s->linesize) direct=0;
1360     if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
1361     if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
1362   
1363 //    printf("%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
1364     
1365     if(direct){
1366         i= find_unused_picture(s, 1);
1367
1368         pic= (AVFrame*)&s->picture[i];
1369         pic->reference= 3;
1370     
1371         for(i=0; i<4; i++){
1372             pic->data[i]= pic_arg->data[i];
1373             pic->linesize[i]= pic_arg->linesize[i];
1374         }
1375         alloc_picture(s, (Picture*)pic, 1);
1376     }else{
1377         i= find_unused_picture(s, 0);
1378
1379         pic= (AVFrame*)&s->picture[i];
1380         pic->reference= 3;
1381
1382         alloc_picture(s, (Picture*)pic, 0);
1383         for(i=0; i<4; i++){
1384             /* the input will be 16 pixels to the right relative to the actual buffer start
1385              * and the current_pic, so the buffer can be reused; yes, it's not beautiful 
1386              */
1387             pic->data[i]+= 16; 
1388         }
1389
1390         if(   pic->data[0] == pic_arg->data[0] 
1391            && pic->data[1] == pic_arg->data[1]
1392            && pic->data[2] == pic_arg->data[2]){
1393        // empty
1394         }else{
1395             int h_chroma_shift, v_chroma_shift;
1396             avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1397         
1398             for(i=0; i<3; i++){
1399                 int src_stride= pic_arg->linesize[i];
1400                 int dst_stride= i ? s->uvlinesize : s->linesize;
1401                 int h_shift= i ? h_chroma_shift : 0;
1402                 int v_shift= i ? v_chroma_shift : 0;
1403                 int w= s->width >>h_shift;
1404                 int h= s->height>>v_shift;
1405                 uint8_t *src= pic_arg->data[i];
1406                 uint8_t *dst= pic->data[i];
1407             
1408                 if(src_stride==dst_stride)
1409                     memcpy(dst, src, src_stride*h);
1410                 else{
1411                     while(h--){
1412                         memcpy(dst, src, w);
1413                         dst += dst_stride;
1414                         src += src_stride;
1415                     }
1416                 }
1417             }
1418         }
1419     }
1420     pic->quality= pic_arg->quality;
1421     pic->pict_type= pic_arg->pict_type;
1422     pic->pts = pic_arg->pts;
1423     
1424     if(s->input_picture[encoding_delay])
1425         pic->display_picture_number= s->input_picture[encoding_delay]->display_picture_number + 1;
1426     
1427   }
1428
1429     /* shift buffer entries */
1430     for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
1431         s->input_picture[i-1]= s->input_picture[i];
1432         
1433     s->input_picture[encoding_delay]= (Picture*)pic;
1434
1435     return 0;
1436 }
1437
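/**
 * selects the next picture(s) to encode: decides how many B-frames precede the
 * next reference frame (from user pict_type, 2-pass data or b_frame_strategy),
 * assigns picture types and reorders the input pictures into coding order.
 */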
1438 static void select_input_picture(MpegEncContext *s){
1439     int i;
1440     int coded_pic_num=0;    
1441
1442     if(s->reordered_input_picture[0])
1443         coded_pic_num= s->reordered_input_picture[0]->coded_picture_number + 1;
1444
1445     for(i=1; i<MAX_PICTURE_COUNT; i++)
1446         s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
1447     s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
1448
1449     /* set next picture types & ordering */
1450     if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
1451         if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
1452             s->reordered_input_picture[0]= s->input_picture[0];
1453             s->reordered_input_picture[0]->pict_type= I_TYPE;
1454             s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
1455         }else{
1456             int b_frames;
1457             
1458             if(s->flags&CODEC_FLAG_PASS2){
1459                 for(i=0; i<s->max_b_frames+1; i++){
1460                     int pict_num= s->input_picture[0]->display_picture_number + i;
1461                     int pict_type= s->rc_context.entry[pict_num].new_pict_type;
1462                     s->input_picture[i]->pict_type= pict_type;
1463                     
1464                     if(i + 1 >= s->rc_context.num_entries) break;
1465                 }
1466             }
1467
1468             if(s->input_picture[0]->pict_type){
1469                 /* user selected pict_type */
1470                 for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){
1471                     if(s->input_picture[b_frames]->pict_type!=B_TYPE) break;
1472                 }
1473             
1474                 if(b_frames > s->max_b_frames){
1475                     fprintf(stderr, "warning, too many bframes in a row\n");
1476                     b_frames = s->max_b_frames;
1477                 }
1478             }else if(s->b_frame_strategy==0){
1479                 b_frames= s->max_b_frames;
1480                 while(b_frames && !s->input_picture[b_frames]) b_frames--;
1481             }else if(s->b_frame_strategy==1){
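                // strategy 1 (descriptive note): score each candidate by how many of its
                // macroblocks would be coded intra relative to the previous frame, and end
                // the B-frame run before the first frame that differs too much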
1482                 for(i=1; i<s->max_b_frames+1; i++){
1483                     if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
1484                         s->input_picture[i]->b_frame_score= 
1485                             get_intra_count(s, s->input_picture[i  ]->data[0], 
1486                                                s->input_picture[i-1]->data[0], s->linesize) + 1;
1487                     }
1488                 }
1489                 for(i=0; i<s->max_b_frames; i++){
1490                     if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
1491                 }
1492                                 
1493                 b_frames= FFMAX(0, i-1);
1494                 
1495                 /* reset scores */
1496                 for(i=0; i<b_frames+1; i++){
1497                     s->input_picture[i]->b_frame_score=0;
1498                 }
1499             }else{
1500                 fprintf(stderr, "illegal b frame strategy\n");
1501                 b_frames=0;
1502             }
1503
1504             emms_c();
1505 //static int b_count=0;
1506 //b_count+= b_frames;
1507 //printf("b_frames: %d\n", b_count);
1508                         
1509             s->reordered_input_picture[0]= s->input_picture[b_frames];
1510             if(   s->picture_in_gop_number + b_frames >= s->gop_size 
1511                || s->reordered_input_picture[0]->pict_type== I_TYPE)
1512                 s->reordered_input_picture[0]->pict_type= I_TYPE;
1513             else
1514                 s->reordered_input_picture[0]->pict_type= P_TYPE;
1515             s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
1516             for(i=0; i<b_frames; i++){
1517                 coded_pic_num++;
1518                 s->reordered_input_picture[i+1]= s->input_picture[i];
1519                 s->reordered_input_picture[i+1]->pict_type= B_TYPE;
1520                 s->reordered_input_picture[i+1]->coded_picture_number= coded_pic_num;
1521             }
1522         }
1523     }
1524     
1525     if(s->reordered_input_picture[0]){
1526         s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;
1527
1528         s->new_picture= *s->reordered_input_picture[0];
1529
1530         if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
1531             // input is a shared pix, so we can't modify it -> alloc a new one & ensure that the shared one is reusable
1532         
1533             int i= find_unused_picture(s, 0);
1534             Picture *pic= &s->picture[i];
1535
1536             /* mark us unused / free shared pic */
1537             for(i=0; i<4; i++)
1538                 s->reordered_input_picture[0]->data[i]= NULL;
1539             s->reordered_input_picture[0]->type= 0;
1540             
1541             //FIXME bad, copy * except
1542             pic->pict_type = s->reordered_input_picture[0]->pict_type;
1543             pic->quality   = s->reordered_input_picture[0]->quality;
1544             pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number;
1545             pic->reference = s->reordered_input_picture[0]->reference;
1546             pic->pts = s->reordered_input_picture[0]->pts;
1547             
1548             alloc_picture(s, pic, 0);
1549
1550             s->current_picture_ptr= pic;
1551         }else{
1552             // input is not a shared pix -> reuse buffer for current_pix
1553
1554             assert(   s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER 
1555                    || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
1556             
1557             s->current_picture_ptr= s->reordered_input_picture[0];
1558             for(i=0; i<4; i++){
1559                 //reverse the +16 we did before storing the input
1560                 s->current_picture_ptr->data[i]-=16;
1561             }
1562         }
1563         s->current_picture= *s->current_picture_ptr;
1564     
1565         s->picture_number= s->new_picture.display_picture_number;
1566 //printf("dpn:%d\n", s->picture_number);
1567     }else{
1568        memset(&s->new_picture, 0, sizeof(Picture));
1569     }
1570 }
1571
1572 int MPV_encode_picture(AVCodecContext *avctx,
1573                        unsigned char *buf, int buf_size, void *data)
1574 {
1575     MpegEncContext *s = avctx->priv_data;
1576     AVFrame *pic_arg = data;
1577     int i;
1578
1579     if(avctx->pix_fmt != PIX_FMT_YUV420P){
1580         fprintf(stderr, "this codec supports only YUV420P\n");
1581         return -1;
1582     }
1583     
1584     init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
1585
1586     s->picture_in_gop_number++;
1587
1588     load_input_picture(s, pic_arg);
1589     
1590     select_input_picture(s);
1591     
1592     /* output? */
1593     if(s->new_picture.data[0]){
1594
1595         s->pict_type= s->new_picture.pict_type;
1596         if (s->fixed_qscale){ /* the ratecontrol needs the last qscale so we don't touch it for CBR */
1597             s->qscale= (int)(s->new_picture.quality+0.5);
1598             assert(s->qscale);
1599         }
1600 //emms_c();
1601 //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
1602         MPV_frame_start(s, avctx);
1603
1604         encode_picture(s, s->picture_number);
1605         
1606         avctx->real_pict_num  = s->picture_number;
1607         avctx->header_bits = s->header_bits;
1608         avctx->mv_bits     = s->mv_bits;
1609         avctx->misc_bits   = s->misc_bits;
1610         avctx->i_tex_bits  = s->i_tex_bits;
1611         avctx->p_tex_bits  = s->p_tex_bits;
1612         avctx->i_count     = s->i_count;
1613         avctx->p_count     = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
1614         avctx->skip_count  = s->skip_count;
1615
1616         MPV_frame_end(s);
1617
1618         if (s->out_format == FMT_MJPEG)
1619             mjpeg_picture_trailer(s);
1620         
1621         if(s->flags&CODEC_FLAG_PASS1)
1622             ff_write_pass1_stats(s);
1623
1624         for(i=0; i<4; i++){
1625             avctx->error[i] += s->current_picture_ptr->error[i];
1626         }
1627     }
1628
1629     s->input_picture_number++;
1630
1631     flush_put_bits(&s->pb);
1632     s->frame_bits  = (pbBufPtr(&s->pb) - s->pb.buf) * 8;
1633     
1634     s->total_bits += s->frame_bits;
1635     avctx->frame_bits  = s->frame_bits;
1636     
1637     return pbBufPtr(&s->pb) - s->pb.buf;
1638 }
1639
1640 #endif //CONFIG_ENCODERS
1641
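/* global motion compensation for the single-warp-point case: the whole
   macroblock is displaced by sprite_offset (a pure translation); dsp.gmc1
   handles sub-pel positions, the normal (no_rnd) put_pixels functions are
   used when the offset is half-pel aligned */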
1642 static inline void gmc1_motion(MpegEncContext *s,
1643                                uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1644                                int dest_offset,
1645                                uint8_t **ref_picture, int src_offset)
1646 {
1647     uint8_t *ptr;
1648     int offset, src_x, src_y, linesize, uvlinesize;
1649     int motion_x, motion_y;
1650     int emu=0;
1651
1652     motion_x= s->sprite_offset[0][0];
1653     motion_y= s->sprite_offset[0][1];
1654     src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
1655     src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
1656     motion_x<<=(3-s->sprite_warping_accuracy);
1657     motion_y<<=(3-s->sprite_warping_accuracy);
1658     src_x = clip(src_x, -16, s->width);
1659     if (src_x == s->width)
1660         motion_x =0;
1661     src_y = clip(src_y, -16, s->height);
1662     if (src_y == s->height)
1663         motion_y =0;
1664
1665     linesize = s->linesize;
1666     uvlinesize = s->uvlinesize;
1667     
1668     ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1669
1670     dest_y+=dest_offset;
1671     if(s->flags&CODEC_FLAG_EMU_EDGE){
1672         if(src_x<0 || src_y<0 || src_x + 17 >= s->h_edge_pos
1673                               || src_y + 17 >= s->v_edge_pos){
1674             ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1675             ptr= s->edge_emu_buffer;
1676         }
1677     }
1678     
1679     if((motion_x|motion_y)&7){
1680         s->dsp.gmc1(dest_y  , ptr  , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1681         s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1682     }else{
1683         int dxy;
1684         
1685         dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
1686         if (s->no_rounding){
1687             s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
1688         }else{
1689             s->dsp.put_pixels_tab       [0][dxy](dest_y, ptr, linesize, 16);
1690         }
1691     }
1692     
1693     if(s->flags&CODEC_FLAG_GRAY) return;
1694
1695     motion_x= s->sprite_offset[1][0];
1696     motion_y= s->sprite_offset[1][1];
1697     src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
1698     src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
1699     motion_x<<=(3-s->sprite_warping_accuracy);
1700     motion_y<<=(3-s->sprite_warping_accuracy);
1701     src_x = clip(src_x, -8, s->width>>1);
1702     if (src_x == s->width>>1)
1703         motion_x =0;
1704     src_y = clip(src_y, -8, s->height>>1);
1705     if (src_y == s->height>>1)
1706         motion_y =0;
1707
1708     offset = (src_y * uvlinesize) + src_x + (src_offset>>1);
1709     ptr = ref_picture[1] + offset;
1710     if(s->flags&CODEC_FLAG_EMU_EDGE){
1711         if(src_x<0 || src_y<0 || src_x + 9 >= s->h_edge_pos>>1
1712                               || src_y + 9 >= s->v_edge_pos>>1){
1713             ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1714             ptr= s->edge_emu_buffer;
1715             emu=1;
1716         }
1717     }
1718     s->dsp.gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
1719     
1720     ptr = ref_picture[2] + offset;
1721     if(emu){
1722         ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1723         ptr= s->edge_emu_buffer;
1724     }
1725     s->dsp.gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
1726     
1727     return;
1728 }
1729
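/* global motion compensation for the general case: dsp.gmc warps the block
   with the affine transform sprite_offset + sprite_delta * (x,y), clipped
   against h_edge_pos/v_edge_pos */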
1730 static inline void gmc_motion(MpegEncContext *s,
1731                                uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1732                                int dest_offset,
1733                                uint8_t **ref_picture, int src_offset)
1734 {
1735     uint8_t *ptr;
1736     int linesize, uvlinesize;
1737     const int a= s->sprite_warping_accuracy;
1738     int ox, oy;
1739
1740     linesize = s->linesize;
1741     uvlinesize = s->uvlinesize;
1742
1743     ptr = ref_picture[0] + src_offset;
1744
1745     dest_y+=dest_offset;
1746     
1747     ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
1748     oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;
1749
1750     s->dsp.gmc(dest_y, ptr, linesize, 16,
1751            ox, 
1752            oy, 
1753            s->sprite_delta[0][0], s->sprite_delta[0][1],
1754            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1755            a+1, (1<<(2*a+1)) - s->no_rounding,
1756            s->h_edge_pos, s->v_edge_pos);
1757     s->dsp.gmc(dest_y+8, ptr, linesize, 16,
1758            ox + s->sprite_delta[0][0]*8, 
1759            oy + s->sprite_delta[1][0]*8, 
1760            s->sprite_delta[0][0], s->sprite_delta[0][1],
1761            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1762            a+1, (1<<(2*a+1)) - s->no_rounding,
1763            s->h_edge_pos, s->v_edge_pos);
1764
1765     if(s->flags&CODEC_FLAG_GRAY) return;
1766
1767
1768     dest_cb+=dest_offset>>1;
1769     dest_cr+=dest_offset>>1;
1770     
1771     ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
1772     oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;
1773
1774     ptr = ref_picture[1] + (src_offset>>1);
1775     s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
1776            ox, 
1777            oy, 
1778            s->sprite_delta[0][0], s->sprite_delta[0][1],
1779            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1780            a+1, (1<<(2*a+1)) - s->no_rounding,
1781            s->h_edge_pos>>1, s->v_edge_pos>>1);
1782     
1783     ptr = ref_picture[2] + (src_offset>>1);
1784     s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
1785            ox, 
1786            oy, 
1787            s->sprite_delta[0][0], s->sprite_delta[0][1],
1788            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1789            a+1, (1<<(2*a+1)) - s->no_rounding,
1790            s->h_edge_pos>>1, s->v_edge_pos>>1);
1791 }
1792
1793 /**
1794  * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
1795  * @param buf destination buffer
1796  * @param src source buffer
1797  * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
1798  * @param block_w width of block
1799  * @param block_h height of block
1800  * @param src_x x coordinate of the top left sample of the block in the source buffer
1801  * @param src_y y coordinate of the top left sample of the block in the source buffer
1802  * @param w width of the source buffer
1803  * @param h height of the source buffer
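 * @note the requested block may lie partly or completely outside the w x h
 *       source area; the overlapping part is copied and the missing samples
 *       are filled by replicating the nearest edge samples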
1804  */
1805 void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h, 
1806                                     int src_x, int src_y, int w, int h){
1807     int x, y;
1808     int start_y, start_x, end_y, end_x;
1809
1810     if(src_y>= h){
1811         src+= (h-1-src_y)*linesize;
1812         src_y=h-1;
1813     }else if(src_y<=-block_h){
1814         src+= (1-block_h-src_y)*linesize;
1815         src_y=1-block_h;
1816     }
1817     if(src_x>= w){
1818         src+= (w-1-src_x);
1819         src_x=w-1;
1820     }else if(src_x<=-block_w){
1821         src+= (1-block_w-src_x);
1822         src_x=1-block_w;
1823     }
1824
1825     start_y= FFMAX(0, -src_y);
1826     start_x= FFMAX(0, -src_x);
1827     end_y= FFMIN(block_h, h-src_y);
1828     end_x= FFMIN(block_w, w-src_x);
1829
1830     // copy existing part
1831     for(y=start_y; y<end_y; y++){
1832         for(x=start_x; x<end_x; x++){
1833             buf[x + y*linesize]= src[x + y*linesize];
1834         }
1835     }
1836
1837     //top
1838     for(y=0; y<start_y; y++){
1839         for(x=start_x; x<end_x; x++){
1840             buf[x + y*linesize]= buf[x + start_y*linesize];
1841         }
1842     }
1843
1844     //bottom
1845     for(y=end_y; y<block_h; y++){
1846         for(x=start_x; x<end_x; x++){
1847             buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
1848         }
1849     }
1850                                     
1851     for(y=0; y<block_h; y++){
1852        //left
1853         for(x=0; x<start_x; x++){
1854             buf[x + y*linesize]= buf[start_x + y*linesize];
1855         }
1856        
1857        //right
1858         for(x=end_x; x<block_w; x++){
1859             buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
1860         }
1861     }
1862 }
1863
1864
1865 /* apply one mpeg motion vector to the three components */
1866 static inline void mpeg_motion(MpegEncContext *s,
1867                                uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1868                                int dest_offset,
1869                                uint8_t **ref_picture, int src_offset,
1870                                int field_based, op_pixels_func (*pix_op)[4],
1871                                int motion_x, int motion_y, int h)
1872 {
1873     uint8_t *ptr;
1874     int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
1875     int emu=0;
1876 #if 0    
1877 if(s->quarter_sample)
1878 {
1879     motion_x>>=1;
1880     motion_y>>=1;
1881 }
1882 #endif
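    /* dxy packs the half-pel fraction of the vector (bit 0: horizontal,
       bit 1: vertical) and selects the interpolation function in pix_op */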
1883     dxy = ((motion_y & 1) << 1) | (motion_x & 1);
1884     src_x = s->mb_x * 16 + (motion_x >> 1);
1885     src_y = s->mb_y * (16 >> field_based) + (motion_y >> 1);
1886                 
1887     /* WARNING: do not forget half pels */
1888     height = s->height >> field_based;
1889     v_edge_pos = s->v_edge_pos >> field_based;
1890     src_x = clip(src_x, -16, s->width);
1891     if (src_x == s->width)
1892         dxy &= ~1;
1893     src_y = clip(src_y, -16, height);
1894     if (src_y == height)
1895         dxy &= ~2;
1896     linesize   = s->current_picture.linesize[0] << field_based;
1897     uvlinesize = s->current_picture.linesize[1] << field_based;
1898     ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset;
1899     dest_y += dest_offset;
1900
1901     if(s->flags&CODEC_FLAG_EMU_EDGE){
1902         if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 16 > s->h_edge_pos
1903                               || src_y + (motion_y&1) + h  > v_edge_pos){
1904             ff_emulated_edge_mc(s->edge_emu_buffer, ptr - src_offset, s->linesize, 17, 17+field_based,  //FIXME linesize? and uv below
1905                              src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
1906             ptr= s->edge_emu_buffer + src_offset;
1907             emu=1;
1908         }
1909     }
1910     pix_op[0][dxy](dest_y, ptr, linesize, h);
1911
1912     if(s->flags&CODEC_FLAG_GRAY) return;
1913
1914     if (s->out_format == FMT_H263) {
1915         dxy = 0;
1916         if ((motion_x & 3) != 0)
1917             dxy |= 1;
1918         if ((motion_y & 3) != 0)
1919             dxy |= 2;
1920         mx = motion_x >> 2;
1921         my = motion_y >> 2;
1922     } else {
1923         mx = motion_x / 2;
1924         my = motion_y / 2;
1925         dxy = ((my & 1) << 1) | (mx & 1);
1926         mx >>= 1;
1927         my >>= 1;
1928     }
1929     
1930     src_x = s->mb_x * 8 + mx;
1931     src_y = s->mb_y * (8 >> field_based) + my;
1932     src_x = clip(src_x, -8, s->width >> 1);
1933     if (src_x == (s->width >> 1))
1934         dxy &= ~1;
1935     src_y = clip(src_y, -8, height >> 1);
1936     if (src_y == (height >> 1))
1937         dxy &= ~2;
1938     offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
1939     ptr = ref_picture[1] + offset;
1940     if(emu){
1941         ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, 
1942                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1943         ptr= s->edge_emu_buffer + (src_offset >> 1);
1944     }
1945     pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1946
1947     ptr = ref_picture[2] + offset;
1948     if(emu){
1949         ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, 
1950                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1951         ptr= s->edge_emu_buffer + (src_offset >> 1);
1952     }
1953     pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1954 }
1955
1956 static inline void qpel_motion(MpegEncContext *s,
1957                                uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1958                                int dest_offset,
1959                                uint8_t **ref_picture, int src_offset,
1960                                int field_based, op_pixels_func (*pix_op)[4],
1961                                qpel_mc_func (*qpix_op)[16],
1962                                int motion_x, int motion_y, int h)
1963 {
1964     uint8_t *ptr;
1965     int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
1966     int emu=0;
1967
1968     dxy = ((motion_y & 3) << 2) | (motion_x & 3);
1969     src_x = s->mb_x * 16 + (motion_x >> 2);
1970     src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
1971
1972     height = s->height >> field_based;
1973     v_edge_pos = s->v_edge_pos >> field_based;
1974     src_x = clip(src_x, -16, s->width);
1975     if (src_x == s->width)
1976         dxy &= ~3;
1977     src_y = clip(src_y, -16, height);
1978     if (src_y == height)
1979         dxy &= ~12;
1980     linesize = s->linesize << field_based;
1981     uvlinesize = s->uvlinesize << field_based;
1982     ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1983     dest_y += dest_offset;
1984 //printf("%d %d %d\n", src_x, src_y, dxy);
1985     
1986     if(s->flags&CODEC_FLAG_EMU_EDGE){
1987         if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 16 > s->h_edge_pos
1988                               || src_y + (motion_y&3) + h  > v_edge_pos){
1989             ff_emulated_edge_mc(s->edge_emu_buffer, ptr - src_offset, s->linesize, 17, 17+field_based, 
1990                              src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
1991             ptr= s->edge_emu_buffer + src_offset;
1992             emu=1;
1993         }
1994     }
1995     if(!field_based)
1996         qpix_op[0][dxy](dest_y, ptr, linesize);
1997     else{
1998         //damn interlaced mode
1999         //FIXME boundary mirroring is not exactly correct here
2000         qpix_op[1][dxy](dest_y  , ptr  , linesize);
2001         qpix_op[1][dxy](dest_y+8, ptr+8, linesize);
2002     }
2003
2004     if(s->flags&CODEC_FLAG_GRAY) return;
2005
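    /* derive the chroma vector from the luma quarter-pel vector and reduce it
       to half-pel precision; the FF_BUG_QPEL_CHROMA* paths mimic the rounding
       of some buggy encoders so that such streams decode correctly */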
2006     if(field_based){
2007         mx= motion_x/2;
2008         my= motion_y>>1;
2009     }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
2010         static const int rtab[8]= {0,0,1,1,0,0,0,1};
2011         mx= (motion_x>>1) + rtab[motion_x&7];
2012         my= (motion_y>>1) + rtab[motion_y&7];
2013     }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
2014         mx= (motion_x>>1)|(motion_x&1);
2015         my= (motion_y>>1)|(motion_y&1);
2016     }else{
2017         mx= motion_x/2;
2018         my= motion_y/2;
2019     }
2020     mx= (mx>>1)|(mx&1);
2021     my= (my>>1)|(my&1);
2022
2023     dxy= (mx&1) | ((my&1)<<1);
2024     mx>>=1;
2025     my>>=1;
2026
2027     src_x = s->mb_x * 8 + mx;
2028     src_y = s->mb_y * (8 >> field_based) + my;
2029     src_x = clip(src_x, -8, s->width >> 1);
2030     if (src_x == (s->width >> 1))
2031         dxy &= ~1;
2032     src_y = clip(src_y, -8, height >> 1);
2033     if (src_y == (height >> 1))
2034         dxy &= ~2;
2035
2036     offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
2037     ptr = ref_picture[1] + offset;
2038     if(emu){
2039         ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, 
2040                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
2041         ptr= s->edge_emu_buffer + (src_offset >> 1);
2042     }
2043     pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr,  uvlinesize, h >> 1);
2044     
2045     ptr = ref_picture[2] + offset;
2046     if(emu){
2047         ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, 
2048                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
2049         ptr= s->edge_emu_buffer + (src_offset >> 1);
2050     }
2051     pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr,  uvlinesize, h >> 1);
2052 }
2053
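/**
 * rounds the sum of luma motion vector components to the half-pel chroma
 * component using the H.263 rounding table; used for the MV_TYPE_8X8 case
 * below where one chroma vector is derived from the four luma vectors
 */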
2054 inline int ff_h263_round_chroma(int x){
2055     if (x >= 0)
2056         return  (h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
2057     else {
2058         x = -x;
2059         return -(h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
2060     }
2061 }
2062
2063 /**
2064  * motion compensation of a single macroblock
2065  * @param s context
2066  * @param dest_y luma destination pointer
2067  * @param dest_cb chroma cb/u destination pointer
2068  * @param dest_cr chroma cr/v destination pointer
2069  * @param dir direction (0->forward, 1->backward)
2070  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2071  * @param pix_op halfpel motion compensation function (average or put normally)
2072  * @param qpix_op qpel motion compensation function (average or put normally)
2073  * the motion vectors are taken from s->mv and the MV type from s->mv_type
2074  */
2075 static inline void MPV_motion(MpegEncContext *s, 
2076                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2077                               int dir, uint8_t **ref_picture, 
2078                               op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
2079 {
2080     int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y;
2081     int mb_x, mb_y, i;
2082     uint8_t *ptr, *dest;
2083     int emu=0;
2084
2085     mb_x = s->mb_x;
2086     mb_y = s->mb_y;
2087
2088     switch(s->mv_type) {
2089     case MV_TYPE_16X16:
2090 #ifdef CONFIG_RISKY
2091         if(s->mcsel){
2092             if(s->real_sprite_warping_points==1){
2093                 gmc1_motion(s, dest_y, dest_cb, dest_cr, 0,
2094                             ref_picture, 0);
2095             }else{
2096                 gmc_motion(s, dest_y, dest_cb, dest_cr, 0,
2097                             ref_picture, 0);
2098             }
2099         }else if(s->quarter_sample){
2100             qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
2101                         ref_picture, 0,
2102                         0, pix_op, qpix_op,
2103                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
2104         }else if(s->mspel){
2105             ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
2106                         ref_picture, pix_op,
2107                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
2108         }else
2109 #endif
2110         {
2111             mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2112                         ref_picture, 0,
2113                         0, pix_op,
2114                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
2115         }           
2116         break;
2117     case MV_TYPE_8X8:
2118         mx = 0;
2119         my = 0;
2120         if(s->quarter_sample){
2121             for(i=0;i<4;i++) {
2122                 motion_x = s->mv[dir][i][0];
2123                 motion_y = s->mv[dir][i][1];
2124
2125                 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
2126                 src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
2127                 src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;
2128                     
2129                 /* WARNING: do not forget half pels */
2130                 src_x = clip(src_x, -16, s->width);
2131                 if (src_x == s->width)
2132                     dxy &= ~3;
2133                 src_y = clip(src_y, -16, s->height);
2134                 if (src_y == s->height)
2135                     dxy &= ~12;
2136                     
2137                 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
2138                 if(s->flags&CODEC_FLAG_EMU_EDGE){
2139                     if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 8 > s->h_edge_pos
2140                                           || src_y + (motion_y&3) + 8 > s->v_edge_pos){
2141                         ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
2142                         ptr= s->edge_emu_buffer;
2143                     }
2144                 }
2145                 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
2146                 qpix_op[1][dxy](dest, ptr, s->linesize);
2147
2148                 mx += s->mv[dir][i][0]/2;
2149                 my += s->mv[dir][i][1]/2;
2150             }
2151         }else{
2152             for(i=0;i<4;i++) {
2153                 motion_x = s->mv[dir][i][0];
2154                 motion_y = s->mv[dir][i][1];
2155
2156                 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
2157                 src_x = mb_x * 16 + (motion_x >> 1) + (i & 1) * 8;
2158                 src_y = mb_y * 16 + (motion_y >> 1) + (i >>1) * 8;
2159                     
2160                 /* WARNING: do not forget half pels */
2161                 src_x = clip(src_x, -16, s->width);
2162                 if (src_x == s->width)
2163                     dxy &= ~1;
2164                 src_y = clip(src_y, -16, s->height);
2165                 if (src_y == s->height)
2166                     dxy &= ~2;
2167                     
2168                 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
2169                 if(s->flags&CODEC_FLAG_EMU_EDGE){
2170                     if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 8 > s->h_edge_pos
2171                                           || src_y + (motion_y&1) + 8 > s->v_edge_pos){
2172                         ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
2173                         ptr= s->edge_emu_buffer;
2174                     }
2175                 }
2176                 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
2177                 pix_op[1][dxy](dest, ptr, s->linesize, 8);
2178
2179                 mx += s->mv[dir][i][0];
2180                 my += s->mv[dir][i][1];
2181             }
2182         }
2183
2184         if(s->flags&CODEC_FLAG_GRAY) break;
2185         /* In case of 8X8, we construct a single chroma motion vector
2186            with a special rounding */
2187         mx= ff_h263_round_chroma(mx);
2188         my= ff_h263_round_chroma(my);
2189         dxy = ((my & 1) << 1) | (mx & 1);
2190         mx >>= 1;
2191         my >>= 1;
2192
2193         src_x = mb_x * 8 + mx;
2194         src_y = mb_y * 8 + my;
2195         src_x = clip(src_x, -8, s->width/2);
2196         if (src_x == s->width/2)
2197             dxy &= ~1;
2198         src_y = clip(src_y, -8, s->height/2);
2199         if (src_y == s->height/2)
2200             dxy &= ~2;
2201         
2202         offset = (src_y * (s->uvlinesize)) + src_x;
2203         ptr = ref_picture[1] + offset;
2204         if(s->flags&CODEC_FLAG_EMU_EDGE){
2205                 if(src_x<0 || src_y<0 || src_x + (dxy &1) + 8 > s->h_edge_pos>>1
2206                                       || src_y + (dxy>>1) + 8 > s->v_edge_pos>>1){
2207                     ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2208                     ptr= s->edge_emu_buffer;
2209                     emu=1;
2210                 }
2211             }
2212         pix_op[1][dxy](dest_cb, ptr, s->uvlinesize, 8);
2213
2214         ptr = ref_picture[2] + offset;
2215         if(emu){
2216             ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2217             ptr= s->edge_emu_buffer;
2218         }
2219         pix_op[1][dxy](dest_cr, ptr, s->uvlinesize, 8);
2220         break;
2221     case MV_TYPE_FIELD:
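        /* field prediction: in frame pictures each field half of the macroblock
           is predicted separately from the field selected by field_select; in
           field pictures a single 16-line prediction is done, possibly
           referencing a field of the current frame */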
2222         if (s->picture_structure == PICT_FRAME) {
2223             if(s->quarter_sample){
2224                 /* top field */
2225                 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
2226                             ref_picture, s->field_select[dir][0] ? s->linesize : 0,
2227                             1, pix_op, qpix_op,
2228                             s->mv[dir][0][0], s->mv[dir][0][1], 8);
2229                 /* bottom field */
2230                 qpel_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2231                             ref_picture, s->field_select[dir][1] ? s->linesize : 0,
2232                             1, pix_op, qpix_op,
2233                             s->mv[dir][1][0], s->mv[dir][1][1], 8);
2234             }else{
2235                 /* top field */       
2236                 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2237                             ref_picture, s->field_select[dir][0] ? s->linesize : 0,
2238                             1, pix_op,
2239                             s->mv[dir][0][0], s->mv[dir][0][1], 8);
2240                 /* bottom field */
2241                 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2242                             ref_picture, s->field_select[dir][1] ? s->linesize : 0,
2243                             1, pix_op,
2244                             s->mv[dir][1][0], s->mv[dir][1][1], 8);
2245             }
2246         } else {
2247             int offset;
2248             if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){
2249                 offset= s->field_select[dir][0] ? s->linesize : 0;
2250             }else{
2251                 ref_picture= s->current_picture.data;
2252                 offset= s->field_select[dir][0] ? s->linesize : -s->linesize; 
2253             } 
2254
2255             mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2256                         ref_picture, offset,
2257                         0, pix_op,
2258                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
2259         }
2260         break;
2261     case MV_TYPE_16X8:{
2262         int offset;
2263          uint8_t ** ref2picture;
2264
2265             if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){
2266                 ref2picture= ref_picture;
2267                 offset= s->field_select[dir][0] ? s->linesize : 0;
2268             }else{
2269                 ref2picture= s->current_picture.data;
2270                 offset= s->field_select[dir][0] ? s->linesize : -s->linesize; 
2271             } 
2272
2273             mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2274                         ref2picture, offset,
2275                         0, pix_op,
2276                         s->mv[dir][0][0], s->mv[dir][0][1], 8);
2277
2278
2279             if(s->picture_structure == s->field_select[dir][1] + 1 || s->pict_type == B_TYPE || s->first_field){
2280                 ref2picture= ref_picture;
2281                 offset= s->field_select[dir][1] ? s->linesize : 0;
2282             }else{
2283                 ref2picture= s->current_picture.data;
2284                 offset= s->field_select[dir][1] ? s->linesize : -s->linesize; 
2285             } 
2286             // I know it is ugly, but this is the only way to fool emu_edge without rewriting mpeg_motion
2287             mpeg_motion(s, dest_y+16*s->linesize, dest_cb+8*s->uvlinesize, dest_cr+8*s->uvlinesize,
2288                         0,
2289                         ref2picture, offset,
2290                         0, pix_op,
2291                         s->mv[dir][1][0], s->mv[dir][1][1]+16, 8);
2292         }
2293         
2294         break;
2295     case MV_TYPE_DMV:
2296     {
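        /* dual prime (MPEG-2): a prediction from the same-parity field is put
           first, then averaged with a prediction from the opposite-parity field
           (dmv_pix_op is switched from put_pixels to avg_pixels) */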
2297     op_pixels_func (*dmv_pix_op)[4];
2298     int offset;
2299
2300         dmv_pix_op = s->dsp.put_pixels_tab;
2301
2302         if(s->picture_structure == PICT_FRAME){
2303             //put top field from top field
2304             mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2305                         ref_picture, 0,
2306                         1, dmv_pix_op,
2307                         s->mv[dir][0][0], s->mv[dir][0][1], 8);
2308             //put bottom field from bottom field
2309             mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2310                         ref_picture, s->linesize,
2311                         1, dmv_pix_op,
2312                         s->mv[dir][0][0], s->mv[dir][0][1], 8);
2313
2314             dmv_pix_op = s->dsp.avg_pixels_tab; 
2315         
2316             //avg top field from bottom field
2317             mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2318                         ref_picture, s->linesize,
2319                         1, dmv_pix_op,
2320                         s->mv[dir][2][0], s->mv[dir][2][1], 8);
2321             //avg bottom field from top field
2322             mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2323                         ref_picture, 0,
2324                         1, dmv_pix_op,
2325                         s->mv[dir][3][0], s->mv[dir][3][1], 8);
2326
2327         }else{
2328             offset=(s->picture_structure == PICT_BOTTOM_FIELD)? 
2329                          s->linesize : 0;
2330
2331             //put field from the same parity
2332             //same parity is never in the same frame
2333             mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2334                         ref_picture,offset,
2335                         0,dmv_pix_op,
2336                         s->mv[dir][0][0],s->mv[dir][0][1],16);
2337
2338             // after put we make avg of the same block
2339             dmv_pix_op=s->dsp.avg_pixels_tab; 
2340
2341             //opposite parity is always in the same frame if this is the second field
2342             if(!s->first_field){
2343                 ref_picture = s->current_picture.data;    
2344                 //top field is one linesize from the frame beginning
2345                 offset=(s->picture_structure == PICT_BOTTOM_FIELD)? 
2346                         -s->linesize : s->linesize;
2347             }else 
2348                 offset=(s->picture_structure == PICT_BOTTOM_FIELD)? 
2349                         0 : s->linesize;
2350
2351             //avg field from the opposite parity
2352             mpeg_motion(s, dest_y, dest_cb, dest_cr,0,
2353                         ref_picture, offset,
2354                         0,dmv_pix_op,
2355                         s->mv[dir][2][0],s->mv[dir][2][1],16);
2356         }
2357     }
2358     break;
2359
2360     }
2361 }
2362
2363
2364 /* put block[] to dest[] */
2365 static inline void put_dct(MpegEncContext *s, 
2366                            DCTELEM *block, int i, uint8_t *dest, int line_size)
2367 {
2368     s->dct_unquantize(s, block, i, s->qscale);
2369     s->dsp.idct_put (dest, line_size, block);
2370 }
2371
2372 /* add block[] to dest[] */
2373 static inline void add_dct(MpegEncContext *s, 
2374                            DCTELEM *block, int i, uint8_t *dest, int line_size)
2375 {
2376     if (s->block_last_index[i] >= 0) {
2377         s->dsp.idct_add (dest, line_size, block);
2378     }
2379 }
2380
2381 static inline void add_dequant_dct(MpegEncContext *s, 
2382                            DCTELEM *block, int i, uint8_t *dest, int line_size)
2383 {
2384     if (s->block_last_index[i] >= 0) {
2385         s->dct_unquantize(s, block, i, s->qscale);
2386
2387         s->dsp.idct_add (dest, line_size, block);
2388     }
2389 }
2390
2391 /**
2392  * cleans the dc, ac and coded_block entries for the current non-intra MB
2393  */
2394 void ff_clean_intra_table_entries(MpegEncContext *s)
2395 {
2396     int wrap = s->block_wrap[0];
2397     int xy = s->block_index[0];
2398     
2399     s->dc_val[0][xy           ] = 
2400     s->dc_val[0][xy + 1       ] = 
2401     s->dc_val[0][xy     + wrap] =
2402     s->dc_val[0][xy + 1 + wrap] = 1024;
2403     /* ac pred */
2404     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
2405     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2406     if (s->msmpeg4_version>=3) {
2407         s->coded_block[xy           ] =
2408         s->coded_block[xy + 1       ] =
2409         s->coded_block[xy     + wrap] =
2410         s->coded_block[xy + 1 + wrap] = 0;
2411     }
2412     /* chroma */
2413     wrap = s->block_wrap[4];
2414     xy = s->mb_x + 1 + (s->mb_y + 1) * wrap;
2415     s->dc_val[1][xy] =
2416     s->dc_val[2][xy] = 1024;
2417     /* ac pred */
2418     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2419     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2420     
2421     s->mbintra_table[s->mb_x + s->mb_y*s->mb_stride]= 0;
2422 }
2423
2424 /* generic function called after a macroblock has been parsed by the
2425    decoder or after it has been encoded by the encoder.
2426
2427    Important variables used:
2428    s->mb_intra : true if intra macroblock
2429    s->mv_dir   : motion vector direction
2430    s->mv_type  : motion vector type
2431    s->mv       : motion vector
2432    s->interlaced_dct : true if interlaced dct used (mpeg2)
2433  */
2434 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
2435 {
2436     int mb_x, mb_y;
2437     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2438 #ifdef HAVE_XVMC
2439     if(s->avctx->xvmc_acceleration){
2440         XVMC_decode_mb(s,block);
2441         return;
2442     }
2443 #endif
2444
2445     mb_x = s->mb_x;
2446     mb_y = s->mb_y;
2447
2448     s->current_picture.qscale_table[mb_xy]= s->qscale;
2449
2450     /* update DC predictors for P macroblocks */
2451     if (!s->mb_intra) {
2452         if (s->h263_pred || s->h263_aic) {
2453             if(s->mbintra_table[mb_xy])
2454                 ff_clean_intra_table_entries(s);
2455         } else {
2456             s->last_dc[0] =
2457             s->last_dc[1] =
2458             s->last_dc[2] = 128 << s->intra_dc_precision;
2459         }
2460     }
2461     else if (s->h263_pred || s->h263_aic)
2462         s->mbintra_table[mb_xy]=1;
2463
2464     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
2465         uint8_t *dest_y, *dest_cb, *dest_cr;
2466         int dct_linesize, dct_offset;
2467         op_pixels_func (*op_pix)[4];
2468         qpel_mc_func (*op_qpix)[16];
2469         const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2470         const int uvlinesize= s->current_picture.linesize[1];
2471
2472         /* avoid copy if macroblock skipped in last frame too */
2473         /* skip only during decoding as we might trash the buffers a bit during encoding */
2474         if(!s->encoding){
2475             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2476             const int age= s->current_picture.age;
2477
2478             assert(age);
2479
2480             if (s->mb_skiped) {
2481                 s->mb_skiped= 0;
2482                 assert(s->pict_type!=I_TYPE);
2483  
2484                 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2485                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2486
2487                 /* if the previous one was skipped too, then there is nothing to do! */
2488                 if (*mbskip_ptr >= age && s->current_picture.reference){
2489                     return;
2490                 }
2491             } else if(!s->current_picture.reference){
2492                 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2493                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2494             } else{
2495                 *mbskip_ptr = 0; /* not skipped */
2496             }
2497         }
2498
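        /* with interlaced DCT the 8x8 blocks hold field lines, so rows are
           2*linesize apart and the lower luma block pair starts one line below
           the upper pair instead of 8 lines below */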
2499         if (s->interlaced_dct) {
2500             dct_linesize = linesize * 2;
2501             dct_offset = linesize;
2502         } else {
2503             dct_linesize = linesize;
2504             dct_offset = linesize * 8;
2505         }
2506         
2507         dest_y=  s->dest[0];
2508         dest_cb= s->dest[1];
2509         dest_cr= s->dest[2];
2510
2511         if (!s->mb_intra) {
2512             /* motion handling */
2513             /* decoding or more than one mb_type (MC was already done otherwise) */
2514             if(!s->encoding){
2515                 if ((!s->no_rounding) || s->pict_type==B_TYPE){                
2516                     op_pix = s->dsp.put_pixels_tab;
2517                     op_qpix= s->dsp.put_qpel_pixels_tab;
2518                 }else{
2519                     op_pix = s->dsp.put_no_rnd_pixels_tab;
2520                     op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
2521                 }
2522
2523                 if (s->mv_dir & MV_DIR_FORWARD) {
2524                     MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2525                     op_pix = s->dsp.avg_pixels_tab;
2526                     op_qpix= s->dsp.avg_qpel_pixels_tab;
2527                 }
2528                 if (s->mv_dir & MV_DIR_BACKWARD) {
2529                     MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2530                 }
2531             }
2532
2533             /* skip dequant / idct if we are really late ;) */
2534             if(s->hurry_up>1) return;
2535
2536             /* add dct residue */
2537             if(s->encoding || !(   s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2538                                 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2539                 add_dequant_dct(s, block[0], 0, dest_y, dct_linesize);
2540                 add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2541                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2542                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2543
2544                 if(!(s->flags&CODEC_FLAG_GRAY)){
2545                     add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize);
2546                     add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize);
2547                 }
2548             } else if(s->codec_id != CODEC_ID_WMV2){
2549                 add_dct(s, block[0], 0, dest_y, dct_linesize);
2550                 add_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2551                 add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2552                 add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2553
2554                 if(!(s->flags&CODEC_FLAG_GRAY)){
2555                     add_dct(s, block[4], 4, dest_cb, uvlinesize);
2556                     add_dct(s, block[5], 5, dest_cr, uvlinesize);
2557                 }
2558             } 
2559 #ifdef CONFIG_RISKY
2560             else{
2561                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2562             }
2563 #endif
2564         } else {
2565             /* dct only in intra block */
2566             if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2567                 put_dct(s, block[0], 0, dest_y, dct_linesize);
2568                 put_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2569                 put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2570                 put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2571
2572                 if(!(s->flags&CODEC_FLAG_GRAY)){
2573                     put_dct(s, block[4], 4, dest_cb, uvlinesize);
2574                     put_dct(s, block[5], 5, dest_cr, uvlinesize);
2575                 }
2576             }else{
2577                 s->dsp.idct_put(dest_y                 , dct_linesize, block[0]);
2578                 s->dsp.idct_put(dest_y              + 8, dct_linesize, block[1]);
2579                 s->dsp.idct_put(dest_y + dct_offset    , dct_linesize, block[2]);
2580                 s->dsp.idct_put(dest_y + dct_offset + 8, dct_linesize, block[3]);
2581
2582                 if(!(s->flags&CODEC_FLAG_GRAY)){
2583                     s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2584                     s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2585                 }
2586             }
2587         }
2588     }
2589 }
2590
2591 #ifdef CONFIG_ENCODERS
2592
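/**
 * zeroes the block if it contains only a few isolated +-1 levels whose
 * run-weighted score stays below the threshold; the DC coefficient is kept
 * unless a negative threshold is passed
 */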
2593 static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
2594 {
2595     static const char tab[64]=
2596         {3,2,2,1,1,1,1,1,
2597          1,1,1,1,1,1,1,1,
2598          1,1,1,1,1,1,1,1,
2599          0,0,0,0,0,0,0,0,
2600          0,0,0,0,0,0,0,0,
2601          0,0,0,0,0,0,0,0,
2602          0,0,0,0,0,0,0,0,
2603          0,0,0,0,0,0,0,0};
2604     int score=0;
2605     int run=0;
2606     int i;
2607     DCTELEM *block= s->block[n];
2608     const int last_index= s->block_last_index[n];
2609     int skip_dc;
2610
2611     if(threshold<0){
2612         skip_dc=0;
2613         threshold= -threshold;
2614     }else
2615         skip_dc=1;
2616
2617     /* are all the coefficients which we could set to zero already zero? */
2618     if(last_index<=skip_dc - 1) return;
2619
2620     for(i=0; i<=last_index; i++){
2621         const int j = s->intra_scantable.permutated[i];
2622         const int level = ABS(block[j]);
2623         if(level==1){
2624             if(skip_dc && i==0) continue;
2625             score+= tab[run];
2626             run=0;
2627         }else if(level>1){
2628             return;
2629         }else{
2630             run++;
2631         }
2632     }
2633     if(score >= threshold) return;
2634     for(i=skip_dc; i<=last_index; i++){
2635         const int j = s->intra_scantable.permutated[i];
2636         block[j]=0;
2637     }
2638     if(block[0]) s->block_last_index[n]= 0;
2639     else         s->block_last_index[n]= -1;
2640 }
2641
2642 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index)
2643 {
2644     int i;
2645     const int maxlevel= s->max_qcoeff;
2646     const int minlevel= s->min_qcoeff;
2647     
2648     if(s->mb_intra){
2649         i=1; //skip clipping of intra dc
2650     }else
2651         i=0;
2652     
2653     for(;i<=last_index; i++){
2654         const int j= s->intra_scantable.permutated[i];
2655         int level = block[j];
2656        
2657         if     (level>maxlevel) level=maxlevel;
2658         else if(level<minlevel) level=minlevel;
2659
2660         block[j]= level;
2661     }
2662 }
2663
2664 #if 0
2665 static int pix_vcmp16x8(uint8_t *s, int stride){ //FIXME move to dsputil & optimize
2666     int score=0;
2667     int x,y;
2668     
2669     for(y=0; y<7; y++){
2670         for(x=0; x<16; x+=4){
2671             score+= ABS(s[x  ] - s[x  +stride]) + ABS(s[x+1] - s[x+1+stride]) 
2672                    +ABS(s[x+2] - s[x+2+stride]) + ABS(s[x+3] - s[x+3+stride]);
2673         }
2674         s+= stride;
2675     }
2676     
2677     return score;
2678 }
2679
2680 static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move to dsputil & optimize
2681     int score=0;
2682     int x,y;
2683     
2684     for(y=0; y<7; y++){
2685         for(x=0; x<16; x++){
2686             score+= ABS(s1[x  ] - s2[x ] - s1[x  +stride] + s2[x +stride]);
2687         }
2688         s1+= stride;
2689         s2+= stride;
2690     }
2691     
2692     return score;
2693 }
2694 #else
2695 #define SQ(a) ((a)*(a))
2696
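/* sum of squared differences between vertically adjacent lines of one block
   (pix_vcmp16x8) or of the difference of two blocks (pix_diff_vcmp16x8);
   used by encode_mb to choose between progressive and interlaced DCT */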
2697 static int pix_vcmp16x8(uint8_t *s, int stride){ //FIXME move to dsputil & optimize
2698     int score=0;
2699     int x,y;
2700     
2701     for(y=0; y<7; y++){
2702         for(x=0; x<16; x+=4){
2703             score+= SQ(s[x  ] - s[x  +stride]) + SQ(s[x+1] - s[x+1+stride]) 
2704                    +SQ(s[x+2] - s[x+2+stride]) + SQ(s[x+3] - s[x+3+stride]);
2705         }
2706         s+= stride;
2707     }
2708     
2709     return score;
2710 }
2711
2712 static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move to dsputil & optimize
2713     int score=0;
2714     int x,y;
2715     
2716     for(y=0; y<7; y++){
2717         for(x=0; x<16; x++){
2718             score+= SQ(s1[x  ] - s2[x ] - s1[x  +stride] + s2[x +stride]);
2719         }
2720         s1+= stride;
2721         s2+= stride;
2722     }
2723     
2724     return score;
2725 }
2726
2727 #endif
2728
2729 #endif //CONFIG_ENCODERS
2730
2731 /**
2732  *
2733  * @param h the normal height; it will be reduced automatically if needed for the last row
2734  */
2735 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2736     if (s->avctx->draw_horiz_band) {
2737         AVFrame *src;
2738         int offset[4];
2739         
2740         if(s->picture_structure != PICT_FRAME){
2741             h <<= 1;
2742             y <<= 1;
2743             if(s->first_field  && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2744         }
2745
2746         h= FFMIN(h, s->height - y);
2747
2748         if(s->pict_type==B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER)) 
2749             src= (AVFrame*)s->current_picture_ptr;
2750         else if(s->last_picture_ptr)
2751             src= (AVFrame*)s->last_picture_ptr;
2752         else
2753             return;
2754             
2755         if(s->pict_type==B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2756             offset[0]=
2757             offset[1]=
2758             offset[2]=
2759             offset[3]= 0;
2760         }else{
2761             offset[0]= y * s->linesize;
2762             offset[1]= 
2763             offset[2]= (y>>1) * s->uvlinesize;
2764             offset[3]= 0;
2765         }
2766
2767         emms_c();
2768
2769         s->avctx->draw_horiz_band(s->avctx, src, offset,
2770                                   y, s->picture_structure, h);
2771     }
2772 }
2773
2774 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2775     const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2776     const int uvlinesize= s->current_picture.linesize[1];
2777         
2778     s->block_index[0]= s->block_wrap[0]*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2779     s->block_index[1]= s->block_wrap[0]*(s->mb_y*2 + 1)     + s->mb_x*2;
2780     s->block_index[2]= s->block_wrap[0]*(s->mb_y*2 + 2) - 1 + s->mb_x*2;
2781     s->block_index[3]= s->block_wrap[0]*(s->mb_y*2 + 2)     + s->mb_x*2;
2782     s->block_index[4]= s->block_wrap[4]*(s->mb_y + 1)                    + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
2783     s->block_index[5]= s->block_wrap[4]*(s->mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
2784     
2785     if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME){
2786         s->dest[0] = s->current_picture.data[0] + s->mb_x * 16 - 16;
2787         s->dest[1] = s->current_picture.data[1] + s->mb_x * 8 - 8;
2788         s->dest[2] = s->current_picture.data[2] + s->mb_x * 8 - 8;
2789     }else{
2790         s->dest[0] = s->current_picture.data[0] + (s->mb_y * 16* linesize  ) + s->mb_x * 16 - 16;
2791         s->dest[1] = s->current_picture.data[1] + (s->mb_y * 8 * uvlinesize) + s->mb_x * 8 - 8;
2792         s->dest[2] = s->current_picture.data[2] + (s->mb_y * 8 * uvlinesize) + s->mb_x * 8 - 8;
2793     }    
2794 }
2795
2796 #ifdef CONFIG_ENCODERS
2797
2798 static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2799 {
2800     const int mb_x= s->mb_x;
2801     const int mb_y= s->mb_y;
2802     int i;
2803     int skip_dct[6];
2804     int dct_offset   = s->linesize*8; //default for progressive frames
2805     
2806     for(i=0; i<6; i++) skip_dct[i]=0;
2807     
2808     if(s->adaptive_quant){
2809         s->dquant= s->current_picture.qscale_table[mb_x + mb_y*s->mb_stride] - s->qscale;
2810
2811         if(s->out_format==FMT_H263){
2812             if     (s->dquant> 2) s->dquant= 2;
2813             else if(s->dquant<-2) s->dquant=-2;
2814         }
2815             
2816         if(s->codec_id==CODEC_ID_MPEG4){        
2817             if(!s->mb_intra){
2818                 if(s->mv_dir&MV_DIRECT)
2819                     s->dquant=0;
2820
2821                 assert(s->dquant==0 || s->mv_type!=MV_TYPE_8X8);
2822             }
2823         }
2824         s->qscale+= s->dquant;
2825         s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
2826         s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
2827     }
2828
2829     if (s->mb_intra) {
2830         uint8_t *ptr;
2831         int wrap_y;
2832         int emu=0;
2833
2834         wrap_y = s->linesize;
2835         ptr = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
2836
2837         if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2838             ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2839             ptr= s->edge_emu_buffer;
2840             emu=1;
2841         }
2842         
2843         if(s->flags&CODEC_FLAG_INTERLACED_DCT){
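            /* compare the vertical correlation of the whole frame macroblock with
               that of its two fields; if the fields are clearly smoother, code the
               macroblock with interlaced DCT (blocks then hold field lines) */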
2844             int progressive_score, interlaced_score;
2845             
2846             progressive_score= pix_vcmp16x8(ptr, wrap_y  ) + pix_vcmp16x8(ptr + wrap_y*8, wrap_y );
2847             interlaced_score = pix_vcmp16x8(ptr, wrap_y*2) + pix_vcmp16x8(ptr + wrap_y  , wrap_y*2);
2848             
2849             if(progressive_score > interlaced_score + 100){
2850                 s->interlaced_dct=1;
2851             
2852                 dct_offset= wrap_y;
2853                 wrap_y<<=1;
2854             }else
2855                 s->interlaced_dct=0;
2856         }
2857         
2858         s->dsp.get_pixels(s->block[0], ptr                 , wrap_y);
2859         s->dsp.get_pixels(s->block[1], ptr              + 8, wrap_y);
2860         s->dsp.get_pixels(s->block[2], ptr + dct_offset    , wrap_y);
2861         s->dsp.get_pixels(s->block[3], ptr + dct_offset + 8, wrap_y);
2862
2863         if(s->flags&CODEC_FLAG_GRAY){
2864             skip_dct[4]= 1;
2865             skip_dct[5]= 1;
2866         }else{
2867             int wrap_c = s->uvlinesize;
2868             ptr = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2869             if(emu){
2870                 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2871                 ptr= s->edge_emu_buffer;
2872             }
2873             s->dsp.get_pixels(s->block[4], ptr, wrap_c);
2874
2875             ptr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
2876             if(emu){
2877                 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2878                 ptr= s->edge_emu_buffer;
2879             }
2880             s->dsp.get_pixels(s->block[5], ptr, wrap_c);
2881         }
2882     }else{
2883         op_pixels_func (*op_pix)[4];
2884         qpel_mc_func (*op_qpix)[16];
2885         uint8_t *dest_y, *dest_cb, *dest_cr;
2886         uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2887         int wrap_y, wrap_c;
2888         int emu=0;
2889
2890         dest_y  = s->dest[0];
2891         dest_cb = s->dest[1];
2892         dest_cr = s->dest[2];
2893         wrap_y = s->linesize;
2894         wrap_c = s->uvlinesize;
2895         ptr_y  = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
2896         ptr_cb = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2897         ptr_cr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
2898
2899         if ((!s->no_rounding) || s->pict_type==B_TYPE){
2900             op_pix = s->dsp.put_pixels_tab;
2901             op_qpix= s->dsp.put_qpel_pixels_tab;
2902         }else{
2903             op_pix = s->dsp.put_no_rnd_pixels_tab;
2904             op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
2905         }
2906
2907         if (s->mv_dir & MV_DIR_FORWARD) {
2908             MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2909             op_pix = s->dsp.avg_pixels_tab;
2910             op_qpix= s->dsp.avg_qpel_pixels_tab;
2911         }
2912         if (s->mv_dir & MV_DIR_BACKWARD) {
2913             MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2914         }
2915
2916         if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2917             ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2918             ptr_y= s->edge_emu_buffer;
2919             emu=1;
2920         }
2921         
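        /* same field/frame DCT decision as in the intra path, but measured on
           the motion compensated difference and with a larger margin */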
2922         if(s->flags&CODEC_FLAG_INTERLACED_DCT){
2923             int progressive_score, interlaced_score;
2924             
2925             progressive_score= pix_diff_vcmp16x8(ptr_y           , dest_y           , wrap_y  ) 
2926                              + pix_diff_vcmp16x8(ptr_y + wrap_y*8, dest_y + wrap_y*8, wrap_y  );
2927             interlaced_score = pix_diff_vcmp16x8(ptr_y           , dest_y           , wrap_y*2)
2928                              + pix_diff_vcmp16x8(ptr_y + wrap_y  , dest_y + wrap_y  , wrap_y*2);
2929             
2930             if(progressive_score > interlaced_score + 600){
2931                 s->interlaced_dct=1;
2932             
2933                 dct_offset= wrap_y;
2934                 wrap_y<<=1;
2935             }else
2936                 s->interlaced_dct=0;
2937         }
2938         
2939         s->dsp.diff_pixels(s->block[0], ptr_y                 , dest_y                 , wrap_y);
2940         s->dsp.diff_pixels(s->block[1], ptr_y              + 8, dest_y              + 8, wrap_y);
2941         s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset    , dest_y + dct_offset    , wrap_y);
2942         s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y);
2943         
2944         if(s->flags&CODEC_FLAG_GRAY){
2945             skip_dct[4]= 1;
2946             skip_dct[5]= 1;
2947         }else{
2948             if(emu){
2949                 ff_emulated_edge_mc(s->edge_emu_buffer, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2950                 ptr_cb= s->edge_emu_buffer;
2951             }
2952             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2953             if(emu){
2954                 ff_emulated_edge_mc(s->edge_emu_buffer, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2955                 ptr_cr= s->edge_emu_buffer;
2956             }
2957             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2958         }
2959         /* pre-quantization: skip the DCT of blocks whose motion compensated difference is almost zero */
2960         if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){
2961             //FIXME optimize
2962             if(s->dsp.pix_abs8x8(ptr_y               , dest_y               , wrap_y) < 20*s->qscale) skip_dct[0]= 1;
2963             if(s->dsp.pix_abs8x8(ptr_y            + 8, dest_y            + 8, wrap_y) < 20*s->qscale) skip_dct[1]= 1;
2964             if(s->dsp.pix_abs8x8(ptr_y +dct_offset   , dest_y +dct_offset   , wrap_y) < 20*s->qscale) skip_dct[2]= 1;
2965             if(s->dsp.pix_abs8x8(ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y) < 20*s->qscale) skip_dct[3]= 1;
2966             if(s->dsp.pix_abs8x8(ptr_cb              , dest_cb              , wrap_c) < 20*s->qscale) skip_dct[4]= 1;
2967             if(s->dsp.pix_abs8x8(ptr_cr              , dest_cr              , wrap_c) < 20*s->qscale) skip_dct[5]= 1;
2968 #if 0
2969 {
2970  static int stat[7];
2971  int num=0;
2972  for(i=0; i<6; i++)
2973   if(skip_dct[i]) num++;
2974  stat[num]++;
2975  
2976  if(s->mb_x==0 && s->mb_y==0){
2977   for(i=0; i<7; i++){
2978    printf("%6d %1d\n", stat[i], i);
2979   }
2980  }
2981 }
2982 #endif
2983         }
2984
2985     }
2986             
2987 #if 0
2988             {
2989                 float adap_parm;
2990                 
2991                 adap_parm = ((s->avg_mb_var << 1) + s->mb_var[s->mb_stride*mb_y+mb_x] + 1.0) /
2992                             ((s->mb_var[s->mb_stride*mb_y+mb_x] << 1) + s->avg_mb_var + 1.0);
2993             
2994                 printf("\ntype=%c qscale=%2d adap=%0.2f dquant=%4.2f var=%4d avgvar=%4d", 
2995                         (s->mb_type[s->mb_stride*mb_y+mb_x] > 0) ? 'I' : 'P', 
2996                         s->qscale, adap_parm, s->qscale*adap_parm,
2997                         s->mb_var[s->mb_stride*mb_y+mb_x], s->avg_mb_var);
2998             }
2999 #endif
3000     /* DCT & quantize */
3001     if(s->out_format==FMT_MJPEG){
3002         for(i=0;i<6;i++) {
3003             int overflow;
3004             s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, 8, &overflow);
3005             if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
3006         }
3007     }else{
3008         for(i=0;i<6;i++) {
3009             if(!skip_dct[i]){
3010                 int overflow;
3011                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
3012             // FIXME we could decide to change the quantizer instead of clipping
3013             // JS: I don't think that would be a good idea; it could lower quality instead
3014             //     of improving it. Only INTRADC clipping deserves a quantizer change.
3015                 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
3016             }else
3017                 s->block_last_index[i]= -1;
3018         }
3019         if(s->luma_elim_threshold && !s->mb_intra)
3020             for(i=0; i<4; i++)
3021                 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
3022         if(s->chroma_elim_threshold && !s->mb_intra)
3023             for(i=4; i<6; i++)
3024                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
3025     }
3026
3027     if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
3028         s->block_last_index[4]=
3029         s->block_last_index[5]= 0;
3030         s->block[4][0]=
3031         s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale;
3032     }
3033
3034     /* huffman encode */
3035     switch(s->codec_id){ //FIXME funct ptr could be slightly faster
3036     case CODEC_ID_MPEG1VIDEO:
3037     case CODEC_ID_MPEG2VIDEO:
3038         mpeg1_encode_mb(s, s->block, motion_x, motion_y); break;
3039 #ifdef CONFIG_RISKY
3040     case CODEC_ID_MPEG4:
3041         mpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
3042     case CODEC_ID_MSMPEG4V2:
3043     case CODEC_ID_MSMPEG4V3:
3044     case CODEC_ID_WMV1:
3045         msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
3046     case CODEC_ID_WMV2:
3047          ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break;
3048     case CODEC_ID_H263:
3049     case CODEC_ID_H263P:
3050     case CODEC_ID_FLV1:
3051     case CODEC_ID_RV10:
3052         h263_encode_mb(s, s->block, motion_x, motion_y); break;
3053 #endif
3054     case CODEC_ID_MJPEG:
3055         mjpeg_encode_mb(s, s->block); break;
3056     default:
3057         assert(0);
3058     }
3059 }
3060
3061 #endif //CONFIG_ENCODERS
3062
3063 /**
3064  * combines the (truncated) bitstream into a complete frame
3065  * @returns -1 if no complete frame could be created, 0 otherwise
3066  */
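/* A sketched, typical caller, assuming some find_frame_end()-style scanner
 * that returns the offset of the next frame start within buf, or
 * END_NOT_FOUND if the frame is still incomplete:
 *
 *     next= find_frame_end(s, buf, buf_size);
 *     if(ff_combine_frame(s, next, &buf, &buf_size) < 0)
 *         return buf_size; // no complete frame yet, the input was buffered
 *     // here buf/buf_size describe exactly one complete frame
 */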
3067 int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size){
3068     ParseContext *pc= &s->parse_context;
3069
3070 #if 0
3071     if(pc->overread){
3072         printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
3073         printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
3074     }
3075 #endif
3076
3077     /* copy overread bytes from the last frame into the buffer */
3078     for(; pc->overread>0; pc->overread--){
3079         pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
3080     }
3081     
3082     pc->last_index= pc->index;
3083
3084     /* copy into the buffer and return */
3085     if(next == END_NOT_FOUND){
3086         pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
3087
3088         memcpy(&pc->buffer[pc->index], *buf, *buf_size);
3089         pc->index += *buf_size;
3090         return -1;
3091     }
3092
3093     *buf_size=
3094     pc->overread_index= pc->index + next;
3095     
3096     /* append to buffer */
3097     if(pc->index){
3098         pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
3099
3100         memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE );
3101         pc->index = 0;
3102         *buf= pc->buffer;
3103     }
3104
3105     /* store overread bytes */
3106     for(;next < 0; next++){
3107         pc->state = (pc->state<<8) | pc->buffer[pc->last_index + next];
3108         pc->overread++;
3109     }
3110
3111 #if 0
3112     if(pc->overread){
3113         printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
3114         printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
3115     }
3116 #endif
3117
3118     return 0;
3119 }
3120
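/**
 * releases all internal/user picture buffers and resets the parse context
 * state.
 */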
3121 void ff_mpeg_flush(AVCodecContext *avctx){
3122     int i;
3123     MpegEncContext *s = avctx->priv_data;
3124     
3125     for(i=0; i<MAX_PICTURE_COUNT; i++){
3126        if(s->picture[i].data[0] && (   s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
3127                                     || s->picture[i].type == FF_BUFFER_TYPE_USER))
3128         avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
3129     }
3130     s->last_picture_ptr = s->next_picture_ptr = NULL;
3131     
3132     s->parse_context.state= -1;
3133     s->parse_context.frame_start_found= 0;
3134     s->parse_context.overread= 0;
3135     s->parse_context.overread_index= 0;
3136     s->parse_context.index= 0;
3137     s->parse_context.last_index= 0;
3138 }
3139
3140 #ifdef CONFIG_ENCODERS
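/**
 * copies the first 'length' bits of src into pb, in 16 bit chunks; used below
 * to splice the bitstream of the winning coding mode candidate back into the
 * main PutBitContext.
 */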
3141 void ff_copy_bits(PutBitContext *pb, uint8_t *src, int length)
3142 {
3143     int words= length>>4;
3144     int bits= length&15;
3145     int i;
3146
3147     if(length==0) return;
3148
3149     for(i=0; i<words; i++) put_bits(pb, 16, be2me_16(((uint16_t*)src)[i]));
3150     put_bits(pb, bits, be2me_16(((uint16_t*)src)[i])>>(16-bits));
3151 }
3152
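/* copy_context_before_encode() / copy_context_after_encode() save and restore
   the encoder state that encode_mb() modifies, so that several coding modes
   can be tried for the same macroblock (see encode_mb_hq()) */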
3153 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
3154     int i;
3155
3156     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
3157
3158     /* mpeg1 */
3159     d->mb_skip_run= s->mb_skip_run;
3160     for(i=0; i<3; i++)
3161         d->last_dc[i]= s->last_dc[i];
3162     
3163     /* statistics */
3164     d->mv_bits= s->mv_bits;
3165     d->i_tex_bits= s->i_tex_bits;
3166     d->p_tex_bits= s->p_tex_bits;
3167     d->i_count= s->i_count;
3168     d->f_count= s->f_count;
3169     d->b_count= s->b_count;
3170     d->skip_count= s->skip_count;
3171     d->misc_bits= s->misc_bits;
3172     d->last_bits= 0;
3173
3174     d->mb_skiped= 0;
3175     d->qscale= s->qscale;
3176 }
3177
3178 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
3179     int i;
3180
3181     memcpy(d->mv, s->mv, 2*4*2*sizeof(int)); 
3182     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
3183     
3184     /* mpeg1 */
3185     d->mb_skip_run= s->mb_skip_run;
3186     for(i=0; i<3; i++)
3187         d->last_dc[i]= s->last_dc[i];
3188     
3189     /* statistics */
3190     d->mv_bits= s->mv_bits;
3191     d->i_tex_bits= s->i_tex_bits;
3192     d->p_tex_bits= s->p_tex_bits;
3193     d->i_count= s->i_count;
3194     d->f_count= s->f_count;
3195     d->b_count= s->b_count;
3196     d->skip_count= s->skip_count;
3197     d->misc_bits= s->misc_bits;
3198
3199     d->mb_intra= s->mb_intra;
3200     d->mb_skiped= s->mb_skiped;
3201     d->mv_type= s->mv_type;
3202     d->mv_dir= s->mv_dir;
3203     d->pb= s->pb;
3204     if(s->data_partitioning){
3205         d->pb2= s->pb2;
3206         d->tex_pb= s->tex_pb;
3207     }
3208     d->block= s->block;
3209     for(i=0; i<6; i++)
3210         d->block_last_index[i]= s->block_last_index[i];
3211     d->interlaced_dct= s->interlaced_dct;
3212     d->qscale= s->qscale;
3213 }
3214
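/**
 * encodes the current MB with the given mode into one of two scratch
 * bitstream/block sets, computes its cost (bit count, plus SSE distortion
 * when mb_decision is FF_MB_DECISION_RD) and keeps the result if it beats
 * the best mode tried so far.
 */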
3215 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, 
3216                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
3217                            int *dmin, int *next_block, int motion_x, int motion_y)
3218 {
3219     int score;
3220     uint8_t *dest_backup[3];
3221     
3222     copy_context_before_encode(s, backup, type);
3223
3224     s->block= s->blocks[*next_block];
3225     s->pb= pb[*next_block];
3226     if(s->data_partitioning){
3227         s->pb2   = pb2   [*next_block];
3228         s->tex_pb= tex_pb[*next_block];
3229     }
3230     
3231     if(*next_block){
3232         memcpy(dest_backup, s->dest, sizeof(s->dest));
3233         s->dest[0] = s->me.scratchpad;
3234         s->dest[1] = s->me.scratchpad + 16;
3235         s->dest[2] = s->me.scratchpad + 16 + 8;
3236         assert(2*s->uvlinesize == s->linesize); //should be no problem for encoding
3237         assert(s->linesize >= 64); //FIXME
3238     }
3239
3240     encode_mb(s, motion_x, motion_y);
3241     
3242     score= get_bit_count(&s->pb);
3243     if(s->data_partitioning){
3244         score+= get_bit_count(&s->pb2);
3245         score+= get_bit_count(&s->tex_pb);
3246     }
3247    
3248     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
3249         MPV_decode_mb(s, s->block);
3250
3251         score *= s->qscale * s->qscale * 109;
3252         score += sse_mb(s) << 7;
3253     }
3254     
3255     if(*next_block){
3256         memcpy(s->dest, dest_backup, sizeof(s->dest));
3257     }
3258
3259     if(score<*dmin){
3260         *dmin= score;
3261         *next_block^=1;
3262
3263         copy_context_after_encode(best, s, type);
3264     }
3265 }
3266                 
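/**
 * sum of squared errors between two w x h blocks; the common 16x16 and 8x8
 * cases are handled by the dsputil sse functions.
 */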
3267 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
3268     uint32_t *sq = squareTbl + 256;
3269     int acc=0;
3270     int x,y;
3271     
3272     if(w==16 && h==16) 
3273         return s->dsp.sse[0](NULL, src1, src2, stride);
3274     else if(w==8 && h==8)
3275         return s->dsp.sse[1](NULL, src1, src2, stride);
3276     
3277     for(y=0; y<h; y++){
3278         for(x=0; x<w; x++){
3279             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
3280         } 
3281     }
3282     
3283     assert(acc>=0);
3284     
3285     return acc;
3286 }
3287
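/**
 * SSE between the source and the reconstruction of the current macroblock
 * (luma and both chroma planes), handling partial MBs at the borders.
 */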
3288 static int sse_mb(MpegEncContext *s){
3289     int w= 16;
3290     int h= 16;
3291
3292     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3293     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3294
3295     if(w==16 && h==16)
3296         return  s->dsp.sse[0](NULL, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize)
3297                +s->dsp.sse[1](NULL, s->new_picture.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize)
3298                +s->dsp.sse[1](NULL, s->new_picture.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize);
3299     else
3300         return  sse(s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
3301                +sse(s, s->new_picture.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
3302                +sse(s, s->new_picture.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
3303 }
3304
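/**
 * encodes one complete picture: runs motion estimation (or measures spatial
 * complexity for intra frames), picks picture type, f_code and qscale, writes
 * the picture header and then encodes every macroblock, optionally trying
 * several coding modes per MB and emitting slice/GOB headers in RTP mode.
 */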
3305 static void encode_picture(MpegEncContext *s, int picture_number)
3306 {
3307     int mb_x, mb_y, pdif = 0;
3308     int i;
3309     int bits;
3310     MpegEncContext best_s, backup_s;
3311     uint8_t bit_buf[2][3000];
3312     uint8_t bit_buf2[2][3000];
3313     uint8_t bit_buf_tex[2][3000];
3314     PutBitContext pb[2], pb2[2], tex_pb[2];
3315
3316     for(i=0; i<2; i++){
3317         init_put_bits(&pb    [i], bit_buf    [i], 3000, NULL, NULL);
3318         init_put_bits(&pb2   [i], bit_buf2   [i], 3000, NULL, NULL);
3319         init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000, NULL, NULL);
3320     }
3321
3322     s->picture_number = picture_number;
3323     
3324     /* Reset the MB variance sums */
3325     s->current_picture.mb_var_sum = 0;
3326     s->current_picture.mc_mb_var_sum = 0;
3327
3328 #ifdef CONFIG_RISKY
3329     /* we need to initialize some time vars before we can encode b-frames */
3330     // RAL: Condition added for MPEG1VIDEO
3331     //FIXME figure out why mpeg1/2 need this !!!
3332     if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->h263_msmpeg4))
3333         ff_set_mpeg4_time(s, s->picture_number); 
3334 #endif
3335         
3336     s->scene_change_score=0;
3337     
3338     s->qscale= (int)(s->frame_qscale + 0.5); //FIXME qscale / ... stuff for ME rate distortion
3339     
3340     if(s->pict_type==I_TYPE){
3341         if(s->msmpeg4_version >= 3) s->no_rounding=1;
3342         else                        s->no_rounding=0;
3343     }else if(s->pict_type!=B_TYPE){
3344         if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
3345             s->no_rounding ^= 1;          
3346     }
3347     
3348     /* Estimate motion for every MB */
3349     s->mb_intra=0; //for the rate distortion & bit compare functions
3350     if(s->pict_type != I_TYPE){
3351         if(s->pict_type != B_TYPE){
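            /* optional motion estimation pre-pass, done in reverse scan order
               before the real pass below */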
3352             if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
3353                 s->me.pre_pass=1;
3354                 s->me.dia_size= s->avctx->pre_dia_size;
3355
3356                 for(mb_y=s->mb_height-1; mb_y >=0 ; mb_y--) {
3357                     for(mb_x=s->mb_width-1; mb_x >=0 ; mb_x--) {
3358                         s->mb_x = mb_x;
3359                         s->mb_y = mb_y;
3360                         ff_pre_estimate_p_frame_motion(s, mb_x, mb_y);
3361                     }
3362                 }
3363                 s->me.pre_pass=0;
3364             }
3365         }
3366
3367         s->me.dia_size= s->avctx->dia_size;
3368         for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3369             s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
3370             s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
3371             s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
3372             s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
3373             for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3374                 s->mb_x = mb_x;
3375                 s->mb_y = mb_y;
3376                 s->block_index[0]+=2;
3377                 s->block_index[1]+=2;
3378                 s->block_index[2]+=2;
3379                 s->block_index[3]+=2;
3380                 
3381                 /* compute motion vector & mb_type and store in context */
3382                 if(s->pict_type==B_TYPE)
3383                     ff_estimate_b_frame_motion(s, mb_x, mb_y);
3384                 else
3385                     ff_estimate_p_frame_motion(s, mb_x, mb_y);
3386             }
3387         }
3388     }else /* if(s->pict_type == I_TYPE) */{
3389         /* I-Frame */
3390         //FIXME do we need to zero them?
3391         memset(s->motion_val[0], 0, sizeof(int16_t)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2);
3392         memset(s->p_mv_table   , 0, sizeof(int16_t)*(s->mb_stride)*s->mb_height*2);
3393         memset(s->mb_type      , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);
3394         
3395         if(!s->fixed_qscale){
3396             /* finding spatial complexity for I-frame rate control */
3397             for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3398                 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3399                     int xx = mb_x * 16;
3400                     int yy = mb_y * 16;
3401                     uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
3402                     int varc;
3403                     int sum = s->dsp.pix_sum(pix, s->linesize);
3404     
3405                     varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
3406
3407                     s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
3408                     s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
3409                     s->current_picture.mb_var_sum    += varc;
3410                 }
3411             }
3412         }
3413     }
3414     emms_c();
3415
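    /* if motion estimation flagged a scene change, code this frame as an
       intra frame instead */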
3416     if(s->scene_change_score > 0 && s->pict_type == P_TYPE){
3417         s->pict_type= I_TYPE;
3418         memset(s->mb_type   , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);
3419 //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3420     }
3421
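    /* pick f_code/b_code large enough for the estimated motion vectors and
       clamp the vectors that still do not fit */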
3422     if(!s->umvplus){
3423         if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) {
3424             s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER);
3425         
3426             ff_fix_long_p_mvs(s);
3427         }
3428
3429         if(s->pict_type==B_TYPE){
3430             int a, b;
3431
3432             a = ff_get_best_fcode(s, s->b_forw_mv_table, MB_TYPE_FORWARD);
3433             b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, MB_TYPE_BIDIR);
3434             s->f_code = FFMAX(a, b);
3435
3436             a = ff_get_best_fcode(s, s->b_back_mv_table, MB_TYPE_BACKWARD);
3437             b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, MB_TYPE_BIDIR);
3438             s->b_code = FFMAX(a, b);
3439
3440             ff_fix_long_b_mvs(s, s->b_forw_mv_table, s->f_code, MB_TYPE_FORWARD);
3441             ff_fix_long_b_mvs(s, s->b_back_mv_table, s->b_code, MB_TYPE_BACKWARD);
3442             ff_fix_long_b_mvs(s, s->b_bidir_forw_mv_table, s->f_code, MB_TYPE_BIDIR);
3443             ff_fix_long_b_mvs(s, s->b_bidir_back_mv_table, s->b_code, MB_TYPE_BIDIR);
3444         }
3445     }
3446     
3447     if (s->fixed_qscale) 
3448         s->frame_qscale = s->current_picture.quality;
3449     else
3450         s->frame_qscale = ff_rate_estimate_qscale(s);
3451
3452     if(s->adaptive_quant){
3453 #ifdef CONFIG_RISKY
3454         switch(s->codec_id){
3455         case CODEC_ID_MPEG4:
3456             ff_clean_mpeg4_qscales(s);
3457             break;
3458         case CODEC_ID_H263:
3459         case CODEC_ID_H263P:
3460         case CODEC_ID_FLV1:
3461             ff_clean_h263_qscales(s);
3462             break;
3463         }
3464 #endif
3465
3466         s->qscale= s->current_picture.qscale_table[0];
3467     }else
3468         s->qscale= (int)(s->frame_qscale + 0.5);
3469         
3470     if (s->out_format == FMT_MJPEG) {
3471         /* for mjpeg, we do include qscale in the matrix */
3472         s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
3473         for(i=1;i<64;i++){
3474             int j= s->dsp.idct_permutation[i];
3475
3476             s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3477         }
3478         convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, 
3479                        s->q_intra_matrix16_bias, s->intra_matrix, s->intra_quant_bias, 8, 8);
3480     }
3481     
3482     //FIXME var duplication
3483     s->current_picture.key_frame= s->pict_type == I_TYPE;
3484     s->current_picture.pict_type= s->pict_type;
3485
3486     if(s->current_picture.key_frame)
3487         s->picture_in_gop_number=0;
3488
3489     s->last_bits= get_bit_count(&s->pb);
3490     switch(s->out_format) {
3491     case FMT_MJPEG:
3492         mjpeg_picture_header(s);
3493         break;
3494 #ifdef CONFIG_RISKY
3495     case FMT_H263:
3496         if (s->codec_id == CODEC_ID_WMV2) 
3497             ff_wmv2_encode_picture_header(s, picture_number);
3498         else if (s->h263_msmpeg4) 
3499             msmpeg4_encode_picture_header(s, picture_number);
3500         else if (s->h263_pred)
3501             mpeg4_encode_picture_header(s, picture_number);
3502         else if (s->h263_rv10) 
3503             rv10_encode_picture_header(s, picture_number);
3504         else if (s->codec_id == CODEC_ID_FLV1)
3505             ff_flv_encode_picture_header(s, picture_number);
3506         else
3507             h263_encode_picture_header(s, picture_number);
3508         break;
3509 #endif
3510     case FMT_MPEG1:
3511         mpeg1_encode_picture_header(s, picture_number);
3512         break;
3513     }
3514     bits= get_bit_count(&s->pb);
3515     s->header_bits= bits - s->last_bits;
3516     s->last_bits= bits;
3517     s->mv_bits=0;
3518     s->misc_bits=0;
3519     s->i_tex_bits=0;
3520     s->p_tex_bits=0;
3521     s->i_count=0;
3522     s->f_count=0;
3523     s->b_count=0;
3524     s->skip_count=0;
3525
3526     for(i=0; i<3; i++){
3527         /* init last dc values */
3528         /* note: quant matrix value (8) is implied here */
3529         s->last_dc[i] = 128;
3530         
3531         s->current_picture_ptr->error[i] = 0;
3532     }
3533     s->mb_skip_run = 0;
3534     s->last_mv[0][0][0] = 0;
3535     s->last_mv[0][0][1] = 0;
3536     s->last_mv[1][0][0] = 0;
3537     s->last_mv[1][0][1] = 0;
3538      
3539     s->last_mv_dir = 0;
3540
3541 #ifdef CONFIG_RISKY
3542     switch(s->codec_id){
3543     case CODEC_ID_H263:
3544     case CODEC_ID_H263P:
3545     case CODEC_ID_FLV1:
3546         s->gob_index = ff_h263_get_gob_height(s);
3547         break;
3548     case CODEC_ID_MPEG4:
3549         if(s->partitioned_frame)
3550             ff_mpeg4_init_partitions(s);
3551         break;
3552     }
3553 #endif
3554
3555     s->resync_mb_x=0;
3556     s->resync_mb_y=0;
3557     s->first_slice_line = 1;
3558     s->ptr_lastgob = s->pb.buf;
3559     for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3560         s->mb_x=0;
3561         s->mb_y= mb_y;
3562
3563         s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
3564         s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
3565         ff_init_block_index(s);
3566         
3567         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3568             const int xy= mb_y*s->mb_stride + mb_x;
3569             int mb_type= s->mb_type[xy];
3570 //            int d;
3571             int dmin= INT_MAX;
3572
3573             s->mb_x = mb_x;
3574             ff_update_block_index(s);
3575
3576             /* write GOB / video packet header */
3577 #ifdef CONFIG_RISKY
3578             if(s->rtp_mode && mb_y + mb_x>0){
3579                 int current_packet_size, is_gob_start;
3580                 
3581                 current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob;
3582                 is_gob_start=0;
3583                 
3584                 if(s->codec_id==CODEC_ID_MPEG4){
3585                     if(current_packet_size >= s->rtp_payload_size){
3586
3587                         if(s->partitioned_frame){
3588                             ff_mpeg4_merge_partitions(s);
3589                             ff_mpeg4_init_partitions(s);
3590                         }
3591                         ff_mpeg4_encode_video_packet_header(s);
3592
3593                         if(s->flags&CODEC_FLAG_PASS1){
3594                             int bits= get_bit_count(&s->pb);
3595                             s->misc_bits+= bits - s->last_bits;
3596                             s->last_bits= bits;
3597                         }
3598                         ff_mpeg4_clean_buffers(s);
3599                         is_gob_start=1;
3600                     }
3601                 }else if(s->codec_id==CODEC_ID_MPEG1VIDEO){
3602                     if(   current_packet_size >= s->rtp_payload_size 
3603                        && s->mb_skip_run==0){
3604                         ff_mpeg1_encode_slice_header(s);
3605                         ff_mpeg1_clean_buffers(s);
3606                         is_gob_start=1;
3607                     }
3608                 }else if(s->codec_id==CODEC_ID_MPEG2VIDEO){
3609                     if(   (   current_packet_size >= s->rtp_payload_size || mb_x==0)
3610                        && s->mb_skip_run==0){
3611                         ff_mpeg1_encode_slice_header(s);
3612                         ff_mpeg1_clean_buffers(s);
3613                         is_gob_start=1;
3614                     }
3615                 }else{
3616                     if(current_packet_size >= s->rtp_payload_size
3617                        && s->mb_x==0 && s->mb_y%s->gob_index==0){
3618                        
3619                         h263_encode_gob_header(s, mb_y);                       
3620                         is_gob_start=1;
3621                     }
3622                 }
3623
3624                 if(is_gob_start){
3625                     s->ptr_lastgob = pbBufPtr(&s->pb);
3626                     s->first_slice_line=1;
3627                     s->resync_mb_x=mb_x;
3628                     s->resync_mb_y=mb_y;
3629                 }
3630             }
3631 #endif
3632
3633             if(  (s->resync_mb_x   == s->mb_x)
3634                && s->resync_mb_y+1 == s->mb_y){
3635                 s->first_slice_line=0; 
3636             }
3637
3638             s->mb_skiped=0;
3639
3640             if(mb_type & (mb_type-1)){ // more than one MB type possible: try each candidate and keep the cheapest
3641                 int next_block=0;
3642                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3643
3644                 copy_context_before_encode(&backup_s, s, -1);
3645                 backup_s.pb= s->pb;
3646                 best_s.data_partitioning= s->data_partitioning;
3647                 best_s.partitioned_frame= s->partitioned_frame;
3648                 if(s->data_partitioning){
3649                     backup_s.pb2= s->pb2;
3650                     backup_s.tex_pb= s->tex_pb;
3651                 }
3652
3653                 if(mb_type&MB_TYPE_INTER){
3654                     s->mv_dir = MV_DIR_FORWARD;
3655                     s->mv_type = MV_TYPE_16X16;
3656                     s->mb_intra= 0;
3657                     s->mv[0][0][0] = s->p_mv_table[xy][0];
3658                     s->mv[0][0][1] = s->p_mv_table[xy][1];
3659                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER, pb, pb2, tex_pb, 
3660                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3661                 }
3662                 if(mb_type&MB_TYPE_INTER4V){                 
3663                     s->mv_dir = MV_DIR_FORWARD;
3664                     s->mv_type = MV_TYPE_8X8;
3665                     s->mb_intra= 0;
3666                     for(i=0; i<4; i++){
3667                         s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3668                         s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3669                     }
3670                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER4V, pb, pb2, tex_pb, 
3671                                  &dmin, &next_block, 0, 0);
3672                 }
3673                 if(mb_type&MB_TYPE_FORWARD){
3674                     s->mv_dir = MV_DIR_FORWARD;
3675                     s->mv_type = MV_TYPE_16X16;
3676                     s->mb_intra= 0;
3677                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3678                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3679                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_FORWARD, pb, pb2, tex_pb, 
3680                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3681                 }
3682                 if(mb_type&MB_TYPE_BACKWARD){
3683                     s->mv_dir = MV_DIR_BACKWARD;
3684                     s->mv_type = MV_TYPE_16X16;
3685                     s->mb_intra= 0;
3686                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3687                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3688                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BACKWARD, pb, pb2, tex_pb, 
3689                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3690                 }
3691                 if(mb_type&MB_TYPE_BIDIR){
3692                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3693                     s->mv_type = MV_TYPE_16X16;
3694                     s->mb_intra= 0;
3695                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3696                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3697                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3698                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3699                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BIDIR, pb, pb2, tex_pb, 
3700                                  &dmin, &next_block, 0, 0);
3701                 }
3702                 if(mb_type&MB_TYPE_DIRECT){
3703                     int mx= s->b_direct_mv_table[xy][0];
3704                     int my= s->b_direct_mv_table[xy][1];
3705                     
3706                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3707                     s->mb_intra= 0;
3708 #ifdef CONFIG_RISKY
3709                     ff_mpeg4_set_direct_mv(s, mx, my);
3710 #endif
3711                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_DIRECT, pb, pb2, tex_pb, 
3712                                  &dmin, &next_block, mx, my);
3713                 }
3714                 if(mb_type&MB_TYPE_INTRA){
3715                     s->mv_dir = 0;
3716                     s->mv_type = MV_TYPE_16X16;
3717                     s->mb_intra= 1;
3718                     s->mv[0][0][0] = 0;
3719                     s->mv[0][0][1] = 0;
3720                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTRA, pb, pb2, tex_pb, 
3721                                  &dmin, &next_block, 0, 0);
3722                     if(s->h263_pred || s->h263_aic){
3723                         if(best_s.mb_intra)
3724                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3725                         else
3726                             ff_clean_intra_table_entries(s); //old mode?
3727                     }
3728                 }
3729                 copy_context_after_encode(s, &best_s, -1);
3730                 
3731                 pb_bits_count= get_bit_count(&s->pb);
3732                 flush_put_bits(&s->pb);
3733                 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3734                 s->pb= backup_s.pb;
3735                 
3736                 if(s->data_partitioning){
3737                     pb2_bits_count= get_bit_count(&s->pb2);
3738                     flush_put_bits(&s->pb2);
3739                     ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3740                     s->pb2= backup_s.pb2;
3741                     
3742                     tex_pb_bits_count= get_bit_count(&s->tex_pb);
3743                     flush_put_bits(&s->tex_pb);
3744                     ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3745                     s->tex_pb= backup_s.tex_pb;
3746                 }
3747                 s->last_bits= get_bit_count(&s->pb);
3748                 
3749                 if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
3750                     ff_h263_update_motion_val(s);
3751         
3752                 if(next_block==0){
3753                     s->dsp.put_pixels_tab[0][0](s->dest[0], s->me.scratchpad     , s->linesize  ,16);
3754                     s->dsp.put_pixels_tab[1][0](s->dest[1], s->me.scratchpad + 16, s->uvlinesize, 8);
3755                     s->dsp.put_pixels_tab[1][0](s->dest[2], s->me.scratchpad + 24, s->uvlinesize, 8);
3756                 }
3757
3758                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3759                     MPV_decode_mb(s, s->block);
3760             } else {
3761                 int motion_x, motion_y;
3762                 int intra_score;
3763                 int inter_score= s->current_picture.mb_cmp_score[mb_x + mb_y*s->mb_stride];
3764                 
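              /* estimate the cost of coding this MB as intra (its deviation
                 from a flat, mean valued block), add a per comparison function
                 bias and switch to intra coding if that beats the inter score
                 from motion estimation */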
3765               if(s->avctx->mb_decision==FF_MB_DECISION_SIMPLE && s->pict_type==P_TYPE){ //FIXME check if the mess is useful at all
3766                 /* get luma score */
3767                 if((s->avctx->mb_cmp&0xFF)==FF_CMP_SSE){
3768                     intra_score= (s->current_picture.mb_var[mb_x + mb_y*s->mb_stride]<<8) - 500; //FIXME don't scale it down so we don't have to fix it
3769                 }else{
3770                     uint8_t *dest_y;
3771
3772                     int mean= s->current_picture.mb_mean[mb_x + mb_y*s->mb_stride]; //FIXME
3773                     mean*= 0x01010101;
3774                     
3775                     dest_y  = s->new_picture.data[0] + (mb_y * 16 * s->linesize    ) + mb_x * 16;
3776                 
3777                     for(i=0; i<16; i++){
3778                         *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 0]) = mean;
3779                         *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 4]) = mean;
3780                         *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 8]) = mean;
3781                         *(uint32_t*)(&s->me.scratchpad[i*s->linesize+12]) = mean;
3782                     }
3783
3784                     s->mb_intra=1;
3785                     intra_score= s->dsp.mb_cmp[0](s, s->me.scratchpad, dest_y, s->linesize);
3786                                         
3787 /*                    printf("intra:%7d inter:%7d var:%7d mc_var.%7d\n", intra_score>>8, inter_score>>8, 
3788                         s->current_picture.mb_var[mb_x + mb_y*s->mb_stride],
3789                         s->current_picture.mc_mb_var[mb_x + mb_y*s->mb_stride]);*/
3790                 }
3791                 
3792                 /* get chroma score */
3793                 if(s->avctx->mb_cmp&FF_CMP_CHROMA){
3794                     int i;
3795                     
3796                     s->mb_intra=1;
3797                     for(i=1; i<3; i++){
3798                         uint8_t *dest_c;
3799                         int mean;
3800                         
3801                         if(s->out_format == FMT_H263){
3802                             mean= (s->dc_val[i][mb_x + (mb_y+1)*(s->mb_width+2)] + 4)>>3; //FIXME not exact but simple ;)
3803                         }else{
3804                             mean= (s->last_dc[i] + 4)>>3;
3805                         }
3806                         dest_c = s->new_picture.data[i] + (mb_y * 8  * (s->uvlinesize)) + mb_x * 8;
3807                         
3808                         mean*= 0x01010101;
3809                         for(i=0; i<8; i++){
3810                             *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 0]) = mean;
3811                             *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 4]) = mean;
3812                         }
3813                         
3814                         intra_score+= s->dsp.mb_cmp[1](s, s->me.scratchpad, dest_c, s->uvlinesize);
3815                     }                
3816                 }
3817
3818                 /* bias */
3819                 switch(s->avctx->mb_cmp&0xFF){
3820                 default:
3821                 case FF_CMP_SAD:
3822                     intra_score+= 32*s->qscale;
3823                     break;
3824                 case FF_CMP_SSE:
3825                     intra_score+= 24*s->qscale*s->qscale;
3826                     break;
3827                 case FF_CMP_SATD:
3828                     intra_score+= 96*s->qscale;
3829                     break;
3830                 case FF_CMP_DCT:
3831                     intra_score+= 48*s->qscale;
3832                     break;
3833                 case FF_CMP_BIT:
3834                     intra_score+= 16;
3835                     break;
3836                 case FF_CMP_PSNR:
3837                 case FF_CMP_RD:
3838                     intra_score+= (s->qscale*s->qscale*109*8 + 64)>>7;
3839                     break;
3840                 }
3841
3842                 if(intra_score < inter_score)
3843                     mb_type= MB_TYPE_INTRA;
3844               }  
3845                 
3846                 s->mv_type=MV_TYPE_16X16;
3847                 // only one MB-Type possible
3848                 
3849                 switch(mb_type){
3850                 case MB_TYPE_INTRA:
3851                     s->mv_dir = 0;
3852                     s->mb_intra= 1;
3853                     motion_x= s->mv[0][0][0] = 0;
3854                     motion_y= s->mv[0][0][1] = 0;
3855                     break;
3856                 case MB_TYPE_INTER:
3857                     s->mv_dir = MV_DIR_FORWARD;
3858                     s->mb_intra= 0;
3859                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3860                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3861                     break;
3862                 case MB_TYPE_INTER4V:
3863                     s->mv_dir = MV_DIR_FORWARD;
3864                     s->mv_type = MV_TYPE_8X8;
3865                     s->mb_intra= 0;
3866                     for(i=0; i<4; i++){
3867                         s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3868                         s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3869                     }
3870                     motion_x= motion_y= 0;
3871                     break;
3872                 case MB_TYPE_DIRECT:
3873                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3874                     s->mb_intra= 0;
3875                     motion_x=s->b_direct_mv_table[xy][0];
3876                     motion_y=s->b_direct_mv_table[xy][1];
3877 #ifdef CONFIG_RISKY
3878                     ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3879 #endif
3880                     break;
3881                 case MB_TYPE_BIDIR:
3882                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3883                     s->mb_intra= 0;
3884                     motion_x=0;
3885                     motion_y=0;
3886                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3887                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3888                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3889                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3890                     break;
3891                 case MB_TYPE_BACKWARD:
3892                     s->mv_dir = MV_DIR_BACKWARD;
3893                     s->mb_intra= 0;
3894                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3895                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3896                     break;
3897                 case MB_TYPE_FORWARD:
3898                     s->mv_dir = MV_DIR_FORWARD;
3899                     s->mb_intra= 0;
3900                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3901                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3902 //                    printf(" %d %d ", motion_x, motion_y);
3903                     break;
3904                 default:
3905                     motion_x=motion_y=0; //gcc warning fix
3906                     printf("illegal MB type\n");
3907                 }
3908
3909                 encode_mb(s, motion_x, motion_y);
3910
3911                 // RAL: Update last macroblock type
3912                 s->last_mv_dir = s->mv_dir;
3913             
3914                 if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
3915                     ff_h263_update_motion_val(s);
3916
3917                 MPV_decode_mb(s, s->block);
3918             }
3919
3920             /* clear the MV table entry of intra MBs in I/P/S frames; direct mode in B-frames reads these vectors */
3921             if(s->mb_intra /* && I,P,S_TYPE */){
3922                 s->p_mv_table[xy][0]=0;
3923                 s->p_mv_table[xy][1]=0;
3924             }
3925             
3926             if(s->flags&CODEC_FLAG_PSNR){
3927                 int w= 16;
3928                 int h= 16;
3929
3930                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3931                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3932
3933                 s->current_picture_ptr->error[0] += sse(
3934                     s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3935                     s->dest[0], w, h, s->linesize);
3936                 s->current_picture_ptr->error[1] += sse(
3937                     s, s->new_picture.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
3938                     s->dest[1], w>>1, h>>1, s->uvlinesize);
3939                 s->current_picture_ptr->error[2] += sse(
3940                     s, s->new_picture    .data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
3941                     s->dest[2], w>>1, h>>1, s->uvlinesize);
3942             }
3943 //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, get_bit_count(&s->pb));
3944         }
3945     }
3946     emms_c();
3947
3948 #ifdef CONFIG_RISKY
3949     if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
3950         ff_mpeg4_merge_partitions(s);
3951
3952     if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
3953         msmpeg4_encode_ext_header(s);
3954
3955     if(s->codec_id==CODEC_ID_MPEG4) 
3956         ff_mpeg4_stuffing(&s->pb);
3957 #endif
3958
3959     //if (s->gob_number)
3960     //    fprintf(stderr,"\nNumber of GOB: %d", s->gob_number);
3961     
3962     /* Send the last GOB if RTP */    
3963     if (s->rtp_mode) {
3964         flush_put_bits(&s->pb);
3965         pdif = pbBufPtr(&s->pb) - s->ptr_lastgob;
3966         /* Call the RTP callback to send the last GOB */
3967         if (s->rtp_callback)
3968             s->rtp_callback(s->ptr_lastgob, pdif, s->gob_number);
3969         s->ptr_lastgob = pbBufPtr(&s->pb);
3970         //fprintf(stderr,"\nGOB: %2d size: %d (last)", s->gob_number, pdif);
3971     }
3972 }
3973
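/**
 * rate-distortion optimized ("trellis") quantization: after the forward DCT
 * each coefficient gets up to two candidate quantized levels, and the
 * dynamic programming search over score_tab below picks the run/level
 * combination with the lowest bits + lambda * error cost.
 */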
3974 static int dct_quantize_trellis_c(MpegEncContext *s, 
3975                         DCTELEM *block, int n,
3976                         int qscale, int *overflow){
3977     const int *qmat;
3978     const uint8_t *scantable= s->intra_scantable.scantable;
3979     int max=0;
3980     unsigned int threshold1, threshold2;
3981     int bias=0;
3982     int run_tab[65];
3983     int level_tab[65];
3984     int score_tab[65];
3985     int last_run=0;
3986     int last_level=0;
3987     int last_score= 0;
3988     int last_i= 0;
3989     int coeff[3][64];
3990     int coeff_count[64];
3991     int lambda, qmul, qadd, start_i, last_non_zero, i;
3992     const int esc_length= s->ac_esc_length;
3993     uint8_t * length;
3994     uint8_t * last_length;
3995     int score_limit=0;
3996     int left_limit= 0;
3997         
3998     s->dsp.fdct (block);
3999
4000     qmul= qscale*16;
4001     qadd= ((qscale-1)|1)*8;
4002
4003     if (s->mb_intra) {
4004         int q;
4005         if (!s->h263_aic) {
4006             if (n < 4)
4007                 q = s->y_dc_scale;
4008             else
4009                 q = s->c_dc_scale;
4010             q = q << 3;
4011         } else{
4012             /* For AIC we skip quant/dequant of INTRADC */
4013             q = 1 << 3;
4014             qadd=0;
4015         }
4016             
4017         /* note: block[0] is assumed to be positive */
4018         block[0] = (block[0] + (q >> 1)) / q;
4019         start_i = 1;
4020         last_non_zero = 0;
4021         qmat = s->q_intra_matrix[qscale];
4022         if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4023             bias= 1<<(QMAT_SHIFT-1);
4024         length     = s->intra_ac_vlc_length;
4025         last_length= s->intra_ac_vlc_last_length;
4026     } else {
4027         start_i = 0;
4028         last_non_zero = -1;
4029         qmat = s->q_inter_matrix[qscale];
4030         length     = s->inter_ac_vlc_length;
4031         last_length= s->inter_ac_vlc_last_length;
4032     }
4033
4034     threshold1= (1<<QMAT_SHIFT) - bias - 1;
4035     threshold2= (threshold1<<1);
4036
4037     for(i=start_i; i<64; i++) {
4038         const int j = scantable[i];
4039         const int k= i-start_i;
4040         int level = block[j];
4041         level = level * qmat[j];
4042
4043 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
4044 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
4045         if(((unsigned)(level+threshold1))>threshold2){
4046             if(level>0){
4047                 level= (bias + level)>>QMAT_SHIFT;
4048                 coeff[0][k]= level;
4049                 coeff[1][k]= level-1;
4050 //                coeff[2][k]= level-2;
4051             }else{
4052                 level= (bias - level)>>QMAT_SHIFT;
4053                 coeff[0][k]= -level;
4054                 coeff[1][k]= -level+1;
4055 //                coeff[2][k]= -level+2;
4056             }
4057             coeff_count[k]= FFMIN(level, 2);
4058             max |=level;
4059             last_non_zero = i;
4060         }else{
4061             coeff[0][k]= (level>>31)|1;
4062             coeff_count[k]= 1;
4063         }
4064     }
4065     
4066     *overflow= s->max_qcoeff < max; //overflow might have happened
4067     
4068     if(last_non_zero < start_i){
4069         memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
4070         return last_non_zero;
4071     }
4072
4073     lambda= (qscale*qscale*64*105 + 64)>>7; //FIXME finetune
4074         
4075     score_tab[0]= 0;
4076     for(i=0;