1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard.
4  *
5  * This library is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU Lesser General Public
7  * License as published by the Free Software Foundation; either
8  * version 2 of the License, or (at your option) any later version.
9  *
10  * This library is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * Lesser General Public License for more details.
14  *
15  * You should have received a copy of the GNU Lesser General Public
16  * License along with this library; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
18  *
19  * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
20  */
21  
22 /**
23  * @file mpegvideo.c
24  * The simplest mpeg encoder (well, it was the simplest!).
25  */ 
26  
27 #include <ctype.h>
28 #include <limits.h>
29 #include "avcodec.h"
30 #include "dsputil.h"
31 #include "mpegvideo.h"
32
33 #ifdef USE_FASTMEMCPY
34 #include "fastmemcpy.h"
35 #endif
36
37 //#undef NDEBUG
38 //#include <assert.h>
39
40 #ifdef CONFIG_ENCODERS
41 static void encode_picture(MpegEncContext *s, int picture_number);
42 #endif //CONFIG_ENCODERS
43 static void dct_unquantize_mpeg1_c(MpegEncContext *s, 
44                                    DCTELEM *block, int n, int qscale);
45 static void dct_unquantize_mpeg2_c(MpegEncContext *s,
46                                    DCTELEM *block, int n, int qscale);
47 static void dct_unquantize_h263_c(MpegEncContext *s, 
48                                   DCTELEM *block, int n, int qscale);
49 static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
50 #ifdef CONFIG_ENCODERS
51 static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
52 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
53 static int sse_mb(MpegEncContext *s);
54 #endif //CONFIG_ENCODERS
55
56 #ifdef HAVE_XVMC
57 extern int  XVMC_field_start(MpegEncContext*s, AVCodecContext *avctx);
58 extern void XVMC_field_end(MpegEncContext *s);
59 extern void XVMC_decode_mb(MpegEncContext *s, DCTELEM block[6][64]);
60 #endif
61
62 void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;
63
64
65 /* enable all paranoid tests for rounding, overflows, etc... */
66 //#define PARANOID
67
68 //#define DEBUG
69
70
71 /* for jpeg fast DCT */
72 #define CONST_BITS 14
73
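/* post-scale factors of the AAN fast DCT in 2.14 fixed point; convert_matrix()
   folds them into the quantizer multipliers for the fdct_ifast path */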
74 static const uint16_t aanscales[64] = {
75     /* precomputed values scaled up by 14 bits */
76     16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
77     22725, 31521, 29692, 26722, 22725, 17855, 12299,  6270,
78     21407, 29692, 27969, 25172, 21407, 16819, 11585,  5906,
79     19266, 26722, 25172, 22654, 19266, 15137, 10426,  5315,
80     16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
81     12873, 17855, 16819, 15137, 12873, 10114,  6967,  3552,
82     8867 , 12299, 11585, 10426,  8867,  6967,  4799,  2446,
83     4520 ,  6270,  5906,  5315,  4520,  3552,  2446,  1247
84 };
85
86 static const uint8_t h263_chroma_roundtab[16] = {
87 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
88     0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
89 };
90
91 #ifdef CONFIG_ENCODERS
92 static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL;
93 static uint8_t default_fcode_tab[MAX_MV*2+1];
94
95 enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
96
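/**
 * builds the per-qscale quantizer multiplier tables, so the quantizer can
 * replace the division by (qscale * quant_matrix[i]) with a multiply and a
 * shift, roughly: level = (coeff * qmat[qscale][i]) >> QMAT_SHIFT.
 * For fdct_ifast the AAN post-scale factor is folded into the multiplier; for
 * the remaining DCTs the 16-bit qmat16/qmat16_bias tables used by the MMX
 * quantizer are filled as well.
 */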
97 static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[64], uint16_t (*qmat16_bias)[64],
98                            const uint16_t *quant_matrix, int bias, int qmin, int qmax)
99 {
100     int qscale;
101
102     for(qscale=qmin; qscale<=qmax; qscale++){
103         int i;
104         if (s->dsp.fdct == ff_jpeg_fdct_islow) {
105             for(i=0;i<64;i++) {
106                 const int j= s->dsp.idct_permutation[i];
107                 /* 16 <= qscale * quant_matrix[i] <= 7905 */
108                 /* 19952         <= aanscales[i] * qscale * quant_matrix[i]           <= 249205026 */
109                 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
110                 /* 3444240       >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
111                 
112                 qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / 
113                                 (qscale * quant_matrix[j]));
114             }
115         } else if (s->dsp.fdct == fdct_ifast) {
116             for(i=0;i<64;i++) {
117                 const int j= s->dsp.idct_permutation[i];
118                 /* 16 <= qscale * quant_matrix[i] <= 7905 */
119                 /* 19952         <= aanscales[i] * qscale * quant_matrix[i]           <= 249205026 */
120                 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
121                 /* 3444240       >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
122                 
123                 qmat[qscale][i] = (int)((uint64_t_C(1) << (QMAT_SHIFT + 14)) / 
124                                 (aanscales[i] * qscale * quant_matrix[j]));
125             }
126         } else {
127             for(i=0;i<64;i++) {
128                 const int j= s->dsp.idct_permutation[i];
129                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
130                    So 16           <= qscale * quant_matrix[i]             <= 7905
131                    so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
132                    so 32768        >= (1<<19) / (qscale * quant_matrix[i]) >= 67
133                 */
134                 qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
135 //                qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
136                 qmat16[qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);
137
138                 if(qmat16[qscale][i]==0 || qmat16[qscale][i]==128*256) qmat16[qscale][i]=128*256-1;
139                 qmat16_bias[qscale][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][i]);
140             }
141         }
142     }
143 }
144 #endif //CONFIG_ENCODERS
145
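/**
 * initializes a ScanTable: stores the source scan order, applies the IDCT
 * permutation to it and precomputes raster_end[] (the running maximum of the
 * permuted indices up to each scan position).
 */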
146 void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
147     int i;
148     int end;
149     
150     st->scantable= src_scantable;
151
152     for(i=0; i<64; i++){
153         int j;
154         j = src_scantable[i];
155         st->permutated[i] = permutation[j];
156 #ifdef ARCH_POWERPC
157         st->inverse[j] = i;
158 #endif
159     }
160     
161     end=-1;
162     for(i=0; i<64; i++){
163         int j;
164         j = st->permutated[i];
165         if(j>end) end=j;
166         st->raster_end[i]= end;
167     }
168 }
169
170 /* init common dct for both encoder and decoder */
171 int DCT_common_init(MpegEncContext *s)
172 {
173     s->dct_unquantize_h263 = dct_unquantize_h263_c;
174     s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_c;
175     s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_c;
176
177 #ifdef CONFIG_ENCODERS
178     s->dct_quantize= dct_quantize_c;
179 #endif
180         
181 #ifdef HAVE_MMX
182     MPV_common_init_mmx(s);
183 #endif
184 #ifdef ARCH_ALPHA
185     MPV_common_init_axp(s);
186 #endif
187 #ifdef HAVE_MLIB
188     MPV_common_init_mlib(s);
189 #endif
190 #ifdef HAVE_MMI
191     MPV_common_init_mmi(s);
192 #endif
193 #ifdef ARCH_ARMV4L
194     MPV_common_init_armv4l(s);
195 #endif
196 #ifdef ARCH_POWERPC
197     MPV_common_init_ppc(s);
198 #endif
199
200 #ifdef CONFIG_ENCODERS
201     s->fast_dct_quantize= s->dct_quantize;
202
203     if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
204         s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
205     }
206
207 #endif //CONFIG_ENCODERS
208
209     /* load & permute scantables
210        note: only wmv uses different ones 
211     */
212     ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
213     ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
214     ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
215     ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
216
217     s->picture_structure= PICT_FRAME;
218     
219     return 0;
220 }
221
222 /**
223  * allocates a Picture
224  * The pixels are allocated/set by calling get_buffer() if shared=0
225  */
226 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
227     const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesn't sig11
228     const int mb_array_size= s->mb_stride*s->mb_height;
229     int i;
230     
231     if(shared){
232         assert(pic->data[0]);
233         assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
234         pic->type= FF_BUFFER_TYPE_SHARED;
235     }else{
236         int r;
237         
238         assert(!pic->data[0]);
239         
240         r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
241         
242         if(r<0 || !pic->age || !pic->type || !pic->data[0]){
243             fprintf(stderr, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
244             return -1;
245         }
246
247         if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
248             fprintf(stderr, "get_buffer() failed (stride changed)\n");
249             return -1;
250         }
251
252         if(pic->linesize[1] != pic->linesize[2]){
253             fprintf(stderr, "get_buffer() failed (uv stride mismatch)\n");
254             return -1;
255         }
256
257         s->linesize  = pic->linesize[0];
258         s->uvlinesize= pic->linesize[1];
259     }
260     
261     if(pic->qscale_table==NULL){
262         if (s->encoding) {        
263             CHECKED_ALLOCZ(pic->mb_var   , mb_array_size * sizeof(int16_t))
264             CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
265             CHECKED_ALLOCZ(pic->mb_mean  , mb_array_size * sizeof(int8_t))
266             CHECKED_ALLOCZ(pic->mb_cmp_score, mb_array_size * sizeof(int32_t))
267         }
268
269         CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
270         CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
271         CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num    * sizeof(int))
272         pic->mb_type= pic->mb_type_base + s->mb_stride+1;
273         if(s->out_format == FMT_H264){
274             for(i=0; i<2; i++){
275                 CHECKED_ALLOCZ(pic->motion_val[i], 2 * 16 * s->mb_num * sizeof(uint16_t))
276                 CHECKED_ALLOCZ(pic->ref_index[i] , 4 * s->mb_num * sizeof(uint8_t))
277             }
278         }
279         pic->qstride= s->mb_stride;
280     }
281
282     //it might be nicer if the application kept track of these, but it would require an API change
283     memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
284     s->prev_pict_types[0]= s->pict_type;
285     if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
286         pic->age= INT_MAX; // skipped MBs in b-frames are quite rare in mpeg1/2 and it's a bit tricky to skip them anyway
287     
288     return 0;
289 fail: //for the CHECKED_ALLOCZ macro
290     return -1;
291 }
292
293 /**
294  * deallocates a picture
295  */
296 static void free_picture(MpegEncContext *s, Picture *pic){
297     int i;
298
299     if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
300         s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
301     }
302
303     av_freep(&pic->mb_var);
304     av_freep(&pic->mc_mb_var);
305     av_freep(&pic->mb_mean);
306     av_freep(&pic->mb_cmp_score);
307     av_freep(&pic->mbskip_table);
308     av_freep(&pic->qscale_table);
309     av_freep(&pic->mb_type_base);
310     pic->mb_type= NULL;
311     for(i=0; i<2; i++){
312         av_freep(&pic->motion_val[i]);
313         av_freep(&pic->ref_index[i]);
314     }
315     
316     if(pic->type == FF_BUFFER_TYPE_SHARED){
317         for(i=0; i<4; i++){
318             pic->base[i]=
319             pic->data[i]= NULL;
320         }
321         pic->type= 0;        
322     }
323 }
324
325 /* init common structure for both encoder and decoder */
326 int MPV_common_init(MpegEncContext *s)
327 {
328     int y_size, c_size, yc_size, i, mb_array_size, x, y;
329
330     dsputil_init(&s->dsp, s->avctx);
331     DCT_common_init(s);
332
333     s->flags= s->avctx->flags;
334
335     s->mb_width  = (s->width  + 15) / 16;
336     s->mb_height = (s->height + 15) / 16;
337     s->mb_stride = s->mb_width + 1;
338     mb_array_size= s->mb_height * s->mb_stride;
339
340     /* set default edge pos, will be overridden in decode_header if needed */
341     s->h_edge_pos= s->mb_width*16;
342     s->v_edge_pos= s->mb_height*16;
343
344     s->mb_num = s->mb_width * s->mb_height;
345     
346     s->block_wrap[0]=
347     s->block_wrap[1]=
348     s->block_wrap[2]=
349     s->block_wrap[3]= s->mb_width*2 + 2;
350     s->block_wrap[4]=
351     s->block_wrap[5]= s->mb_width + 2;
352
353     y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
354     c_size = (s->mb_width + 2) * (s->mb_height + 2);
355     yc_size = y_size + 2 * c_size;
356
357     /* convert fourcc to upper case */
358     s->avctx->codec_tag=   toupper( s->avctx->codec_tag     &0xFF)          
359                         + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
360                         + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16) 
361                         + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);
362
363     CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
364     s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17;
365
366     s->avctx->coded_frame= (AVFrame*)&s->current_picture;
367
368     CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error resilience code looks cleaner with this
369     for(y=0; y<s->mb_height; y++){
370         for(x=0; x<s->mb_width; x++){
371             s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
372         }
373     }
374     s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
375     
376     if (s->encoding) {
377         int mv_table_size= s->mb_stride * (s->mb_height+2) + 1;
378
379         /* Allocate MV tables */
380         CHECKED_ALLOCZ(s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t))
381         CHECKED_ALLOCZ(s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
382         CHECKED_ALLOCZ(s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
383         CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
384         CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
385         CHECKED_ALLOCZ(s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t))
386         s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
387         s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
388         s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
389         s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
390         s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
391         s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;
392
393         //FIXME should be linesize instead of s->width*2 but that isn't known before get_buffer()
394         CHECKED_ALLOCZ(s->me.scratchpad,  s->width*2*16*3*sizeof(uint8_t)) 
395         
396         CHECKED_ALLOCZ(s->me.map      , ME_MAP_SIZE*sizeof(uint32_t))
397         CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
398
399         if(s->codec_id==CODEC_ID_MPEG4){
400             CHECKED_ALLOCZ(s->tex_pb_buffer, PB_BUFFER_SIZE);
401             CHECKED_ALLOCZ(   s->pb2_buffer, PB_BUFFER_SIZE);
402         }
403         
404         if(s->msmpeg4_version){
405             CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
406         }
407         CHECKED_ALLOCZ(s->avctx->stats_out, 256);
408
409         /* Allocate MB type table */
410         CHECKED_ALLOCZ(s->mb_type  , mb_array_size * sizeof(uint8_t)) //needed for encoding
411     }
412         
413     CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))
414     
415     if (s->out_format == FMT_H263 || s->encoding) {
416         int size;
417
418         /* MV prediction */
419         size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
420         CHECKED_ALLOCZ(s->motion_val, size * 2 * sizeof(int16_t));
421     }
422
423     if(s->codec_id==CODEC_ID_MPEG4){
424         /* interlaced direct mode decoding tables */
425         CHECKED_ALLOCZ(s->field_mv_table, mb_array_size*2*2 * sizeof(int16_t))
426         CHECKED_ALLOCZ(s->field_select_table, mb_array_size*2* sizeof(int8_t))
427     }
428     if (s->out_format == FMT_H263) {
429         /* ac values */
430         CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(int16_t) * 16);
431         s->ac_val[1] = s->ac_val[0] + y_size;
432         s->ac_val[2] = s->ac_val[1] + c_size;
433         
434         /* cbp values */
435         CHECKED_ALLOCZ(s->coded_block, y_size);
436         
437         /* divx501 bitstream reorder buffer */
438         CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
439
440         /* cbp, ac_pred, pred_dir */
441         CHECKED_ALLOCZ(s->cbp_table  , mb_array_size * sizeof(uint8_t))
442         CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
443     }
444     
445     if (s->h263_pred || s->h263_plus || !s->encoding) {
446         /* dc values */
447         //MN: we need these for error resilience of intra-frames
448         CHECKED_ALLOCZ(s->dc_val[0], yc_size * sizeof(int16_t));
449         s->dc_val[1] = s->dc_val[0] + y_size;
450         s->dc_val[2] = s->dc_val[1] + c_size;
451         for(i=0;i<yc_size;i++)
452             s->dc_val[0][i] = 1024;
453     }
454
455     /* which mb is an intra block */
456     CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
457     memset(s->mbintra_table, 1, mb_array_size);
458     
459     /* default structure is frame */
460     s->picture_structure = PICT_FRAME;
461     
462     /* init macroblock skip table */
463     CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
464     //Note the +1 is for a quicker mpeg4 slice_end detection
465     CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
466     
467     s->block= s->blocks[0];
468
469     s->parse_context.state= -1;
470
471     s->context_initialized = 1;
472     return 0;
473  fail:
474     MPV_common_end(s);
475     return -1;
476 }
477
478
479 //extern int sads;
480
481 /* free the common structure for both encoder and decoder */
482 void MPV_common_end(MpegEncContext *s)
483 {
484     int i;
485
486     av_freep(&s->parse_context.buffer);
487     s->parse_context.buffer_size=0;
488
489     av_freep(&s->mb_type);
490     av_freep(&s->p_mv_table_base);
491     av_freep(&s->b_forw_mv_table_base);
492     av_freep(&s->b_back_mv_table_base);
493     av_freep(&s->b_bidir_forw_mv_table_base);
494     av_freep(&s->b_bidir_back_mv_table_base);
495     av_freep(&s->b_direct_mv_table_base);
496     s->p_mv_table= NULL;
497     s->b_forw_mv_table= NULL;
498     s->b_back_mv_table= NULL;
499     s->b_bidir_forw_mv_table= NULL;
500     s->b_bidir_back_mv_table= NULL;
501     s->b_direct_mv_table= NULL;
502     
503     av_freep(&s->motion_val);
504     av_freep(&s->dc_val[0]);
505     av_freep(&s->ac_val[0]);
506     av_freep(&s->coded_block);
507     av_freep(&s->mbintra_table);
508     av_freep(&s->cbp_table);
509     av_freep(&s->pred_dir_table);
510     av_freep(&s->me.scratchpad);
511     av_freep(&s->me.map);
512     av_freep(&s->me.score_map);
513     
514     av_freep(&s->mbskip_table);
515     av_freep(&s->prev_pict_types);
516     av_freep(&s->bitstream_buffer);
517     av_freep(&s->tex_pb_buffer);
518     av_freep(&s->pb2_buffer);
519     av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
520     av_freep(&s->field_mv_table);
521     av_freep(&s->field_select_table);
522     av_freep(&s->avctx->stats_out);
523     av_freep(&s->ac_stats);
524     av_freep(&s->error_status_table);
525     av_freep(&s->mb_index2xy);
526
527     for(i=0; i<MAX_PICTURE_COUNT; i++){
528         free_picture(s, &s->picture[i]);
529     }
530     avcodec_default_free_buffers(s->avctx);
531     s->context_initialized = 0;
532 }
533
534 #ifdef CONFIG_ENCODERS
535
536 /* init video encoder */
537 int MPV_encode_init(AVCodecContext *avctx)
538 {
539     MpegEncContext *s = avctx->priv_data;
540     int i;
541     int chroma_h_shift, chroma_v_shift;
542
543     s->bit_rate = avctx->bit_rate;
544     s->bit_rate_tolerance = avctx->bit_rate_tolerance;
545     s->width = avctx->width;
546     s->height = avctx->height;
547     if(avctx->gop_size > 600){
548         fprintf(stderr, "Warning: keyframe interval too large! Reducing it ...\n");
549         avctx->gop_size=600;
550     }
551     s->gop_size = avctx->gop_size;
552     s->rtp_mode = avctx->rtp_mode;
553     s->rtp_payload_size = avctx->rtp_payload_size;
554     if (avctx->rtp_callback)
555         s->rtp_callback = avctx->rtp_callback;
556     s->max_qdiff= avctx->max_qdiff;
557     s->qcompress= avctx->qcompress;
558     s->qblur= avctx->qblur;
559     s->avctx = avctx;
560     s->flags= avctx->flags;
561     s->max_b_frames= avctx->max_b_frames;
562     s->b_frame_strategy= avctx->b_frame_strategy;
563     s->codec_id= avctx->codec->id;
564     s->luma_elim_threshold  = avctx->luma_elim_threshold;
565     s->chroma_elim_threshold= avctx->chroma_elim_threshold;
566     s->strict_std_compliance= avctx->strict_std_compliance;
567     s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
568     s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
569     s->mpeg_quant= avctx->mpeg_quant;
570
571     if (s->gop_size <= 1) {
572         s->intra_only = 1;
573         s->gop_size = 12;
574     } else {
575         s->intra_only = 0;
576     }
577
578     s->me_method = avctx->me_method;
579
580     /* Fixed QSCALE */
581     s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE);
582     
583     s->adaptive_quant= (   s->avctx->lumi_masking
584                         || s->avctx->dark_masking
585                         || s->avctx->temporal_cplx_masking 
586                         || s->avctx->spatial_cplx_masking
587                         || s->avctx->p_masking)
588                        && !s->fixed_qscale;
589     
590     s->progressive_sequence= !(avctx->flags & CODEC_FLAG_INTERLACED_DCT);
591
592     if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4){
593         fprintf(stderr, "4MV not supported by codec\n");
594         return -1;
595     }
596     
597     if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){
598         fprintf(stderr, "qpel not supported by codec\n");
599         return -1;
600     }
601
602     if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){
603         fprintf(stderr, "data partitioning not supported by codec\n");
604         return -1;
605     }
606     
607     if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO){
608         fprintf(stderr, "b-frames not supported by codec\n");
609         return -1;
610     }
611     
612     if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
613         fprintf(stderr, "mpeg2 style quantization not supported by codec\n");
614         return -1;
615     }
616         
617     if(s->codec_id==CODEC_ID_MJPEG){
618         s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
619         s->inter_quant_bias= 0;
620     }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO){
621         s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
622         s->inter_quant_bias= 0;
623     }else{
624         s->intra_quant_bias=0;
625         s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x
626     }
627     
628     if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
629         s->intra_quant_bias= avctx->intra_quant_bias;
630     if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
631         s->inter_quant_bias= avctx->inter_quant_bias;
632         
633     avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
634
635     switch(avctx->codec->id) {
636     case CODEC_ID_MPEG1VIDEO:
637         s->out_format = FMT_MPEG1;
638         s->low_delay= 0; //s->max_b_frames ? 0 : 1;
639         avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
640         break;
641     case CODEC_ID_LJPEG:
642     case CODEC_ID_MJPEG:
643         s->out_format = FMT_MJPEG;
644         s->intra_only = 1; /* force intra only for jpeg */
645         s->mjpeg_write_tables = 1; /* write all tables */
646         s->mjpeg_data_only_frames = 0; /* write all the needed headers */
647         s->mjpeg_vsample[0] = 1<<chroma_v_shift;
648         s->mjpeg_vsample[1] = 1;
649         s->mjpeg_vsample[2] = 1; 
650         s->mjpeg_hsample[0] = 1<<chroma_h_shift;
651         s->mjpeg_hsample[1] = 1; 
652         s->mjpeg_hsample[2] = 1; 
653         if (mjpeg_init(s) < 0)
654             return -1;
655         avctx->delay=0;
656         s->low_delay=1;
657         break;
658 #ifdef CONFIG_RISKY
659     case CODEC_ID_H263:
660         if (h263_get_picture_format(s->width, s->height) == 7) {
661             printf("Input picture size isn't suitable for h263 codec! try h263+\n");
662             return -1;
663         }
664         s->out_format = FMT_H263;
665         avctx->delay=0;
666         s->low_delay=1;
667         break;
668     case CODEC_ID_H263P:
669         s->out_format = FMT_H263;
670         s->h263_plus = 1;
671         /* Fx */
672         s->unrestricted_mv=(avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0;
673         s->h263_aic= (avctx->flags & CODEC_FLAG_H263P_AIC) ? 1:0;
674         /* /Fx */
675         /* These are just to be sure */
676         s->umvplus = 1;
677         avctx->delay=0;
678         s->low_delay=1;
679         break;
680     case CODEC_ID_FLV1:
681         s->out_format = FMT_H263;
682         s->h263_flv = 2; /* format = 1; 11-bit codes */
683         s->unrestricted_mv = 1;
684         s->rtp_mode=0; /* don't allow GOB */
685         avctx->delay=0;
686         s->low_delay=1;
687         break;
688     case CODEC_ID_RV10:
689         s->out_format = FMT_H263;
690         s->h263_rv10 = 1;
691         avctx->delay=0;
692         s->low_delay=1;
693         break;
694     case CODEC_ID_MPEG4:
695         s->out_format = FMT_H263;
696         s->h263_pred = 1;
697         s->unrestricted_mv = 1;
698         s->low_delay= s->max_b_frames ? 0 : 1;
699         avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
700         break;
701     case CODEC_ID_MSMPEG4V1:
702         s->out_format = FMT_H263;
703         s->h263_msmpeg4 = 1;
704         s->h263_pred = 1;
705         s->unrestricted_mv = 1;
706         s->msmpeg4_version= 1;
707         avctx->delay=0;
708         s->low_delay=1;
709         break;
710     case CODEC_ID_MSMPEG4V2:
711         s->out_format = FMT_H263;
712         s->h263_msmpeg4 = 1;
713         s->h263_pred = 1;
714         s->unrestricted_mv = 1;
715         s->msmpeg4_version= 2;
716         avctx->delay=0;
717         s->low_delay=1;
718         break;
719     case CODEC_ID_MSMPEG4V3:
720         s->out_format = FMT_H263;
721         s->h263_msmpeg4 = 1;
722         s->h263_pred = 1;
723         s->unrestricted_mv = 1;
724         s->msmpeg4_version= 3;
725         s->flipflop_rounding=1;
726         avctx->delay=0;
727         s->low_delay=1;
728         break;
729     case CODEC_ID_WMV1:
730         s->out_format = FMT_H263;
731         s->h263_msmpeg4 = 1;
732         s->h263_pred = 1;
733         s->unrestricted_mv = 1;
734         s->msmpeg4_version= 4;
735         s->flipflop_rounding=1;
736         avctx->delay=0;
737         s->low_delay=1;
738         break;
739     case CODEC_ID_WMV2:
740         s->out_format = FMT_H263;
741         s->h263_msmpeg4 = 1;
742         s->h263_pred = 1;
743         s->unrestricted_mv = 1;
744         s->msmpeg4_version= 5;
745         s->flipflop_rounding=1;
746         avctx->delay=0;
747         s->low_delay=1;
748         break;
749 #endif
750     default:
751         return -1;
752     }
753     
754     { /* set up some sane defaults, some codecs might override them later */
755         static int done=0;
756         if(!done){
757             int i;
758             done=1;
759
760             default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
761             memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
762             memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));
763
764             for(i=-16; i<16; i++){
765                 default_fcode_tab[i + MAX_MV]= 1;
766             }
767         }
768     }
769     s->me.mv_penalty= default_mv_penalty;
770     s->fcode_tab= default_fcode_tab;
771     s->y_dc_scale_table=
772     s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
773  
774     /* don't use the mv_penalty table for crap MVs as it would be confused */
775     //FIXME remove after fixing / removing old ME
776     if (s->me_method < ME_EPZS) s->me.mv_penalty = default_mv_penalty;
777
778     s->encoding = 1;
779
780     /* init */
781     if (MPV_common_init(s) < 0)
782         return -1;
783     
784     ff_init_me(s);
785
786 #ifdef CONFIG_ENCODERS
787 #ifdef CONFIG_RISKY
788     if (s->out_format == FMT_H263)
789         h263_encode_init(s);
790     if(s->msmpeg4_version)
791         ff_msmpeg4_encode_init(s);
792 #endif
793     if (s->out_format == FMT_MPEG1)
794         ff_mpeg1_encode_init(s);
795 #endif
796
797     /* init default q matrix */
798     for(i=0;i<64;i++) {
799         int j= s->dsp.idct_permutation[i];
800 #ifdef CONFIG_RISKY
801         if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
802             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
803             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
804         }else if(s->out_format == FMT_H263){
805             s->intra_matrix[j] =
806             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
807         }else
808 #endif
809         { /* mpeg1 */
810             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
811             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
812         }
813     }
814
815     /* precompute matrix */
816     /* for mjpeg, we do include qscale in the matrix */
817     if (s->out_format != FMT_MJPEG) {
818         convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, s->q_intra_matrix16_bias, 
819                        s->intra_matrix, s->intra_quant_bias, 1, 31);
820         convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16, s->q_inter_matrix16_bias, 
821                        s->inter_matrix, s->inter_quant_bias, 1, 31);
822     }
823
824     if(ff_rate_control_init(s) < 0)
825         return -1;
826
827     s->picture_number = 0;
828     s->picture_in_gop_number = 0;
829     s->fake_picture_number = 0;
830     /* motion detector init */
831     s->f_code = 1;
832     s->b_code = 1;
833
834     return 0;
835 }
836
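/* uninit video encoder: frees the rate control state, the common context and, for mjpeg, its tables */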
837 int MPV_encode_end(AVCodecContext *avctx)
838 {
839     MpegEncContext *s = avctx->priv_data;
840
841 #ifdef STATS
842     print_stats();
843 #endif
844
845     ff_rate_control_uninit(s);
846
847     MPV_common_end(s);
848     if (s->out_format == FMT_MJPEG)
849         mjpeg_close(s);
850       
851     return 0;
852 }
853
854 #endif //CONFIG_ENCODERS
855
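/**
 * initializes an RLTable: for the non-last and last coefficient halves of the
 * table it derives max_level[run], max_run[level] and index_run[run] from the
 * run/level arrays and stores malloc()ed copies of them in the table.
 */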
856 void init_rl(RLTable *rl)
857 {
858     int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
859     uint8_t index_run[MAX_RUN+1];
860     int last, run, level, start, end, i;
861
862     /* compute max_level[], max_run[] and index_run[] */
863     for(last=0;last<2;last++) {
864         if (last == 0) {
865             start = 0;
866             end = rl->last;
867         } else {
868             start = rl->last;
869             end = rl->n;
870         }
871
872         memset(max_level, 0, MAX_RUN + 1);
873         memset(max_run, 0, MAX_LEVEL + 1);
874         memset(index_run, rl->n, MAX_RUN + 1);
875         for(i=start;i<end;i++) {
876             run = rl->table_run[i];
877             level = rl->table_level[i];
878             if (index_run[run] == rl->n)
879                 index_run[run] = i;
880             if (level > max_level[run])
881                 max_level[run] = level;
882             if (run > max_run[level])
883                 max_run[level] = run;
884         }
885         rl->max_level[last] = av_malloc(MAX_RUN + 1);
886         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
887         rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
888         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
889         rl->index_run[last] = av_malloc(MAX_RUN + 1);
890         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
891     }
892 }
893
894 /* draw the edges of width 'w' of an image of size width, height */
895 //FIXME check that this is ok for mpeg4 interlaced
896 static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
897 {
898     uint8_t *ptr, *last_line;
899     int i;
900
901     last_line = buf + (height - 1) * wrap;
902     for(i=0;i<w;i++) {
903         /* top and bottom */
904         memcpy(buf - (i + 1) * wrap, buf, width);
905         memcpy(last_line + (i + 1) * wrap, last_line, width);
906     }
907     /* left and right */
908     ptr = buf;
909     for(i=0;i<height;i++) {
910         memset(ptr - w, ptr[0], w);
911         memset(ptr + width, ptr[width-1], w);
912         ptr += wrap;
913     }
914     /* corners */
915     for(i=0;i<w;i++) {
916         memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
917         memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
918         memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
919         memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
920     }
921 }
922
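/**
 * returns the index of an unused entry in s->picture[]: for shared pictures an
 * entry with no data and no type, otherwise an entry with no allocated data
 * (entries that still have a type are tried first).
 */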
923 static int find_unused_picture(MpegEncContext *s, int shared){
924     int i;
925     
926     if(shared){
927         for(i=0; i<MAX_PICTURE_COUNT; i++){
928             if(s->picture[i].data[0]==NULL && s->picture[i].type==0) break;
929         }
930     }else{
931         for(i=0; i<MAX_PICTURE_COUNT; i++){
932             if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) break; //FIXME
933         }
934         for(i=0; i<MAX_PICTURE_COUNT; i++){
935             if(s->picture[i].data[0]==NULL) break;
936         }
937     }
938
939     assert(i<MAX_PICTURE_COUNT);
940     return i;
941 }
942
943 /* generic function for encode/decode called before a frame is coded/decoded */
944 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
945 {
946     int i;
947     AVFrame *pic;
948
949     s->mb_skiped = 0;
950
951     assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
952
953     /* mark&release old frames */
954     if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr->data[0]) {
955         avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);
956
957         /* release forgotten pictures */
958         /* if(mpeg1/2/4 or h263) */
959         if(!s->encoding){
960             for(i=0; i<MAX_PICTURE_COUNT; i++){
961                 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
962                     fprintf(stderr, "releasing zombie picture\n");
963                     avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);                
964                 }
965             }
966         }
967     }
968 alloc:
969     if(!s->encoding){
970         /* release non-reference frames */
971         for(i=0; i<MAX_PICTURE_COUNT; i++){
972             if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
973                 s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
974             }
975         }
976
977         i= find_unused_picture(s, 0);
978     
979         pic= (AVFrame*)&s->picture[i];
980         pic->reference= s->pict_type != B_TYPE ? 3 : 0;
981
982         if(s->current_picture_ptr)
983             pic->coded_picture_number= s->current_picture_ptr->coded_picture_number+1;
984         
985         if( alloc_picture(s, (Picture*)pic, 0) < 0)
986             return -1;
987
988         s->current_picture_ptr= &s->picture[i];
989     }
990
991     s->current_picture_ptr->pict_type= s->pict_type;
992     s->current_picture_ptr->quality= s->qscale;
993     s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;
994
995     s->current_picture= *s->current_picture_ptr;
996   
997   if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
998     if (s->pict_type != B_TYPE) {
999         s->last_picture_ptr= s->next_picture_ptr;
1000         s->next_picture_ptr= s->current_picture_ptr;
1001     }
1002     
1003     if(s->last_picture_ptr) s->last_picture= *s->last_picture_ptr;
1004     if(s->next_picture_ptr) s->next_picture= *s->next_picture_ptr;
1005     if(s->new_picture_ptr ) s->new_picture = *s->new_picture_ptr;
1006     
1007     if(s->pict_type != I_TYPE && s->last_picture_ptr==NULL){
1008         fprintf(stderr, "warning: first frame is not a keyframe\n");
1009         assert(s->pict_type != B_TYPE); //these should have been dropped if we don't have a reference
1010         goto alloc;
1011     }
1012
1013     assert(s->pict_type == I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
1014
1015     if(s->picture_structure!=PICT_FRAME){
1016         int i;
1017         for(i=0; i<4; i++){
1018             if(s->picture_structure == PICT_BOTTOM_FIELD){
1019                  s->current_picture.data[i] += s->current_picture.linesize[i];
1020             } 
1021             s->current_picture.linesize[i] *= 2;
1022             s->last_picture.linesize[i] *=2;
1023             s->next_picture.linesize[i] *=2;
1024         }
1025     }
1026   }
1027    
1028     s->hurry_up= s->avctx->hurry_up;
1029     s->error_resilience= avctx->error_resilience;
1030
1031     /* set dequantizer, we can't do it during init as it might change for mpeg4
1032        and we can't do it in the header decode as init isn't called for mpeg4 there yet */
1033     if(s->out_format == FMT_H263){
1034         if(s->mpeg_quant)
1035             s->dct_unquantize = s->dct_unquantize_mpeg2;
1036         else
1037             s->dct_unquantize = s->dct_unquantize_h263;
1038     }else 
1039         s->dct_unquantize = s->dct_unquantize_mpeg1;
1040
1041 #ifdef HAVE_XVMC
1042     if(s->avctx->xvmc_acceleration)
1043         return XVMC_field_start(s, avctx);
1044 #endif
1045     return 0;
1046 }
1047
1048 /* generic function for encode/decode called after a frame has been coded/decoded */
1049 void MPV_frame_end(MpegEncContext *s)
1050 {
1051     int i;
1052     /* draw edge for correct motion prediction if outside */
1053 #ifdef HAVE_XVMC
1054 //just to make sure that all data is rendered.
1055     if(s->avctx->xvmc_acceleration){
1056         XVMC_field_end(s);
1057     }else
1058 #endif
1059     if(s->codec_id!=CODEC_ID_SVQ1 && s->codec_id != CODEC_ID_MPEG1VIDEO){
1060         if (s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1061             draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
1062             draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1063             draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1064         }
1065     }
1066     emms_c();
1067     
1068     s->last_pict_type    = s->pict_type;
1069     if(s->pict_type!=B_TYPE){
1070         s->last_non_b_pict_type= s->pict_type;
1071     }
1072 #if 0
1073         /* copy back current_picture variables */
1074     for(i=0; i<MAX_PICTURE_COUNT; i++){
1075         if(s->picture[i].data[0] == s->current_picture.data[0]){
1076             s->picture[i]= s->current_picture;
1077             break;
1078         }    
1079     }
1080     assert(i<MAX_PICTURE_COUNT);
1081 #endif    
1082
1083     if(s->encoding){
1084         /* release non-reference frames */
1085         for(i=0; i<MAX_PICTURE_COUNT; i++){
1086             if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1087                 s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
1088             }
1089         }
1090     }
1091     // clear copies, to avoid confusion
1092 #if 0
1093     memset(&s->last_picture, 0, sizeof(Picture));
1094     memset(&s->next_picture, 0, sizeof(Picture));
1095     memset(&s->current_picture, 0, sizeof(Picture));
1096 #endif
1097 }
1098
1099 /**
1100  * draws a line from (ex, ey) -> (sx, sy).
1101  * @param w width of the image
1102  * @param h height of the image
1103  * @param stride stride/linesize of the image
1104  * @param color color of the line
1105  */
1106 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1107     int t, x, y, f;
1108     
1109     sx= clip(sx, 0, w-1);
1110     sy= clip(sy, 0, h-1);
1111     ex= clip(ex, 0, w-1);
1112     ey= clip(ey, 0, h-1);
1113     
1114     buf[sy*stride + sx]+= color;
1115     
1116     if(ABS(ex - sx) > ABS(ey - sy)){
1117         if(sx > ex){
1118             t=sx; sx=ex; ex=t;
1119             t=sy; sy=ey; ey=t;
1120         }
1121         buf+= sx + sy*stride;
1122         ex-= sx;
1123         f= ((ey-sy)<<16)/ex;
1124         for(x= 0; x <= ex; x++){
1125             y= ((x*f) + (1<<15))>>16;
1126             buf[y*stride + x]+= color;
1127         }
1128     }else{
1129         if(sy > ey){
1130             t=sx; sx=ex; ex=t;
1131             t=sy; sy=ey; ey=t;
1132         }
1133         buf+= sx + sy*stride;
1134         ey-= sy;
1135         if(ey) f= ((ex-sx)<<16)/ey;
1136         else   f= 0;
1137         for(y= 0; y <= ey; y++){
1138             x= ((y*f) + (1<<15))>>16;
1139             buf[y*stride + x]+= color;
1140         }
1141     }
1142 }
1143
1144 /**
1145  * draws an arrow from (ex, ey) -> (sx, sy).
1146  * @param w width of the image
1147  * @param h height of the image
1148  * @param stride stride/linesize of the image
1149  * @param color color of the arrow
1150  */
1151 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){ 
1152     int dx,dy;
1153
1154     sx= clip(sx, -100, w+100);
1155     sy= clip(sy, -100, h+100);
1156     ex= clip(ex, -100, w+100);
1157     ey= clip(ey, -100, h+100);
1158     
1159     dx= ex - sx;
1160     dy= ey - sy;
1161     
1162     if(dx*dx + dy*dy > 3*3){
1163         int rx=  dx + dy;
1164         int ry= -dx + dy;
1165         int length= ff_sqrt((rx*rx + ry*ry)<<8);
1166         
1167         //FIXME subpixel accuracy
1168         rx= ROUNDED_DIV(rx*3<<4, length);
1169         ry= ROUNDED_DIV(ry*3<<4, length);
1170         
1171         draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1172         draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1173     }
1174     draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1175 }
1176
1177 /**
1178  * prints debugging info for the given picture.
1179  */
1180 void ff_print_debug_info(MpegEncContext *s, Picture *pict){
1181
1182     if(!pict || !pict->mb_type) return;
1183
1184     if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1185         int x,y;
1186
1187         for(y=0; y<s->mb_height; y++){
1188             for(x=0; x<s->mb_width; x++){
1189                 if(s->avctx->debug&FF_DEBUG_SKIP){
1190                     int count= s->mbskip_table[x + y*s->mb_stride];
1191                     if(count>9) count=9;
1192                     printf("%1d", count);
1193                 }
1194                 if(s->avctx->debug&FF_DEBUG_QP){
1195                     printf("%2d", pict->qscale_table[x + y*s->mb_stride]);
1196                 }
1197                 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1198                     int mb_type= pict->mb_type[x + y*s->mb_stride];
1199                     
1200                     //Type & MV direction
1201                     if(IS_PCM(mb_type))
1202                         printf("P");
1203                     else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1204                         printf("A");
1205                     else if(IS_INTRA4x4(mb_type))
1206                         printf("i");
1207                     else if(IS_INTRA16x16(mb_type))
1208                         printf("I");
1209                     else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1210                         printf("d");
1211                     else if(IS_DIRECT(mb_type))
1212                         printf("D");
1213                     else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1214                         printf("g");
1215                     else if(IS_GMC(mb_type))
1216                         printf("G");
1217                     else if(IS_SKIP(mb_type))
1218                         printf("S");
1219                     else if(!USES_LIST(mb_type, 1))
1220                         printf(">");
1221                     else if(!USES_LIST(mb_type, 0))
1222                         printf("<");
1223                     else{
1224                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1225                         printf("X");
1226                     }
1227                     
1228                     //segmentation
1229                     if(IS_8X8(mb_type))
1230                         printf("+");
1231                     else if(IS_16X8(mb_type))
1232                         printf("-");
1233                     else if(IS_8X16(mb_type))
1234                         printf("|");
1235                     else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1236                         printf(" ");
1237                     else
1238                         printf("?");
1239                     
1240                         
1241                     if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
1242                         printf("=");
1243                     else
1244                         printf(" ");
1245                 }
1246 //                printf(" ");
1247             }
1248             printf("\n");
1249         }
1250     }
1251     
1252     if((s->avctx->debug&FF_DEBUG_VIS_MV) && s->motion_val){
1253         const int shift= 1 + s->quarter_sample;
1254         int mb_y;
1255         uint8_t *ptr= pict->data[0];
1256         s->low_delay=0; //needed to see the vectors without trashing the buffers
1257
1258         for(mb_y=0; mb_y<s->mb_height; mb_y++){
1259             int mb_x;
1260             for(mb_x=0; mb_x<s->mb_width; mb_x++){
1261                 const int mb_index= mb_x + mb_y*s->mb_stride;
1262                 if(IS_8X8(s->current_picture.mb_type[mb_index])){
1263                     int i;
1264                     for(i=0; i<4; i++){
1265                         int sx= mb_x*16 + 4 + 8*(i&1);
1266                         int sy= mb_y*16 + 4 + 8*(i>>1);
1267                         int xy= 1 + mb_x*2 + (i&1) + (mb_y*2 + 1 + (i>>1))*(s->mb_width*2 + 2);
1268                         int mx= (s->motion_val[xy][0]>>shift) + sx;
1269                         int my= (s->motion_val[xy][1]>>shift) + sy;
1270                         draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
1271                     }
1272                 }else{
1273                     int sx= mb_x*16 + 8;
1274                     int sy= mb_y*16 + 8;
1275                     int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2);
1276                     int mx= (s->motion_val[xy][0]>>shift) + sx;
1277                     int my= (s->motion_val[xy][1]>>shift) + sy;
1278                     draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
1279                 }
1280                 s->mbskip_table[mb_index]=0;
1281             }
1282         }
1283     }
1284 }
1285
1286 #ifdef CONFIG_ENCODERS
1287
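/**
 * returns the sum of absolute differences of a 16x16 block against the constant
 * value 'ref' (typically the block mean), i.e. a cheap flatness/variance measure.
 */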
1288 static int get_sae(uint8_t *src, int ref, int stride){
1289     int x,y;
1290     int acc=0;
1291     
1292     for(y=0; y<16; y++){
1293         for(x=0; x<16; x++){
1294             acc+= ABS(src[x+y*stride] - ref);
1295         }
1296     }
1297     
1298     return acc;
1299 }
1300
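/**
 * counts the 16x16 blocks for which prediction from the reference frame (SAD)
 * looks clearly worse than coding the block on its own (SAE against its mean);
 * used by b_frame_strategy 1 to decide how many b-frames to insert.
 */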
1301 static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
1302     int x, y, w, h;
1303     int acc=0;
1304     
1305     w= s->width &~15;
1306     h= s->height&~15;
1307     
1308     for(y=0; y<h; y+=16){
1309         for(x=0; x<w; x+=16){
1310             int offset= x + y*stride;
1311             int sad = s->dsp.pix_abs16x16(src + offset, ref + offset, stride);
1312             int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
1313             int sae = get_sae(src + offset, mean, stride);
1314             
1315             acc+= sae + 500 < sad;
1316         }
1317     }
1318     return acc;
1319 }
1320
1321
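/**
 * stores the user supplied input picture in s->input_picture[]: either by
 * referencing its buffers directly (if the strides match and the input may be
 * kept) or by copying it into a newly allocated internal Picture.
 */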
1322 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
1323     AVFrame *pic=NULL;
1324     int i;
1325     const int encoding_delay= s->max_b_frames;
1326     int direct=1;
1327     
1328   if(pic_arg){
1329     if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
1330     if(pic_arg->linesize[0] != s->linesize) direct=0;
1331     if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
1332     if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
1333   
1334 //    printf("%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
1335     
1336     if(direct){
1337         i= find_unused_picture(s, 1);
1338
1339         pic= (AVFrame*)&s->picture[i];
1340         pic->reference= 3;
1341     
1342         for(i=0; i<4; i++){
1343             pic->data[i]= pic_arg->data[i];
1344             pic->linesize[i]= pic_arg->linesize[i];
1345         }
1346         alloc_picture(s, (Picture*)pic, 1);
1347     }else{
1348         i= find_unused_picture(s, 0);
1349
1350         pic= (AVFrame*)&s->picture[i];
1351         pic->reference= 3;
1352
1353         alloc_picture(s, (Picture*)pic, 0);
1354         for(i=0; i<4; i++){
1355             /* the input will be 16 pixels to the right relative to the actual buffer start
1356              * and the current_pic, so the buffer can be reused; yes, it's not beautiful
1357              */
1358             pic->data[i]+= 16; 
1359         }
1360
1361         if(   pic->data[0] == pic_arg->data[0] 
1362            && pic->data[1] == pic_arg->data[1]
1363            && pic->data[2] == pic_arg->data[2]){
1364        // empty
1365         }else{
1366             int h_chroma_shift, v_chroma_shift;
1367             avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1368         
1369             for(i=0; i<3; i++){
1370                 int src_stride= pic_arg->linesize[i];
1371                 int dst_stride= i ? s->uvlinesize : s->linesize;
1372                 int h_shift= i ? h_chroma_shift : 0;
1373                 int v_shift= i ? v_chroma_shift : 0;
1374                 int w= s->width >>h_shift;
1375                 int h= s->height>>v_shift;
1376                 uint8_t *src= pic_arg->data[i];
1377                 uint8_t *dst= pic->data[i];
1378             
1379                 if(src_stride==dst_stride)
1380                     memcpy(dst, src, src_stride*h);
1381                 else{
1382                     while(h--){
1383                         memcpy(dst, src, w);
1384                         dst += dst_stride;
1385                         src += src_stride;
1386                     }
1387                 }
1388             }
1389         }
1390     }
1391     pic->quality= pic_arg->quality;
1392     pic->pict_type= pic_arg->pict_type;
1393     pic->pts = pic_arg->pts;
1394     
1395     if(s->input_picture[encoding_delay])
1396         pic->display_picture_number= s->input_picture[encoding_delay]->display_picture_number + 1;
1397     
1398   }
1399
1400     /* shift buffer entries */
1401     for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
1402         s->input_picture[i-1]= s->input_picture[i];
1403         
1404     s->input_picture[encoding_delay]= (Picture*)pic;
1405
1406     return 0;
1407 }
1408
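/**
 * decides which buffered input picture is coded next: assigns I/P/B picture
 * types (honouring 2-pass data, user supplied types and the b-frame strategy),
 * reorders the pictures into coding order and sets up s->new_picture and
 * s->current_picture for the encoder.
 */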
1409 static void select_input_picture(MpegEncContext *s){
1410     int i;
1411     const int encoding_delay= s->max_b_frames;
1412     int coded_pic_num=0;    
1413
1414     if(s->reordered_input_picture[0])
1415         coded_pic_num= s->reordered_input_picture[0]->coded_picture_number + 1;
1416
1417     for(i=1; i<MAX_PICTURE_COUNT; i++)
1418         s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
1419     s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
1420
1421     /* set next picture types & ordering */
1422     if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
1423         if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
1424             s->reordered_input_picture[0]= s->input_picture[0];
1425             s->reordered_input_picture[0]->pict_type= I_TYPE;
1426             s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
1427         }else{
1428             int b_frames;
1429             
1430             if(s->flags&CODEC_FLAG_PASS2){
1431                 for(i=0; i<s->max_b_frames+1; i++){
1432                     int pict_num= s->input_picture[0]->display_picture_number + i;
1433                     int pict_type= s->rc_context.entry[pict_num].new_pict_type;
1434                     s->input_picture[i]->pict_type= pict_type;
1435                     
1436                     if(i + 1 >= s->rc_context.num_entries) break;
1437                 }
1438             }
1439
1440             if(s->input_picture[0]->pict_type){
1441                 /* user selected pict_type */
1442                 for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){
1443                     if(s->input_picture[b_frames]->pict_type!=B_TYPE) break;
1444                 }
1445             
1446                 if(b_frames > s->max_b_frames){
1447                     fprintf(stderr, "warning, too many b-frames in a row\n");
1448                     b_frames = s->max_b_frames;
1449                 }
1450             }else if(s->b_frame_strategy==0){
1451                 b_frames= s->max_b_frames;
1452                 while(b_frames && !s->input_picture[b_frames]) b_frames--;
1453             }else if(s->b_frame_strategy==1){
1454                 for(i=1; i<s->max_b_frames+1; i++){
1455                     if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
1456                         s->input_picture[i]->b_frame_score= 
1457                             get_intra_count(s, s->input_picture[i  ]->data[0], 
1458                                                s->input_picture[i-1]->data[0], s->linesize) + 1;
1459                     }
1460                 }
1461                 for(i=0; i<s->max_b_frames; i++){
1462                     if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
1463                 }
1464                                 
1465                 b_frames= FFMAX(0, i-1);
1466                 
1467                 /* reset scores */
1468                 for(i=0; i<b_frames+1; i++){
1469                     s->input_picture[i]->b_frame_score=0;
1470                 }
1471             }else{
1472                 fprintf(stderr, "illegal b frame strategy\n");
1473                 b_frames=0;
1474             }
1475
1476             emms_c();
1477 //static int b_count=0;
1478 //b_count+= b_frames;
1479 //printf("b_frames: %d\n", b_count);
1480                         
1481             s->reordered_input_picture[0]= s->input_picture[b_frames];
1482             if(   s->picture_in_gop_number + b_frames >= s->gop_size 
1483                || s->reordered_input_picture[0]->pict_type== I_TYPE)
1484                 s->reordered_input_picture[0]->pict_type= I_TYPE;
1485             else
1486                 s->reordered_input_picture[0]->pict_type= P_TYPE;
1487             s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
1488             for(i=0; i<b_frames; i++){
1489                 coded_pic_num++;
1490                 s->reordered_input_picture[i+1]= s->input_picture[i];
1491                 s->reordered_input_picture[i+1]->pict_type= B_TYPE;
1492                 s->reordered_input_picture[i+1]->coded_picture_number= coded_pic_num;
1493             }
1494         }
1495     }
1496     
1497     if(s->reordered_input_picture[0]){
1498         s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;
1499
1500         s->new_picture= *s->reordered_input_picture[0];
1501
1502         if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
1503             // input is a shared pix, so we can't modify it -> alloc a new one & ensure that the shared one is reusable
1504         
1505             int i= find_unused_picture(s, 0);
1506             Picture *pic= &s->picture[i];
1507
1508             /* mark the shared pic as unused / free it */
1509             for(i=0; i<4; i++)
1510                 s->reordered_input_picture[0]->data[i]= NULL;
1511             s->reordered_input_picture[0]->type= 0;
1512             
1513             //FIXME bad, copy * except
1514             pic->pict_type = s->reordered_input_picture[0]->pict_type;
1515             pic->quality   = s->reordered_input_picture[0]->quality;
1516             pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number;
1517             pic->reference = s->reordered_input_picture[0]->reference;
1518             
1519             alloc_picture(s, pic, 0);
1520
1521             s->current_picture_ptr= pic;
1522         }else{
1523             // input is not a shared pix -> reuse buffer for current_pix
1524
1525             assert(   s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER 
1526                    || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
1527             
1528             s->current_picture_ptr= s->reordered_input_picture[0];
1529             for(i=0; i<4; i++){
1530                 //reverse the +16 we did before storing the input
1531                 s->current_picture_ptr->data[i]-=16;
1532             }
1533         }
1534         s->current_picture= *s->current_picture_ptr;
1535     
1536         s->picture_number= s->new_picture.display_picture_number;
1537 //printf("dpn:%d\n", s->picture_number);
1538     }else{
1539        memset(&s->new_picture, 0, sizeof(Picture));
1540     }
1541 }
1542
1543 int MPV_encode_picture(AVCodecContext *avctx,
1544                        unsigned char *buf, int buf_size, void *data)
1545 {
1546     MpegEncContext *s = avctx->priv_data;
1547     AVFrame *pic_arg = data;
1548     int i;
1549
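    /* Per-call encoding pipeline: queue the user's frame (load_input_picture),
       pick the next frame in coded order, which introduces the B-frame delay
       (select_input_picture), then encode it and export the bit statistics. */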
1550     if(avctx->pix_fmt != PIX_FMT_YUV420P){
1551         fprintf(stderr, "this codec supports only YUV420P\n");
1552         return -1;
1553     }
1554     
1555     init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
1556
1557     s->picture_in_gop_number++;
1558
1559     load_input_picture(s, pic_arg);
1560     
1561     select_input_picture(s);
1562     
1563     /* output? */
1564     if(s->new_picture.data[0]){
1565
1566         s->pict_type= s->new_picture.pict_type;
1567         if (s->fixed_qscale){ /* the ratecontrol needs the last qscale so we don't touch it for CBR */
1568             s->qscale= (int)(s->new_picture.quality+0.5);
1569             assert(s->qscale);
1570         }
1571 //emms_c();
1572 //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
1573         MPV_frame_start(s, avctx);
1574
1575         encode_picture(s, s->picture_number);
1576         
1577         avctx->real_pict_num  = s->picture_number;
1578         avctx->header_bits = s->header_bits;
1579         avctx->mv_bits     = s->mv_bits;
1580         avctx->misc_bits   = s->misc_bits;
1581         avctx->i_tex_bits  = s->i_tex_bits;
1582         avctx->p_tex_bits  = s->p_tex_bits;
1583         avctx->i_count     = s->i_count;
1584         avctx->p_count     = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
1585         avctx->skip_count  = s->skip_count;
1586
1587         MPV_frame_end(s);
1588
1589         if (s->out_format == FMT_MJPEG)
1590             mjpeg_picture_trailer(s);
1591         
1592         if(s->flags&CODEC_FLAG_PASS1)
1593             ff_write_pass1_stats(s);
1594
1595         for(i=0; i<4; i++){
1596             avctx->error[i] += s->current_picture_ptr->error[i];
1597         }
1598     }
1599
1600     s->input_picture_number++;
1601
1602     flush_put_bits(&s->pb);
1603     s->frame_bits  = (pbBufPtr(&s->pb) - s->pb.buf) * 8;
1604     
1605     s->total_bits += s->frame_bits;
1606     avctx->frame_bits  = s->frame_bits;
1607     
1608     return pbBufPtr(&s->pb) - s->pb.buf;
1609 }
1610
1611 #endif //CONFIG_ENCODERS
1612
1613 static inline void gmc1_motion(MpegEncContext *s,
1614                                uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1615                                int dest_offset,
1616                                uint8_t **ref_picture, int src_offset)
1617 {
1618     uint8_t *ptr;
1619     int offset, src_x, src_y, linesize, uvlinesize;
1620     int motion_x, motion_y;
1621     int emu=0;
1622
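    /* One-point GMC degenerates to a global translation: sprite_offset is in
       1/(2<<accuracy)-pel units, so >>(accuracy+1) gives the integer source
       position and <<(3-accuracy) rescales the offset to 1/16-pel, from which
       gmc1() takes the fractional part (motion&15). */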
1623     motion_x= s->sprite_offset[0][0];
1624     motion_y= s->sprite_offset[0][1];
1625     src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
1626     src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
1627     motion_x<<=(3-s->sprite_warping_accuracy);
1628     motion_y<<=(3-s->sprite_warping_accuracy);
1629     src_x = clip(src_x, -16, s->width);
1630     if (src_x == s->width)
1631         motion_x =0;
1632     src_y = clip(src_y, -16, s->height);
1633     if (src_y == s->height)
1634         motion_y =0;
1635
1636     linesize = s->linesize;
1637     uvlinesize = s->uvlinesize;
1638     
1639     ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1640
1641     dest_y+=dest_offset;
1642     if(s->flags&CODEC_FLAG_EMU_EDGE){
1643         if(src_x<0 || src_y<0 || src_x + 17 >= s->h_edge_pos
1644                               || src_y + 17 >= s->v_edge_pos){
1645             ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1646             ptr= s->edge_emu_buffer;
1647         }
1648     }
1649     
1650     if((motion_x|motion_y)&7){
1651         s->dsp.gmc1(dest_y  , ptr  , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1652         s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1653     }else{
1654         int dxy;
1655         
1656         dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
1657         if (s->no_rounding){
1658             s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
1659         }else{
1660             s->dsp.put_pixels_tab       [0][dxy](dest_y, ptr, linesize, 16);
1661         }
1662     }
1663     
1664     if(s->flags&CODEC_FLAG_GRAY) return;
1665
1666     motion_x= s->sprite_offset[1][0];
1667     motion_y= s->sprite_offset[1][1];
1668     src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
1669     src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
1670     motion_x<<=(3-s->sprite_warping_accuracy);
1671     motion_y<<=(3-s->sprite_warping_accuracy);
1672     src_x = clip(src_x, -8, s->width>>1);
1673     if (src_x == s->width>>1)
1674         motion_x =0;
1675     src_y = clip(src_y, -8, s->height>>1);
1676     if (src_y == s->height>>1)
1677         motion_y =0;
1678
1679     offset = (src_y * uvlinesize) + src_x + (src_offset>>1);
1680     ptr = ref_picture[1] + offset;
1681     if(s->flags&CODEC_FLAG_EMU_EDGE){
1682         if(src_x<0 || src_y<0 || src_x + 9 >= s->h_edge_pos>>1
1683                               || src_y + 9 >= s->v_edge_pos>>1){
1684             ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1685             ptr= s->edge_emu_buffer;
1686             emu=1;
1687         }
1688     }
1689     s->dsp.gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
1690     
1691     ptr = ref_picture[2] + offset;
1692     if(emu){
1693         ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1694         ptr= s->edge_emu_buffer;
1695     }
1696     s->dsp.gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
1697     
1698     return;
1699 }
1700
1701 static inline void gmc_motion(MpegEncContext *s,
1702                                uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1703                                int dest_offset,
1704                                uint8_t **ref_picture, int src_offset)
1705 {
1706     uint8_t *ptr;
1707     int linesize, uvlinesize;
1708     const int a= s->sprite_warping_accuracy;
1709     int ox, oy;
1710
1711     linesize = s->linesize;
1712     uvlinesize = s->uvlinesize;
1713
1714     ptr = ref_picture[0] + src_offset;
1715
1716     dest_y+=dest_offset;
1717     
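    /* Full GMC: sprite_delta[][] is the 2x2 affine matrix applied per pixel by
       dsp.gmc(); ox/oy evaluate the transform at this macroblock's top-left luma
       sample, in 1/(2<<accuracy)-pel units (hence the a+1 shift and the
       (1<<(2*a+1)) rounder passed below). */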
1718     ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
1719     oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;
1720
1721     s->dsp.gmc(dest_y, ptr, linesize, 16,
1722            ox, 
1723            oy, 
1724            s->sprite_delta[0][0], s->sprite_delta[0][1],
1725            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1726            a+1, (1<<(2*a+1)) - s->no_rounding,
1727            s->h_edge_pos, s->v_edge_pos);
1728     s->dsp.gmc(dest_y+8, ptr, linesize, 16,
1729            ox + s->sprite_delta[0][0]*8, 
1730            oy + s->sprite_delta[1][0]*8, 
1731            s->sprite_delta[0][0], s->sprite_delta[0][1],
1732            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1733            a+1, (1<<(2*a+1)) - s->no_rounding,
1734            s->h_edge_pos, s->v_edge_pos);
1735
1736     if(s->flags&CODEC_FLAG_GRAY) return;
1737
1738
1739     dest_cb+=dest_offset>>1;
1740     dest_cr+=dest_offset>>1;
1741     
1742     ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
1743     oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;
1744
1745     ptr = ref_picture[1] + (src_offset>>1);
1746     s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
1747            ox, 
1748            oy, 
1749            s->sprite_delta[0][0], s->sprite_delta[0][1],
1750            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1751            a+1, (1<<(2*a+1)) - s->no_rounding,
1752            s->h_edge_pos>>1, s->v_edge_pos>>1);
1753     
1754     ptr = ref_picture[2] + (src_offset>>1);
1755     s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
1756            ox, 
1757            oy, 
1758            s->sprite_delta[0][0], s->sprite_delta[0][1],
1759            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1760            a+1, (1<<(2*a+1)) - s->no_rounding,
1761            s->h_edge_pos>>1, s->v_edge_pos>>1);
1762 }
1763
1764 /**
1765  * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
1766  * @param buf destination buffer
1767  * @param src source buffer
1768  * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
1769  * @param block_w width of block
1770  * @param block_h height of block
1771  * @param src_x x coordinate of the top left sample of the block in the source buffer
1772  * @param src_y y coordinate of the top left sample of the block in the source buffer
1773  * @param w width of the source buffer
1774  * @param h height of the source buffer
1775  */
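/* The block is first clamped so that at least one of its rows/columns still
   overlaps the source picture; the overlapping part is copied and the border
   samples are then replicated outwards (top, bottom, left, right) to fill buf. */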
1776 void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h, 
1777                                     int src_x, int src_y, int w, int h){
1778     int x, y;
1779     int start_y, start_x, end_y, end_x;
1780
1781     if(src_y>= h){
1782         src+= (h-1-src_y)*linesize;
1783         src_y=h-1;
1784     }else if(src_y<=-block_h){
1785         src+= (1-block_h-src_y)*linesize;
1786         src_y=1-block_h;
1787     }
1788     if(src_x>= w){
1789         src+= (w-1-src_x);
1790         src_x=w-1;
1791     }else if(src_x<=-block_w){
1792         src+= (1-block_w-src_x);
1793         src_x=1-block_w;
1794     }
1795
1796     start_y= FFMAX(0, -src_y);
1797     start_x= FFMAX(0, -src_x);
1798     end_y= FFMIN(block_h, h-src_y);
1799     end_x= FFMIN(block_w, w-src_x);
1800
1801     // copy existing part
1802     for(y=start_y; y<end_y; y++){
1803         for(x=start_x; x<end_x; x++){
1804             buf[x + y*linesize]= src[x + y*linesize];
1805         }
1806     }
1807
1808     //top
1809     for(y=0; y<start_y; y++){
1810         for(x=start_x; x<end_x; x++){
1811             buf[x + y*linesize]= buf[x + start_y*linesize];
1812         }
1813     }
1814
1815     //bottom
1816     for(y=end_y; y<block_h; y++){
1817         for(x=start_x; x<end_x; x++){
1818             buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
1819         }
1820     }
1821                                     
1822     for(y=0; y<block_h; y++){
1823        //left
1824         for(x=0; x<start_x; x++){
1825             buf[x + y*linesize]= buf[start_x + y*linesize];
1826         }
1827        
1828        //right
1829         for(x=end_x; x<block_w; x++){
1830             buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
1831         }
1832     }
1833 }
1834
1835
1836 /* apply one mpeg motion vector to the three components */
1837 static inline void mpeg_motion(MpegEncContext *s,
1838                                uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1839                                int dest_offset,
1840                                uint8_t **ref_picture, int src_offset,
1841                                int field_based, op_pixels_func (*pix_op)[4],
1842                                int motion_x, int motion_y, int h)
1843 {
1844     uint8_t *ptr;
1845     int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
1846     int emu=0;
1847 #if 0    
1848 if(s->quarter_sample)
1849 {
1850     motion_x>>=1;
1851     motion_y>>=1;
1852 }
1853 #endif
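    /* dxy packs the half-pel fraction: bit 0 = horizontal half-pel, bit 1 =
       vertical half-pel; it selects the variant in pix_op[plane][dxy]. */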
1854     dxy = ((motion_y & 1) << 1) | (motion_x & 1);
1855     src_x = s->mb_x * 16 + (motion_x >> 1);
1856     src_y = s->mb_y * (16 >> field_based) + (motion_y >> 1);
1857                 
1858     /* WARNING: do not forget half pels */
1859     height = s->height >> field_based;
1860     v_edge_pos = s->v_edge_pos >> field_based;
1861     src_x = clip(src_x, -16, s->width);
1862     if (src_x == s->width)
1863         dxy &= ~1;
1864     src_y = clip(src_y, -16, height);
1865     if (src_y == height)
1866         dxy &= ~2;
1867     linesize   = s->current_picture.linesize[0] << field_based;
1868     uvlinesize = s->current_picture.linesize[1] << field_based;
1869     ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset;
1870     dest_y += dest_offset;
1871
1872     if(s->flags&CODEC_FLAG_EMU_EDGE){
1873         if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 16 > s->h_edge_pos
1874                               || src_y + (motion_y&1) + h  > v_edge_pos){
1875             ff_emulated_edge_mc(s->edge_emu_buffer, ptr - src_offset, s->linesize, 17, 17+field_based,  //FIXME linesize? and uv below
1876                              src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
1877             ptr= s->edge_emu_buffer + src_offset;
1878             emu=1;
1879         }
1880     }
1881     pix_op[0][dxy](dest_y, ptr, linesize, h);
1882
1883     if(s->flags&CODEC_FLAG_GRAY) return;
1884
1885     if (s->out_format == FMT_H263) {
1886         dxy = 0;
1887         if ((motion_x & 3) != 0)
1888             dxy |= 1;
1889         if ((motion_y & 3) != 0)
1890             dxy |= 2;
1891         mx = motion_x >> 2;
1892         my = motion_y >> 2;
1893     } else {
1894         mx = motion_x / 2;
1895         my = motion_y / 2;
1896         dxy = ((my & 1) << 1) | (mx & 1);
1897         mx >>= 1;
1898         my >>= 1;
1899     }
1900     
1901     src_x = s->mb_x * 8 + mx;
1902     src_y = s->mb_y * (8 >> field_based) + my;
1903     src_x = clip(src_x, -8, s->width >> 1);
1904     if (src_x == (s->width >> 1))
1905         dxy &= ~1;
1906     src_y = clip(src_y, -8, height >> 1);
1907     if (src_y == (height >> 1))
1908         dxy &= ~2;
1909     offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
1910     ptr = ref_picture[1] + offset;
1911     if(emu){
1912         ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, 
1913                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1914         ptr= s->edge_emu_buffer + (src_offset >> 1);
1915     }
1916     pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1917
1918     ptr = ref_picture[2] + offset;
1919     if(emu){
1920         ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, 
1921                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1922         ptr= s->edge_emu_buffer + (src_offset >> 1);
1923     }
1924     pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1925 }
1926
1927 static inline void qpel_motion(MpegEncContext *s,
1928                                uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1929                                int dest_offset,
1930                                uint8_t **ref_picture, int src_offset,
1931                                int field_based, op_pixels_func (*pix_op)[4],
1932                                qpel_mc_func (*qpix_op)[16],
1933                                int motion_x, int motion_y, int h)
1934 {
1935     uint8_t *ptr;
1936     int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
1937     int emu=0;
1938
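    /* dxy packs the quarter-pel fraction: low 2 bits horizontal, next 2 bits
       vertical, selecting one of the 16 qpix_op variants. */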
1939     dxy = ((motion_y & 3) << 2) | (motion_x & 3);
1940     src_x = s->mb_x * 16 + (motion_x >> 2);
1941     src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
1942
1943     height = s->height >> field_based;
1944     v_edge_pos = s->v_edge_pos >> field_based;
1945     src_x = clip(src_x, -16, s->width);
1946     if (src_x == s->width)
1947         dxy &= ~3;
1948     src_y = clip(src_y, -16, height);
1949     if (src_y == height)
1950         dxy &= ~12;
1951     linesize = s->linesize << field_based;
1952     uvlinesize = s->uvlinesize << field_based;
1953     ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1954     dest_y += dest_offset;
1955 //printf("%d %d %d\n", src_x, src_y, dxy);
1956     
1957     if(s->flags&CODEC_FLAG_EMU_EDGE){
1958         if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 16 > s->h_edge_pos
1959                               || src_y + (motion_y&3) + h  > v_edge_pos){
1960             ff_emulated_edge_mc(s->edge_emu_buffer, ptr - src_offset, s->linesize, 17, 17+field_based, 
1961                              src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
1962             ptr= s->edge_emu_buffer + src_offset;
1963             emu=1;
1964         }
1965     }
1966     if(!field_based)
1967         qpix_op[0][dxy](dest_y, ptr, linesize);
1968     else{
1969         //damn interlaced mode
1970         //FIXME boundary mirroring is not exactly correct here
1971         qpix_op[1][dxy](dest_y  , ptr  , linesize);
1972         qpix_op[1][dxy](dest_y+8, ptr+8, linesize);
1973     }
1974
1975     if(s->flags&CODEC_FLAG_GRAY) return;
1976
1977     if(field_based){
1978         mx= motion_x/2;
1979         my= motion_y>>1;
1980     }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
1981         static const int rtab[8]= {0,0,1,1,0,0,0,1};
1982         mx= (motion_x>>1) + rtab[motion_x&7];
1983         my= (motion_y>>1) + rtab[motion_y&7];
1984     }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
1985         mx= (motion_x>>1)|(motion_x&1);
1986         my= (motion_y>>1)|(motion_y&1);
1987     }else{
1988         mx= motion_x/2;
1989         my= motion_y/2;
1990     }
1991     mx= (mx>>1)|(mx&1);
1992     my= (my>>1)|(my&1);
1993
1994     dxy= (mx&1) | ((my&1)<<1);
1995     mx>>=1;
1996     my>>=1;
1997
1998     src_x = s->mb_x * 8 + mx;
1999     src_y = s->mb_y * (8 >> field_based) + my;
2000     src_x = clip(src_x, -8, s->width >> 1);
2001     if (src_x == (s->width >> 1))
2002         dxy &= ~1;
2003     src_y = clip(src_y, -8, height >> 1);
2004     if (src_y == (height >> 1))
2005         dxy &= ~2;
2006
2007     offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
2008     ptr = ref_picture[1] + offset;
2009     if(emu){
2010         ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, 
2011                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
2012         ptr= s->edge_emu_buffer + (src_offset >> 1);
2013     }
2014     pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr,  uvlinesize, h >> 1);
2015     
2016     ptr = ref_picture[2] + offset;
2017     if(emu){
2018         ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, 
2019                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
2020         ptr= s->edge_emu_buffer + (src_offset >> 1);
2021     }
2022     pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr,  uvlinesize, h >> 1);
2023 }
2024
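/* Derives one chroma MV from the sum x of the four half-pel luma MVs: the result
   is roughly x/8 (the average luma MV halved) with the rounding bias from
   h263_chroma_roundtab, e.g. x=5 -> 1, x=14 -> 2, x=16 -> 2. */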
2025 inline int ff_h263_round_chroma(int x){
2026     if (x >= 0)
2027         return  (h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
2028     else {
2029         x = -x;
2030         return -(h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
2031     }
2032 }
2033
2034 /**
2035  * motion compensation of a single macroblock
2036  * @param s context
2037  * @param dest_y luma destination pointer
2038  * @param dest_cb chroma cb/u destination pointer
2039  * @param dest_cr chroma cr/v destination pointer
2040  * @param dir direction (0->forward, 1->backward)
2041  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2042  * @param pix_op halfpel motion compensation function (average or put normally)
2043  * @param qpix_op qpel motion compensation function (average or put normally)
2044  * the motion vectors are taken from s->mv and the MV type from s->mv_type
2045  */
2046 static inline void MPV_motion(MpegEncContext *s, 
2047                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2048                               int dir, uint8_t **ref_picture, 
2049                               op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
2050 {
2051     int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y;
2052     int mb_x, mb_y, i;
2053     uint8_t *ptr, *dest;
2054     int emu=0;
2055
2056     mb_x = s->mb_x;
2057     mb_y = s->mb_y;
2058
2059     switch(s->mv_type) {
2060     case MV_TYPE_16X16:
2061 #ifdef CONFIG_RISKY
2062         if(s->mcsel){
2063             if(s->real_sprite_warping_points==1){
2064                 gmc1_motion(s, dest_y, dest_cb, dest_cr, 0,
2065                             ref_picture, 0);
2066             }else{
2067                 gmc_motion(s, dest_y, dest_cb, dest_cr, 0,
2068                             ref_picture, 0);
2069             }
2070         }else if(s->quarter_sample){
2071             qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
2072                         ref_picture, 0,
2073                         0, pix_op, qpix_op,
2074                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
2075         }else if(s->mspel){
2076             ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
2077                         ref_picture, pix_op,
2078                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
2079         }else
2080 #endif
2081         {
2082             mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2083                         ref_picture, 0,
2084                         0, pix_op,
2085                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
2086         }           
2087         break;
2088     case MV_TYPE_8X8:
2089         mx = 0;
2090         my = 0;
2091         if(s->quarter_sample){
2092             for(i=0;i<4;i++) {
2093                 motion_x = s->mv[dir][i][0];
2094                 motion_y = s->mv[dir][i][1];
2095
2096                 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
2097                 src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
2098                 src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;
2099                     
2100                 /* WARNING: do not forget half pels */
2101                 src_x = clip(src_x, -16, s->width);
2102                 if (src_x == s->width)
2103                     dxy &= ~3;
2104                 src_y = clip(src_y, -16, s->height);
2105                 if (src_y == s->height)
2106                     dxy &= ~12;
2107                     
2108                 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
2109                 if(s->flags&CODEC_FLAG_EMU_EDGE){
2110                     if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 8 > s->h_edge_pos
2111                                           || src_y + (motion_y&3) + 8 > s->v_edge_pos){
2112                         ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
2113                         ptr= s->edge_emu_buffer;
2114                     }
2115                 }
2116                 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
2117                 qpix_op[1][dxy](dest, ptr, s->linesize);
2118
2119                 mx += s->mv[dir][i][0]/2;
2120                 my += s->mv[dir][i][1]/2;
2121             }
2122         }else{
2123             for(i=0;i<4;i++) {
2124                 motion_x = s->mv[dir][i][0];
2125                 motion_y = s->mv[dir][i][1];
2126
2127                 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
2128                 src_x = mb_x * 16 + (motion_x >> 1) + (i & 1) * 8;
2129                 src_y = mb_y * 16 + (motion_y >> 1) + (i >>1) * 8;
2130                     
2131                 /* WARNING: do not forget half pels */
2132                 src_x = clip(src_x, -16, s->width);
2133                 if (src_x == s->width)
2134                     dxy &= ~1;
2135                 src_y = clip(src_y, -16, s->height);
2136                 if (src_y == s->height)
2137                     dxy &= ~2;
2138                     
2139                 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
2140                 if(s->flags&CODEC_FLAG_EMU_EDGE){
2141                     if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 8 > s->h_edge_pos
2142                                           || src_y + (motion_y&1) + 8 > s->v_edge_pos){
2143                         ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
2144                         ptr= s->edge_emu_buffer;
2145                     }
2146                 }
2147                 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
2148                 pix_op[1][dxy](dest, ptr, s->linesize, 8);
2149
2150                 mx += s->mv[dir][i][0];
2151                 my += s->mv[dir][i][1];
2152             }
2153         }
2154
2155         if(s->flags&CODEC_FLAG_GRAY) break;
2156         /* In case of 8X8, we construct a single chroma motion vector
2157            with a special rounding */
2158         mx= ff_h263_round_chroma(mx);
2159         my= ff_h263_round_chroma(my);
2160         dxy = ((my & 1) << 1) | (mx & 1);
2161         mx >>= 1;
2162         my >>= 1;
2163
2164         src_x = mb_x * 8 + mx;
2165         src_y = mb_y * 8 + my;
2166         src_x = clip(src_x, -8, s->width/2);
2167         if (src_x == s->width/2)
2168             dxy &= ~1;
2169         src_y = clip(src_y, -8, s->height/2);
2170         if (src_y == s->height/2)
2171             dxy &= ~2;
2172         
2173         offset = (src_y * (s->uvlinesize)) + src_x;
2174         ptr = ref_picture[1] + offset;
2175         if(s->flags&CODEC_FLAG_EMU_EDGE){
2176                 if(src_x<0 || src_y<0 || src_x + (dxy &1) + 8 > s->h_edge_pos>>1
2177                                       || src_y + (dxy>>1) + 8 > s->v_edge_pos>>1){
2178                     ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2179                     ptr= s->edge_emu_buffer;
2180                     emu=1;
2181                 }
2182             }
2183         pix_op[1][dxy](dest_cb, ptr, s->uvlinesize, 8);
2184
2185         ptr = ref_picture[2] + offset;
2186         if(emu){
2187             ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2188             ptr= s->edge_emu_buffer;
2189         }
2190         pix_op[1][dxy](dest_cr, ptr, s->uvlinesize, 8);
2191         break;
2192     case MV_TYPE_FIELD:
2193         if (s->picture_structure == PICT_FRAME) {
2194             if(s->quarter_sample){
2195                 /* top field */
2196                 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
2197                             ref_picture, s->field_select[dir][0] ? s->linesize : 0,
2198                             1, pix_op, qpix_op,
2199                             s->mv[dir][0][0], s->mv[dir][0][1], 8);
2200                 /* bottom field */
2201                 qpel_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2202                             ref_picture, s->field_select[dir][1] ? s->linesize : 0,
2203                             1, pix_op, qpix_op,
2204                             s->mv[dir][1][0], s->mv[dir][1][1], 8);
2205             }else{
2206                 /* top field */       
2207                 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2208                             ref_picture, s->field_select[dir][0] ? s->linesize : 0,
2209                             1, pix_op,
2210                             s->mv[dir][0][0], s->mv[dir][0][1], 8);
2211                 /* bottom field */
2212                 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2213                             ref_picture, s->field_select[dir][1] ? s->linesize : 0,
2214                             1, pix_op,
2215                             s->mv[dir][1][0], s->mv[dir][1][1], 8);
2216             }
2217         } else {
2218             int offset;
2219             if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){
2220                 offset= s->field_select[dir][0] ? s->linesize : 0;
2221             }else{
2222                 ref_picture= s->current_picture.data;
2223                 offset= s->field_select[dir][0] ? s->linesize : -s->linesize; 
2224             } 
2225
2226             mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2227                         ref_picture, offset,
2228                         0, pix_op,
2229                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
2230         }
2231         break;
2232     case MV_TYPE_16X8:{
2233         int offset;
2234         uint8_t **ref2picture;
2235
2236             if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){
2237                 ref2picture= ref_picture;
2238                 offset= s->field_select[dir][0] ? s->linesize : 0;
2239             }else{
2240                 ref2picture= s->current_picture.data;
2241                 offset= s->field_select[dir][0] ? s->linesize : -s->linesize; 
2242             } 
2243
2244             mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2245                         ref2picture, offset,
2246                         0, pix_op,
2247                         s->mv[dir][0][0], s->mv[dir][0][1], 8);
2248
2249
2250             if(s->picture_structure == s->field_select[dir][1] + 1 || s->pict_type == B_TYPE || s->first_field){
2251                 ref2picture= ref_picture;
2252                 offset= s->field_select[dir][1] ? s->linesize : 0;
2253             }else{
2254                 ref2picture= s->current_picture.data;
2255                 offset= s->field_select[dir][1] ? s->linesize : -s->linesize; 
2256             } 
2257             // I know it is ugly but this is the only way to fool emu_edge without rewriting mpeg_motion
2258             mpeg_motion(s, dest_y+16*s->linesize, dest_cb+8*s->uvlinesize, dest_cr+8*s->uvlinesize,
2259                         0,
2260                         ref2picture, offset,
2261                         0, pix_op,
2262                         s->mv[dir][1][0], s->mv[dir][1][1]+16, 8);
2263         }
2264         
2265         break;
2266     case MV_TYPE_DMV:
2267     {
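    /* MPEG-2 dual prime: predictions from both field parities are formed with a
       put followed by an avg, so each destination ends up as the average of a
       same-parity and an opposite-parity prediction. */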
2268     op_pixels_func (*dmv_pix_op)[4];
2269     int offset;
2270
2271         dmv_pix_op = s->dsp.put_pixels_tab;
2272
2273         if(s->picture_structure == PICT_FRAME){
2274             //put top field from top field
2275             mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2276                         ref_picture, 0,
2277                         1, dmv_pix_op,
2278                         s->mv[dir][0][0], s->mv[dir][0][1], 8);
2279             //put bottom field from bottom field
2280             mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2281                         ref_picture, s->linesize,
2282                         1, dmv_pix_op,
2283                         s->mv[dir][0][0], s->mv[dir][0][1], 8);
2284
2285             dmv_pix_op = s->dsp.avg_pixels_tab; 
2286         
2287             //avg top field from bottom field
2288             mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2289                         ref_picture, s->linesize,
2290                         1, dmv_pix_op,
2291                         s->mv[dir][2][0], s->mv[dir][2][1], 8);
2292             //avg bottom field from top field
2293             mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2294                         ref_picture, 0,
2295                         1, dmv_pix_op,
2296                         s->mv[dir][3][0], s->mv[dir][3][1], 8);
2297
2298         }else{
2299             offset=(s->picture_structure == PICT_BOTTOM_FIELD)? 
2300                          s->linesize : 0;
2301
2302             //put field from the same parity
2303             //same parity is never in the same frame
2304             mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2305                         ref_picture,offset,
2306                         0,dmv_pix_op,
2307                         s->mv[dir][0][0],s->mv[dir][0][1],16);
2308
2309             // after the put we average the opposite-parity prediction into the same block
2310             dmv_pix_op=s->dsp.avg_pixels_tab; 
2311
2312             //opposite parity is always in the same frame if this is second field
2313             if(!s->first_field){
2314                 ref_picture = s->current_picture.data;    
2315                 //top field is one linesize from frame beginning
2316                 offset=(s->picture_structure == PICT_BOTTOM_FIELD)? 
2317                         -s->linesize : s->linesize;
2318             }else 
2319                 offset=(s->picture_structure == PICT_BOTTOM_FIELD)? 
2320                         0 : s->linesize;
2321
2322             //avg field from the opposite parity
2323             mpeg_motion(s, dest_y, dest_cb, dest_cr,0,
2324                         ref_picture, offset,
2325                         0,dmv_pix_op,
2326                         s->mv[dir][2][0],s->mv[dir][2][1],16);
2327         }
2328     }
2329     break;
2330
2331     }
2332 }
2333
2334
2335 /* put block[] to dest[] */
2336 static inline void put_dct(MpegEncContext *s, 
2337                            DCTELEM *block, int i, uint8_t *dest, int line_size)
2338 {
2339     s->dct_unquantize(s, block, i, s->qscale);
2340     s->dsp.idct_put (dest, line_size, block);
2341 }
2342
2343 /* add block[] to dest[] */
2344 static inline void add_dct(MpegEncContext *s, 
2345                            DCTELEM *block, int i, uint8_t *dest, int line_size)
2346 {
2347     if (s->block_last_index[i] >= 0) {
2348         s->dsp.idct_add (dest, line_size, block);
2349     }
2350 }
2351
2352 static inline void add_dequant_dct(MpegEncContext *s, 
2353                            DCTELEM *block, int i, uint8_t *dest, int line_size)
2354 {
2355     if (s->block_last_index[i] >= 0) {
2356         s->dct_unquantize(s, block, i, s->qscale);
2357
2358         s->dsp.idct_add (dest, line_size, block);
2359     }
2360 }
2361
2362 /**
2363  * cleans dc, ac, coded_block for the current non intra MB
2364  */
2365 void ff_clean_intra_table_entries(MpegEncContext *s)
2366 {
2367     int wrap = s->block_wrap[0];
2368     int xy = s->block_index[0];
2369     
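    /* 1024 is the DC predictor reset value, presumably the DCT DC of a flat
       mid-grey (128) block, i.e. 128 * 8. */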
2370     s->dc_val[0][xy           ] = 
2371     s->dc_val[0][xy + 1       ] = 
2372     s->dc_val[0][xy     + wrap] =
2373     s->dc_val[0][xy + 1 + wrap] = 1024;
2374     /* ac pred */
2375     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
2376     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2377     if (s->msmpeg4_version>=3) {
2378         s->coded_block[xy           ] =
2379         s->coded_block[xy + 1       ] =
2380         s->coded_block[xy     + wrap] =
2381         s->coded_block[xy + 1 + wrap] = 0;
2382     }
2383     /* chroma */
2384     wrap = s->block_wrap[4];
2385     xy = s->mb_x + 1 + (s->mb_y + 1) * wrap;
2386     s->dc_val[1][xy] =
2387     s->dc_val[2][xy] = 1024;
2388     /* ac pred */
2389     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2390     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2391     
2392     s->mbintra_table[s->mb_x + s->mb_y*s->mb_stride]= 0;
2393 }
2394
2395 /* generic function called after a macroblock has been parsed by the
2396    decoder or after it has been encoded by the encoder.
2397
2398    Important variables used:
2399    s->mb_intra : true if intra macroblock
2400    s->mv_dir   : motion vector direction
2401    s->mv_type  : motion vector type
2402    s->mv       : motion vector
2403    s->interlaced_dct : true if interlaced dct used (mpeg2)
2404  */
2405 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
2406 {
2407     int mb_x, mb_y;
2408     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2409 #ifdef HAVE_XVMC
2410     if(s->avctx->xvmc_acceleration){
2411         XVMC_decode_mb(s,block);
2412         return;
2413     }
2414 #endif
2415
2416     mb_x = s->mb_x;
2417     mb_y = s->mb_y;
2418
2419     s->current_picture.qscale_table[mb_xy]= s->qscale;
2420
2421     /* update DC predictors for P macroblocks */
2422     if (!s->mb_intra) {
2423         if (s->h263_pred || s->h263_aic) {
2424             if(s->mbintra_table[mb_xy])
2425                 ff_clean_intra_table_entries(s);
2426         } else {
2427             s->last_dc[0] =
2428             s->last_dc[1] =
2429             s->last_dc[2] = 128 << s->intra_dc_precision;
2430         }
2431     }
2432     else if (s->h263_pred || s->h263_aic)
2433         s->mbintra_table[mb_xy]=1;
2434
2435     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
2436         uint8_t *dest_y, *dest_cb, *dest_cr;
2437         int dct_linesize, dct_offset;
2438         op_pixels_func (*op_pix)[4];
2439         qpel_mc_func (*op_qpix)[16];
2440         const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2441         const int uvlinesize= s->current_picture.linesize[1];
2442
2443         /* avoid copy if macroblock skipped in last frame too */
2444         /* skip only during decoding, as we might trash the buffers a bit during encoding */
2445         if(!s->encoding){
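            /* mbskip_table[] counts how many consecutive coded frames have skipped
               this MB; if that count reaches the buffer's age (frames since this
               picture buffer last held valid data), the pixels already in the
               buffer are still correct and the MC/IDCT below can be skipped. */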
2446             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2447             const int age= s->current_picture.age;
2448
2449             assert(age);
2450
2451             if (s->mb_skiped) {
2452                 s->mb_skiped= 0;
2453                 assert(s->pict_type!=I_TYPE);
2454  
2455                 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2456                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2457
2458                 /* if previous was skipped too, then nothing to do !  */
2459                 if (*mbskip_ptr >= age && s->current_picture.reference){
2460                     return;
2461                 }
2462             } else if(!s->current_picture.reference){
2463                 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2464                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2465             } else{
2466                 *mbskip_ptr = 0; /* not skipped */
2467             }
2468         }
2469
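        /* With interlaced (field) DCT the four luma blocks cover the two fields:
           blocks 2/3 start one line below blocks 0/1 and every block is written
           with a doubled line stride. */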
2470         if (s->interlaced_dct) {
2471             dct_linesize = linesize * 2;
2472             dct_offset = linesize;
2473         } else {
2474             dct_linesize = linesize;
2475             dct_offset = linesize * 8;
2476         }
2477         
2478         dest_y=  s->dest[0];
2479         dest_cb= s->dest[1];
2480         dest_cr= s->dest[2];
2481
2482         if (!s->mb_intra) {
2483             /* motion handling */
2484             /* decoding or more than one mb_type (MC was already done otherwise) */
2485             if(!s->encoding){
2486                 if ((!s->no_rounding) || s->pict_type==B_TYPE){                
2487                     op_pix = s->dsp.put_pixels_tab;
2488                     op_qpix= s->dsp.put_qpel_pixels_tab;
2489                 }else{
2490                     op_pix = s->dsp.put_no_rnd_pixels_tab;
2491                     op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
2492                 }
2493
2494                 if (s->mv_dir & MV_DIR_FORWARD) {
2495                     MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2496                     op_pix = s->dsp.avg_pixels_tab;
2497                     op_qpix= s->dsp.avg_qpel_pixels_tab;
2498                 }
2499                 if (s->mv_dir & MV_DIR_BACKWARD) {
2500                     MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2501                 }
2502             }
2503
2504             /* skip dequant / idct if we are really late ;) */
2505             if(s->hurry_up>1) return;
2506
2507             /* add dct residue */
2508             if(s->encoding || !(   s->mpeg2 || s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO 
2509                                 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2510                 add_dequant_dct(s, block[0], 0, dest_y, dct_linesize);
2511                 add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2512                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2513                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2514
2515                 if(!(s->flags&CODEC_FLAG_GRAY)){
2516                     add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize);
2517                     add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize);
2518                 }
2519             } else if(s->codec_id != CODEC_ID_WMV2){
2520                 add_dct(s, block[0], 0, dest_y, dct_linesize);
2521                 add_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2522                 add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2523                 add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2524
2525                 if(!(s->flags&CODEC_FLAG_GRAY)){
2526                     add_dct(s, block[4], 4, dest_cb, uvlinesize);
2527                     add_dct(s, block[5], 5, dest_cr, uvlinesize);
2528                 }
2529             } 
2530 #ifdef CONFIG_RISKY
2531             else{
2532                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2533             }
2534 #endif
2535         } else {
2536             /* dct only in intra block */
2537             if(s->encoding || !(s->mpeg2 || s->codec_id==CODEC_ID_MPEG1VIDEO)){
2538                 put_dct(s, block[0], 0, dest_y, dct_linesize);
2539                 put_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2540                 put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2541                 put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2542
2543                 if(!(s->flags&CODEC_FLAG_GRAY)){
2544                     put_dct(s, block[4], 4, dest_cb, uvlinesize);
2545                     put_dct(s, block[5], 5, dest_cr, uvlinesize);
2546                 }
2547             }else{
2548                 s->dsp.idct_put(dest_y                 , dct_linesize, block[0]);
2549                 s->dsp.idct_put(dest_y              + 8, dct_linesize, block[1]);
2550                 s->dsp.idct_put(dest_y + dct_offset    , dct_linesize, block[2]);
2551                 s->dsp.idct_put(dest_y + dct_offset + 8, dct_linesize, block[3]);
2552
2553                 if(!(s->flags&CODEC_FLAG_GRAY)){
2554                     s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2555                     s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2556                 }
2557             }
2558         }
2559     }
2560 }
2561
2562 #ifdef CONFIG_ENCODERS
2563
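/* Eliminates blocks that are barely worth coding: if every non-zero coefficient
   has magnitude 1 and the run-weighted score (tab[] below, favouring coefficients
   that follow each other closely; isolated ones after a long zero run add nothing)
   stays below the threshold, the block is zeroed, optionally keeping the DC. */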
2564 static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
2565 {
2566     static const char tab[64]=
2567         {3,2,2,1,1,1,1,1,
2568          1,1,1,1,1,1,1,1,
2569          1,1,1,1,1,1,1,1,
2570          0,0,0,0,0,0,0,0,
2571          0,0,0,0,0,0,0,0,
2572          0,0,0,0,0,0,0,0,
2573          0,0,0,0,0,0,0,0,
2574          0,0,0,0,0,0,0,0};
2575     int score=0;
2576     int run=0;
2577     int i;
2578     DCTELEM *block= s->block[n];
2579     const int last_index= s->block_last_index[n];
2580     int skip_dc;
2581
2582     if(threshold<0){
2583         skip_dc=0;
2584         threshold= -threshold;
2585     }else
2586         skip_dc=1;
2587
2588     /* are all the coefficients we could set to zero already zero? */
2589     if(last_index<=skip_dc - 1) return;
2590
2591     for(i=0; i<=last_index; i++){
2592         const int j = s->intra_scantable.permutated[i];
2593         const int level = ABS(block[j]);
2594         if(level==1){
2595             if(skip_dc && i==0) continue;
2596             score+= tab[run];
2597             run=0;
2598         }else if(level>1){
2599             return;
2600         }else{
2601             run++;
2602         }
2603     }
2604     if(score >= threshold) return;
2605     for(i=skip_dc; i<=last_index; i++){
2606         const int j = s->intra_scantable.permutated[i];
2607         block[j]=0;
2608     }
2609     if(block[0]) s->block_last_index[n]= 0;
2610     else         s->block_last_index[n]= -1;
2611 }
2612
2613 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index)
2614 {
2615     int i;
2616     const int maxlevel= s->max_qcoeff;
2617     const int minlevel= s->min_qcoeff;
2618     
2619     if(s->mb_intra){
2620         i=1; //skip clipping of intra dc
2621     }else
2622         i=0;
2623     
2624     for(;i<=last_index; i++){
2625         const int j= s->intra_scantable.permutated[i];
2626         int level = block[j];
2627        
2628         if     (level>maxlevel) level=maxlevel;
2629         else if(level<minlevel) level=minlevel;
2630
2631         block[j]= level;
2632     }
2633 }
2634
2635 #if 0
2636 static int pix_vcmp16x8(uint8_t *s, int stride){ //FIXME move to dsputil & optimize
2637     int score=0;
2638     int x,y;
2639     
2640     for(y=0; y<7; y++){
2641         for(x=0; x<16; x+=4){
2642             score+= ABS(s[x  ] - s[x  +stride]) + ABS(s[x+1] - s[x+1+stride]) 
2643                    +ABS(s[x+2] - s[x+2+stride]) + ABS(s[x+3] - s[x+3+stride]);
2644         }
2645         s+= stride;
2646     }
2647     
2648     return score;
2649 }
2650
2651 static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move to dsputil & optimize
2652     int score=0;
2653     int x,y;
2654     
2655     for(y=0; y<7; y++){
2656         for(x=0; x<16; x++){
2657             score+= ABS(s1[x  ] - s2[x ] - s1[x  +stride] + s2[x +stride]);
2658         }
2659         s1+= stride;
2660         s2+= stride;
2661     }
2662     
2663     return score;
2664 }
2665 #else
2666 #define SQ(a) ((a)*(a))
2667
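/* Sum of squared differences between vertically adjacent lines of a 16x8 half-
   macroblock (the _diff_ variant does the same on a prediction difference);
   encode_mb() compares this for frame-ordered vs. field-ordered lines to decide
   whether interlaced (field) DCT is cheaper for the macroblock. */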
2668 static int pix_vcmp16x8(uint8_t *s, int stride){ //FIXME move to dsputil & optimize
2669     int score=0;
2670     int x,y;
2671     
2672     for(y=0; y<7; y++){
2673         for(x=0; x<16; x+=4){
2674             score+= SQ(s[x  ] - s[x  +stride]) + SQ(s[x+1] - s[x+1+stride]) 
2675                    +SQ(s[x+2] - s[x+2+stride]) + SQ(s[x+3] - s[x+3+stride]);
2676         }
2677         s+= stride;
2678     }
2679     
2680     return score;
2681 }
2682
2683 static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move to dsputil & optimize
2684     int score=0;
2685     int x,y;
2686     
2687     for(y=0; y<7; y++){
2688         for(x=0; x<16; x++){
2689             score+= SQ(s1[x  ] - s2[x ] - s1[x  +stride] + s2[x +stride]);
2690         }
2691         s1+= stride;
2692         s2+= stride;
2693     }
2694     
2695     return score;
2696 }
2697
2698 #endif
2699
2700 #endif //CONFIG_ENCODERS
2701
2702 /**
2703  * Calls the draw_horiz_band() callback to notify the application that a horizontal band of lines has been decoded.
2704  * @param h is the normal height, this will be reduced automatically if needed for the last row
2705  */
2706 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2707     if (s->avctx->draw_horiz_band) {
2708         AVFrame *src;
2709         uint8_t *src_ptr[3];
2710         int offset[4];
2711         
2712         if(s->picture_structure != PICT_FRAME){
2713             h <<= 1;
2714             y <<= 1;
2715             if(s->first_field  && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2716         }
2717
2718         h= FFMIN(h, s->height - y);
2719
2720         if(s->pict_type==B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER)) 
2721             src= (AVFrame*)s->current_picture_ptr;
2722         else if(s->last_picture_ptr)
2723             src= (AVFrame*)s->last_picture_ptr;
2724         else
2725             return;
2726             
2727         if(s->pict_type==B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2728             offset[0]=
2729             offset[1]=
2730             offset[2]=
2731             offset[3]= 0;
2732         }else{
2733             offset[0]= y * s->linesize;
2734             offset[1]= 
2735             offset[2]= (y>>1) * s->uvlinesize;
2736             offset[3]= 0;
2737         }
2738
2739         emms_c();
2740
2741         s->avctx->draw_horiz_band(s->avctx, src, offset,
2742                                   y, s->picture_structure, h);
2743     }
2744 }
2745
2746 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2747     const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2748     const int uvlinesize= s->current_picture.linesize[1];
2749         
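    /* block_index[0..3] appear to address the four luma 8x8 blocks of the current
       MB and [4]/[5] the Cb/Cr blocks inside the per-block prediction arrays
       (dc_val/ac_val etc.), which carry a one-block border -- hence the +1/+2 and
       block_wrap[] terms. */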
2750     s->block_index[0]= s->block_wrap[0]*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2751     s->block_index[1]= s->block_wrap[0]*(s->mb_y*2 + 1)     + s->mb_x*2;
2752     s->block_index[2]= s->block_wrap[0]*(s->mb_y*2 + 2) - 1 + s->mb_x*2;
2753     s->block_index[3]= s->block_wrap[0]*(s->mb_y*2 + 2)     + s->mb_x*2;
2754     s->block_index[4]= s->block_wrap[4]*(s->mb_y + 1)                    + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
2755     s->block_index[5]= s->block_wrap[4]*(s->mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
2756     
2757     if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME){
2758         s->dest[0] = s->current_picture.data[0] + s->mb_x * 16 - 16;
2759         s->dest[1] = s->current_picture.data[1] + s->mb_x * 8 - 8;
2760         s->dest[2] = s->current_picture.data[2] + s->mb_x * 8 - 8;
2761     }else{
2762         s->dest[0] = s->current_picture.data[0] + (s->mb_y * 16* linesize  ) + s->mb_x * 16 - 16;
2763         s->dest[1] = s->current_picture.data[1] + (s->mb_y * 8 * uvlinesize) + s->mb_x * 8 - 8;
2764         s->dest[2] = s->current_picture.data[2] + (s->mb_y * 8 * uvlinesize) + s->mb_x * 8 - 8;
2765     }    
2766 }
2767
2768 #ifdef CONFIG_ENCODERS
2769
2770 static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2771 {
2772     const int mb_x= s->mb_x;
2773     const int mb_y= s->mb_y;
2774     int i;
2775     int skip_dct[6];
2776     int dct_offset   = s->linesize*8; //default for progressive frames
2777     
2778     for(i=0; i<6; i++) skip_dct[i]=0;
2779     
2780     if(s->adaptive_quant){
2781         s->dquant= s->current_picture.qscale_table[mb_x + mb_y*s->mb_stride] - s->qscale;
2782
2783         if(s->out_format==FMT_H263){
2784             if     (s->dquant> 2) s->dquant= 2;
2785             else if(s->dquant<-2) s->dquant=-2;
2786         }
2787             
2788         if(s->codec_id==CODEC_ID_MPEG4){        
2789             if(!s->mb_intra){
2790                 if(s->mv_dir&MV_DIRECT)
2791                     s->dquant=0;
2792
2793                 assert(s->dquant==0 || s->mv_type!=MV_TYPE_8X8);
2794             }
2795         }
2796         s->qscale+= s->dquant;
2797         s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
2798         s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
2799     }
2800
2801     if (s->mb_intra) {
2802         uint8_t *ptr;
2803         int wrap_y;
2804         int emu=0;
2805
2806         wrap_y = s->linesize;
2807         ptr = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
2808
2809         if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2810             ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2811             ptr= s->edge_emu_buffer;
2812             emu=1;
2813         }
2814         
2815         if(s->flags&CODEC_FLAG_INTERLACED_DCT){
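            /* Frame/field DCT decision: if the lines of each field (stride
               2*wrap_y) are noticeably smoother than consecutive frame lines,
               code this MB with interlaced DCT, i.e. gather the blocks with a
               doubled line stride. */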
2816             int progressive_score, interlaced_score;
2817             
2818             progressive_score= pix_vcmp16x8(ptr, wrap_y  ) + pix_vcmp16x8(ptr + wrap_y*8, wrap_y );
2819             interlaced_score = pix_vcmp16x8(ptr, wrap_y*2) + pix_vcmp16x8(ptr + wrap_y  , wrap_y*2);
2820             
2821             if(progressive_score > interlaced_score + 100){
2822                 s->interlaced_dct=1;
2823             
2824                 dct_offset= wrap_y;
2825                 wrap_y<<=1;
2826             }else
2827                 s->interlaced_dct=0;
2828         }
2829         
2830         s->dsp.get_pixels(s->block[0], ptr                 , wrap_y);
2831         s->dsp.get_pixels(s->block[1], ptr              + 8, wrap_y);
2832         s->dsp.get_pixels(s->block[2], ptr + dct_offset    , wrap_y);
2833         s->dsp.get_pixels(s->block[3], ptr + dct_offset + 8, wrap_y);
2834
2835         if(s->flags&CODEC_FLAG_GRAY){
2836             skip_dct[4]= 1;
2837             skip_dct[5]= 1;
2838         }else{
2839             int wrap_c = s->uvlinesize;
2840             ptr = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2841             if(emu){
2842                 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2843                 ptr= s->edge_emu_buffer;
2844             }
2845             s->dsp.get_pixels(s->block[4], ptr, wrap_c);
2846
2847             ptr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
2848             if(emu){
2849                 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2850                 ptr= s->edge_emu_buffer;
2851             }
2852             s->dsp.get_pixels(s->block[5], ptr, wrap_c);
2853         }
2854     }else{
2855         op_pixels_func (*op_pix)[4];
2856         qpel_mc_func (*op_qpix)[16];
2857         uint8_t *dest_y, *dest_cb, *dest_cr;
2858         uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2859         int wrap_y, wrap_c;
2860         int emu=0;
2861
2862         dest_y  = s->dest[0];
2863         dest_cb = s->dest[1];
2864         dest_cr = s->dest[2];
2865         wrap_y = s->linesize;
2866         wrap_c = s->uvlinesize;
2867         ptr_y  = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
2868         ptr_cb = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2869         ptr_cr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
2870
2871         if ((!s->no_rounding) || s->pict_type==B_TYPE){
2872             op_pix = s->dsp.put_pixels_tab;
2873             op_qpix= s->dsp.put_qpel_pixels_tab;
2874         }else{
2875             op_pix = s->dsp.put_no_rnd_pixels_tab;
2876             op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
2877         }
2878
2879         if (s->mv_dir & MV_DIR_FORWARD) {
2880             MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2881             op_pix = s->dsp.avg_pixels_tab;
2882             op_qpix= s->dsp.avg_qpel_pixels_tab;
2883         }
2884         if (s->mv_dir & MV_DIR_BACKWARD) {
2885             MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2886         }
2887
2888         if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2889             ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2890             ptr_y= s->edge_emu_buffer;
2891             emu=1;
2892         }
2893         
2894         if(s->flags&CODEC_FLAG_INTERLACED_DCT){
2895             int progressive_score, interlaced_score;
2896             
2897             progressive_score= pix_diff_vcmp16x8(ptr_y           , dest_y           , wrap_y  ) 
2898                              + pix_diff_vcmp16x8(ptr_y + wrap_y*8, dest_y + wrap_y*8, wrap_y  );
2899             interlaced_score = pix_diff_vcmp16x8(ptr_y           , dest_y           , wrap_y*2)
2900                              + pix_diff_vcmp16x8(ptr_y + wrap_y  , dest_y + wrap_y  , wrap_y*2);
2901             
2902             if(progressive_score > interlaced_score + 600){
2903                 s->interlaced_dct=1;
2904             
2905                 dct_offset= wrap_y;
2906                 wrap_y<<=1;
2907             }else
2908                 s->interlaced_dct=0;
2909         }
2910         
2911         s->dsp.diff_pixels(s->block[0], ptr_y                 , dest_y                 , wrap_y);
2912         s->dsp.diff_pixels(s->block[1], ptr_y              + 8, dest_y              + 8, wrap_y);
2913         s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset    , dest_y + dct_offset    , wrap_y);
2914         s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y);
2915         
2916         if(s->flags&CODEC_FLAG_GRAY){
2917             skip_dct[4]= 1;
2918             skip_dct[5]= 1;
2919         }else{
2920             if(emu){
2921                 ff_emulated_edge_mc(s->edge_emu_buffer, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2922                 ptr_cb= s->edge_emu_buffer;
2923             }
2924             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2925             if(emu){
2926                 ff_emulated_edge_mc(s->edge_emu_buffer, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2927                 ptr_cr= s->edge_emu_buffer;
2928             }
2929             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2930         }
2931         /* pre-quantization: skip blocks whose prediction error is already below a qscale-dependent SAD threshold */
2932         if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){
2933             //FIXME optimize
2934             if(s->dsp.pix_abs8x8(ptr_y               , dest_y               , wrap_y) < 20*s->qscale) skip_dct[0]= 1;
2935             if(s->dsp.pix_abs8x8(ptr_y            + 8, dest_y            + 8, wrap_y) < 20*s->qscale) skip_dct[1]= 1;
2936             if(s->dsp.pix_abs8x8(ptr_y +dct_offset   , dest_y +dct_offset   , wrap_y) < 20*s->qscale) skip_dct[2]= 1;
2937             if(s->dsp.pix_abs8x8(ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y) < 20*s->qscale) skip_dct[3]= 1;
2938             if(s->dsp.pix_abs8x8(ptr_cb              , dest_cb              , wrap_c) < 20*s->qscale) skip_dct[4]= 1;
2939             if(s->dsp.pix_abs8x8(ptr_cr              , dest_cr              , wrap_c) < 20*s->qscale) skip_dct[5]= 1;
2940 #if 0
2941 {
2942  static int stat[7];
2943  int num=0;
2944  for(i=0; i<6; i++)
2945   if(skip_dct[i]) num++;
2946  stat[num]++;
2947  
2948  if(s->mb_x==0 && s->mb_y==0){
2949   for(i=0; i<7; i++){
2950    printf("%6d %1d\n", stat[i], i);
2951   }
2952  }
2953 }
2954 #endif
2955         }
2956
2957     }
2958             
2959 #if 0
2960             {
2961                 float adap_parm;
2962                 
2963                 adap_parm = ((s->avg_mb_var << 1) + s->mb_var[s->mb_stride*mb_y+mb_x] + 1.0) /
2964                             ((s->mb_var[s->mb_stride*mb_y+mb_x] << 1) + s->avg_mb_var + 1.0);
2965             
2966                 printf("\ntype=%c qscale=%2d adap=%0.2f dquant=%4.2f var=%4d avgvar=%4d", 
2967                         (s->mb_type[s->mb_stride*mb_y+mb_x] > 0) ? 'I' : 'P', 
2968                         s->qscale, adap_parm, s->qscale*adap_parm,
2969                         s->mb_var[s->mb_stride*mb_y+mb_x], s->avg_mb_var);
2970             }
2971 #endif
2972     /* DCT & quantize */
2973     if(s->out_format==FMT_MJPEG){
2974         for(i=0;i<6;i++) {
2975             int overflow;
2976             s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, 8, &overflow);
2977             if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
2978         }
2979     }else{
2980         for(i=0;i<6;i++) {
2981             if(!skip_dct[i]){
2982                 int overflow;
2983                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2984             // FIXME we could decide to change the quantizer instead of clipping
2985             // JS: I don't think that would be a good idea; it could lower quality instead
2986             //     of improving it. Only INTRADC clipping deserves changes in the quantizer
2987                 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
2988             }else
2989                 s->block_last_index[i]= -1;
2990         }
2991         if(s->luma_elim_threshold && !s->mb_intra)
2992             for(i=0; i<4; i++)
2993                 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2994         if(s->chroma_elim_threshold && !s->mb_intra)
2995             for(i=4; i<6; i++)
2996                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2997     }
2998
2999     if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
3000         s->block_last_index[4]=
3001         s->block_last_index[5]= 0;
3002         s->block[4][0]=
3003         s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale;
3004     }
3005
3006     /* huffman encode */
3007     switch(s->codec_id){ //FIXME a function pointer could be slightly faster
3008     case CODEC_ID_MPEG1VIDEO:
3009         mpeg1_encode_mb(s, s->block, motion_x, motion_y); break;
3010 #ifdef CONFIG_RISKY
3011     case CODEC_ID_MPEG4:
3012         mpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
3013     case CODEC_ID_MSMPEG4V2:
3014     case CODEC_ID_MSMPEG4V3:
3015     case CODEC_ID_WMV1:
3016         msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
3017     case CODEC_ID_WMV2:
3018          ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break;
3019     case CODEC_ID_H263:
3020     case CODEC_ID_H263P:
3021     case CODEC_ID_FLV1:
3022     case CODEC_ID_RV10:
3023         h263_encode_mb(s, s->block, motion_x, motion_y); break;
3024 #endif
3025     case CODEC_ID_MJPEG:
3026         mjpeg_encode_mb(s, s->block); break;
3027     default:
3028         assert(0);
3029     }
3030 }
3031
3032 #endif //CONFIG_ENCODERS
3033
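/*
 * A rough usage sketch (an assumption about the caller, not code from this file):
 * decoders that set CODEC_FLAG_TRUNCATED first locate the next frame boundary
 * with their own codec-specific scanner (called find_frame_end() here purely for
 * illustration), then let ff_combine_frame() buffer partial data:
 *
 *     next= find_frame_end(s, buf, buf_size);
 *     if( ff_combine_frame(s, next, &buf, &buf_size) < 0 )
 *         return buf_size;            // no complete frame yet, keep buffering
 *     // buf / buf_size now describe exactly one complete frame
 */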
3034 /**
3035  * Combines the (truncated) bitstream into a complete frame.
3036  * @returns -1 if no complete frame could be created
3037  */
3038 int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size){
3039     ParseContext *pc= &s->parse_context;
3040
3041 #if 0
3042     if(pc->overread){
3043         printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
3044         printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
3045     }
3046 #endif
3047
3048     /* copy overread bytes from the last frame into the buffer */
3049     for(; pc->overread>0; pc->overread--){
3050         pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
3051     }
3052     
3053     pc->last_index= pc->index;
3054
3055     /* copy into the buffer and return */
3056     if(next == END_NOT_FOUND){
3057         pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
3058
3059         memcpy(&pc->buffer[pc->index], *buf, *buf_size);
3060         pc->index += *buf_size;
3061         return -1;
3062     }
3063
3064     *buf_size=
3065     pc->overread_index= pc->index + next;
3066     
3067     /* append to buffer */
3068     if(pc->index){
3069         pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
3070
3071         memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE );
3072         pc->index = 0;
3073         *buf= pc->buffer;
3074     }
3075
3076     /* store overread bytes */
3077     for(;next < 0; next++){
3078         pc->state = (pc->state<<8) | pc->buffer[pc->last_index + next];
3079         pc->overread++;
3080     }
3081
3082 #if 0
3083     if(pc->overread){
3084         printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
3085         printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
3086     }
3087 #endif
3088
3089     return 0;
3090 }
3091
3092 void ff_mpeg_flush(AVCodecContext *avctx){
3093     int i;
3094     MpegEncContext *s = avctx->priv_data;
3095     
3096     for(i=0; i<MAX_PICTURE_COUNT; i++){
3097        if(s->picture[i].data[0] && (   s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
3098                                     || s->picture[i].type == FF_BUFFER_TYPE_USER))
3099         avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
3100     }
3101     s->last_picture_ptr = s->next_picture_ptr = NULL;
3102     
3103     s->parse_context.state= -1;
3104     s->parse_context.frame_start_found= 0;
3105     s->parse_context.overread= 0;
3106     s->parse_context.overread_index= 0;
3107     s->parse_context.index= 0;
3108     s->parse_context.last_index= 0;
3109 }
3110
3111 #ifdef CONFIG_ENCODERS
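/* append the first 'length' bits of src to pb; src is read as big-endian
   16 bit words, so up to 15 trailing bits are written separately */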
3112 void ff_copy_bits(PutBitContext *pb, uint8_t *src, int length)
3113 {
3114     int bytes= length>>4;
3115     int bits= length&15;
3116     int i;
3117
3118     if(length==0) return;
3119
3120     for(i=0; i<bytes; i++) put_bits(pb, 16, be2me_16(((uint16_t*)src)[i]));
3121     put_bits(pb, bits, be2me_16(((uint16_t*)src)[i])>>(16-bits));
3122 }
3123
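/* Save/restore the encoder state that encode_mb() touches, so that the same
   macroblock can be tried with several coding modes and the cheapest one kept
   (see encode_mb_hq() below). */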
3124 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
3125     int i;
3126
3127     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
3128
3129     /* mpeg1 */
3130     d->mb_skip_run= s->mb_skip_run;
3131     for(i=0; i<3; i++)
3132         d->last_dc[i]= s->last_dc[i];
3133     
3134     /* statistics */
3135     d->mv_bits= s->mv_bits;
3136     d->i_tex_bits= s->i_tex_bits;
3137     d->p_tex_bits= s->p_tex_bits;
3138     d->i_count= s->i_count;
3139     d->f_count= s->f_count;
3140     d->b_count= s->b_count;
3141     d->skip_count= s->skip_count;
3142     d->misc_bits= s->misc_bits;
3143     d->last_bits= 0;
3144
3145     d->mb_skiped= 0;
3146     d->qscale= s->qscale;
3147 }
3148
3149 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
3150     int i;
3151
3152     memcpy(d->mv, s->mv, 2*4*2*sizeof(int)); 
3153     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
3154     
3155     /* mpeg1 */
3156     d->mb_skip_run= s->mb_skip_run;
3157     for(i=0; i<3; i++)
3158         d->last_dc[i]= s->last_dc[i];
3159     
3160     /* statistics */
3161     d->mv_bits= s->mv_bits;
3162     d->i_tex_bits= s->i_tex_bits;
3163     d->p_tex_bits= s->p_tex_bits;
3164     d->i_count= s->i_count;
3165     d->f_count= s->f_count;
3166     d->b_count= s->b_count;
3167     d->skip_count= s->skip_count;
3168     d->misc_bits= s->misc_bits;
3169
3170     d->mb_intra= s->mb_intra;
3171     d->mb_skiped= s->mb_skiped;
3172     d->mv_type= s->mv_type;
3173     d->mv_dir= s->mv_dir;
3174     d->pb= s->pb;
3175     if(s->data_partitioning){
3176         d->pb2= s->pb2;
3177         d->tex_pb= s->tex_pb;
3178     }
3179     d->block= s->block;
3180     for(i=0; i<6; i++)
3181         d->block_last_index[i]= s->block_last_index[i];
3182     d->interlaced_dct= s->interlaced_dct;
3183     d->qscale= s->qscale;
3184 }
3185
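/* Encode one macroblock with the given mode into one of two alternating
   bit buffers / scratch destinations; if the resulting cost (the bit count,
   or a combined rate-distortion score for FF_MB_DECISION_RD) beats *dmin,
   keep it as the new best mode. */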
3186 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, 
3187                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
3188                            int *dmin, int *next_block, int motion_x, int motion_y)
3189 {
3190     int score;
3191     uint8_t *dest_backup[3];
3192     
3193     copy_context_before_encode(s, backup, type);
3194
3195     s->block= s->blocks[*next_block];
3196     s->pb= pb[*next_block];
3197     if(s->data_partitioning){
3198         s->pb2   = pb2   [*next_block];
3199         s->tex_pb= tex_pb[*next_block];
3200     }
3201     
3202     if(*next_block){
3203         memcpy(dest_backup, s->dest, sizeof(s->dest));
3204         s->dest[0] = s->me.scratchpad;
3205         s->dest[1] = s->me.scratchpad + 16;
3206         s->dest[2] = s->me.scratchpad + 16 + 8;
3207         assert(2*s->uvlinesize == s->linesize); //should be no prob for encoding
3208         assert(s->linesize >= 64); //FIXME
3209     }
3210
3211     encode_mb(s, motion_x, motion_y);
3212     
3213     score= get_bit_count(&s->pb);
3214     if(s->data_partitioning){
3215         score+= get_bit_count(&s->pb2);
3216         score+= get_bit_count(&s->tex_pb);
3217     }
3218    
3219     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
3220         MPV_decode_mb(s, s->block);
3221
3222         score *= s->qscale * s->qscale * 109;
3223         score += sse_mb(s) << 7;
3224     }
3225     
3226     if(*next_block){
3227         memcpy(s->dest, dest_backup, sizeof(s->dest));
3228     }
3229
3230     if(score<*dmin){
3231         *dmin= score;
3232         *next_block^=1;
3233
3234         copy_context_after_encode(best, s, type);
3235     }
3236 }
3237                 
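/* sum of squared errors over a w x h block, with fast dsputil paths for the
   full 16x16 and 8x8 cases */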
3238 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
3239     uint32_t *sq = squareTbl + 256;
3240     int acc=0;
3241     int x,y;
3242     
3243     if(w==16 && h==16) 
3244         return s->dsp.sse[0](NULL, src1, src2, stride);
3245     else if(w==8 && h==8)
3246         return s->dsp.sse[1](NULL, src1, src2, stride);
3247     
3248     for(y=0; y<h; y++){
3249         for(x=0; x<w; x++){
3250             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
3251         } 
3252     }
3253     
3254     assert(acc>=0);
3255     
3256     return acc;
3257 }
3258
3259 static int sse_mb(MpegEncContext *s){
3260     int w= 16;
3261     int h= 16;
3262
3263     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3264     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3265
3266     if(w==16 && h==16)
3267         return  s->dsp.sse[0](NULL, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize)
3268                +s->dsp.sse[1](NULL, s->new_picture.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize)
3269                +s->dsp.sse[1](NULL, s->new_picture.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize);
3270     else
3271         return  sse(s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
3272                +sse(s, s->new_picture.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
3273                +sse(s, s->new_picture.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
3274 }
3275
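/* Encode one complete picture: run motion estimation (or measure intra
   complexity for I-frames), make picture-level decisions (scene-change
   promotion to I-frame, f_code/b_code selection, qscale), write the picture
   header and then encode every macroblock, optionally trying several MB
   types per macroblock. */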
3276 static void encode_picture(MpegEncContext *s, int picture_number)
3277 {
3278     int mb_x, mb_y, pdif = 0;
3279     int i;
3280     int bits;
3281     MpegEncContext best_s, backup_s;
3282     uint8_t bit_buf[2][3000];
3283     uint8_t bit_buf2[2][3000];
3284     uint8_t bit_buf_tex[2][3000];
3285     PutBitContext pb[2], pb2[2], tex_pb[2];
3286
3287     for(i=0; i<2; i++){
3288         init_put_bits(&pb    [i], bit_buf    [i], 3000, NULL, NULL);
3289         init_put_bits(&pb2   [i], bit_buf2   [i], 3000, NULL, NULL);
3290         init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000, NULL, NULL);
3291     }
3292
3293     s->picture_number = picture_number;
3294     
3295     /* Reset the average MB variance */
3296     s->current_picture.mb_var_sum = 0;
3297     s->current_picture.mc_mb_var_sum = 0;
3298
3299 #ifdef CONFIG_RISKY
3300     /* we need to initialize some time vars before we can encode b-frames */
3301     // RAL: Condition added for MPEG1VIDEO
3302     if (s->codec_id == CODEC_ID_MPEG1VIDEO || (s->h263_pred && !s->h263_msmpeg4))
3303         ff_set_mpeg4_time(s, s->picture_number); 
3304 #endif
3305         
3306     s->scene_change_score=0;
3307     
3308     s->qscale= (int)(s->frame_qscale + 0.5); //FIXME qscale / ... stuff for ME rate distortion
3309     
3310     if(s->pict_type==I_TYPE){
3311         if(s->msmpeg4_version >= 3) s->no_rounding=1;
3312         else                        s->no_rounding=0;
3313     }else if(s->pict_type!=B_TYPE){
3314         if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
3315             s->no_rounding ^= 1;          
3316     }
3317     
3318     /* Estimate motion for every MB */
3319     s->mb_intra=0; //for the rate distortion & bit compare functions
3320     if(s->pict_type != I_TYPE){
3321         if(s->pict_type != B_TYPE){
3322             if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
3323                 s->me.pre_pass=1;
3324                 s->me.dia_size= s->avctx->pre_dia_size;
3325
3326                 for(mb_y=s->mb_height-1; mb_y >=0 ; mb_y--) {
3327                     for(mb_x=s->mb_width-1; mb_x >=0 ; mb_x--) {
3328                         s->mb_x = mb_x;
3329                         s->mb_y = mb_y;
3330                         ff_pre_estimate_p_frame_motion(s, mb_x, mb_y);
3331                     }
3332                 }
3333                 s->me.pre_pass=0;
3334             }
3335         }
3336
3337         s->me.dia_size= s->avctx->dia_size;
3338         for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3339             s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
3340             s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
3341             s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
3342             s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
3343             for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3344                 s->mb_x = mb_x;
3345                 s->mb_y = mb_y;
3346                 s->block_index[0]+=2;
3347                 s->block_index[1]+=2;
3348                 s->block_index[2]+=2;
3349                 s->block_index[3]+=2;
3350                 
3351                 /* compute motion vector & mb_type and store in context */
3352                 if(s->pict_type==B_TYPE)
3353                     ff_estimate_b_frame_motion(s, mb_x, mb_y);
3354                 else
3355                     ff_estimate_p_frame_motion(s, mb_x, mb_y);
3356             }
3357         }
3358     }else /* if(s->pict_type == I_TYPE) */{
3359         /* I-Frame */
3360         //FIXME do we need to zero them?
3361         memset(s->motion_val[0], 0, sizeof(int16_t)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2);
3362         memset(s->p_mv_table   , 0, sizeof(int16_t)*(s->mb_stride)*s->mb_height*2);
3363         memset(s->mb_type      , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);
3364         
3365         if(!s->fixed_qscale){
3366             /* finding spatial complexity for I-frame rate control */
3367             for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3368                 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3369                     int xx = mb_x * 16;
3370                     int yy = mb_y * 16;
3371                     uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
3372                     int varc;
3373                     int sum = s->dsp.pix_sum(pix, s->linesize);
3374     
3375                     varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
3376
3377                     s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
3378                     s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
3379                     s->current_picture.mb_var_sum    += varc;
3380                 }
3381             }
3382         }
3383     }
3384     emms_c();
3385
3386     if(s->scene_change_score > 0 && s->pict_type == P_TYPE){
3387         s->pict_type= I_TYPE;
3388         memset(s->mb_type   , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);
3389 //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3390     }
3391
3392     if(!s->umvplus){
3393         if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) {
3394             s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER);
3395         
3396             ff_fix_long_p_mvs(s);
3397         }
3398
3399         if(s->pict_type==B_TYPE){
3400             int a, b;
3401
3402             a = ff_get_best_fcode(s, s->b_forw_mv_table, MB_TYPE_FORWARD);
3403             b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, MB_TYPE_BIDIR);
3404             s->f_code = FFMAX(a, b);
3405
3406             a = ff_get_best_fcode(s, s->b_back_mv_table, MB_TYPE_BACKWARD);
3407             b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, MB_TYPE_BIDIR);
3408             s->b_code = FFMAX(a, b);
3409
3410             ff_fix_long_b_mvs(s, s->b_forw_mv_table, s->f_code, MB_TYPE_FORWARD);
3411             ff_fix_long_b_mvs(s, s->b_back_mv_table, s->b_code, MB_TYPE_BACKWARD);
3412             ff_fix_long_b_mvs(s, s->b_bidir_forw_mv_table, s->f_code, MB_TYPE_BIDIR);
3413             ff_fix_long_b_mvs(s, s->b_bidir_back_mv_table, s->b_code, MB_TYPE_BIDIR);
3414         }
3415     }
3416     
3417     if (s->fixed_qscale) 
3418         s->frame_qscale = s->current_picture.quality;
3419     else
3420         s->frame_qscale = ff_rate_estimate_qscale(s);
3421
3422     if(s->adaptive_quant){
3423 #ifdef CONFIG_RISKY
3424         switch(s->codec_id){
3425         case CODEC_ID_MPEG4:
3426             ff_clean_mpeg4_qscales(s);
3427             break;
3428         case CODEC_ID_H263:
3429         case CODEC_ID_H263P:
3430         case CODEC_ID_FLV1:
3431             ff_clean_h263_qscales(s);
3432             break;
3433         }
3434 #endif
3435
3436         s->qscale= s->current_picture.qscale_table[0];
3437     }else
3438         s->qscale= (int)(s->frame_qscale + 0.5);
3439         
3440     if (s->out_format == FMT_MJPEG) {
3441         /* for mjpeg, we do include qscale in the matrix */
3442         s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
3443         for(i=1;i<64;i++){
3444             int j= s->dsp.idct_permutation[i];
3445
3446             s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3447         }
3448         convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, 
3449                        s->q_intra_matrix16_bias, s->intra_matrix, s->intra_quant_bias, 8, 8);
3450     }
3451     
3452     //FIXME var duplication
3453     s->current_picture.key_frame= s->pict_type == I_TYPE;
3454     s->current_picture.pict_type= s->pict_type;
3455
3456     if(s->current_picture.key_frame)
3457         s->picture_in_gop_number=0;
3458
3459     s->last_bits= get_bit_count(&s->pb);
3460     switch(s->out_format) {
3461     case FMT_MJPEG:
3462         mjpeg_picture_header(s);
3463         break;
3464 #ifdef CONFIG_RISKY
3465     case FMT_H263:
3466         if (s->codec_id == CODEC_ID_WMV2) 
3467             ff_wmv2_encode_picture_header(s, picture_number);
3468         else if (s->h263_msmpeg4) 
3469             msmpeg4_encode_picture_header(s, picture_number);
3470         else if (s->h263_pred)
3471             mpeg4_encode_picture_header(s, picture_number);
3472         else if (s->h263_rv10) 
3473             rv10_encode_picture_header(s, picture_number);
3474         else if (s->codec_id == CODEC_ID_FLV1)
3475             ff_flv_encode_picture_header(s, picture_number);
3476         else
3477             h263_encode_picture_header(s, picture_number);
3478         break;
3479 #endif
3480     case FMT_MPEG1:
3481         mpeg1_encode_picture_header(s, picture_number);
3482         break;
3483     }
3484     bits= get_bit_count(&s->pb);
3485     s->header_bits= bits - s->last_bits;
3486     s->last_bits= bits;
3487     s->mv_bits=0;
3488     s->misc_bits=0;
3489     s->i_tex_bits=0;
3490     s->p_tex_bits=0;
3491     s->i_count=0;
3492     s->f_count=0;
3493     s->b_count=0;
3494     s->skip_count=0;
3495
3496     for(i=0; i<3; i++){
3497         /* init last dc values */
3498         /* note: quant matrix value (8) is implied here */
3499         s->last_dc[i] = 128;
3500         
3501         s->current_picture_ptr->error[i] = 0;
3502     }
3503     s->mb_skip_run = 0;
3504     s->last_mv[0][0][0] = 0;
3505     s->last_mv[0][0][1] = 0;
3506     s->last_mv[1][0][0] = 0;
3507     s->last_mv[1][0][1] = 0;
3508      
3509     s->last_mv_dir = 0;
3510
3511 #ifdef CONFIG_RISKY
3512     switch(s->codec_id){
3513     case CODEC_ID_H263:
3514     case CODEC_ID_H263P:
3515     case CODEC_ID_FLV1:
3516         s->gob_index = ff_h263_get_gob_height(s);
3517         break;
3518     case CODEC_ID_MPEG4:
3519         if(s->partitioned_frame)
3520             ff_mpeg4_init_partitions(s);
3521         break;
3522     }
3523 #endif
3524
3525     s->resync_mb_x=0;
3526     s->resync_mb_y=0;
3527     s->first_slice_line = 1;
3528     s->ptr_lastgob = s->pb.buf;
3529     for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3530         s->mb_x=0;
3531         s->mb_y= mb_y;
3532
3533         s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
3534         s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
3535         ff_init_block_index(s);
3536         
3537         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3538             const int xy= mb_y*s->mb_stride + mb_x;
3539             int mb_type= s->mb_type[xy];
3540 //            int d;
3541             int dmin= INT_MAX;
3542
3543             s->mb_x = mb_x;
3544             ff_update_block_index(s);
3545
3546             /* write gob / video packet header  */
3547 #ifdef CONFIG_RISKY
3548             if(s->rtp_mode){
3549                 int current_packet_size, is_gob_start;
3550                 
3551                 current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob;
3552                 is_gob_start=0;
3553                 
3554                 if(s->codec_id==CODEC_ID_MPEG4){
3555                     if(current_packet_size >= s->rtp_payload_size
3556                        && s->mb_y + s->mb_x>0){
3557
3558                         if(s->partitioned_frame){
3559                             ff_mpeg4_merge_partitions(s);
3560                             ff_mpeg4_init_partitions(s);
3561                         }
3562                         ff_mpeg4_encode_video_packet_header(s);
3563
3564                         if(s->flags&CODEC_FLAG_PASS1){
3565                             int bits= get_bit_count(&s->pb);
3566                             s->misc_bits+= bits - s->last_bits;
3567                             s->last_bits= bits;
3568                         }
3569                         ff_mpeg4_clean_buffers(s);
3570                         is_gob_start=1;
3571                     }
3572                 }else if(s->codec_id==CODEC_ID_MPEG1VIDEO){
3573                     if(   current_packet_size >= s->rtp_payload_size 
3574                        && s->mb_y + s->mb_x>0 && s->mb_skip_run==0){
3575                         ff_mpeg1_encode_slice_header(s);
3576                         ff_mpeg1_clean_buffers(s);
3577                         is_gob_start=1;
3578                     }
3579                 }else{
3580                     if(current_packet_size >= s->rtp_payload_size
3581                        && s->mb_x==0 && s->mb_y>0 && s->mb_y%s->gob_index==0){
3582                        
3583                         h263_encode_gob_header(s, mb_y);                       
3584                         is_gob_start=1;
3585                     }
3586                 }
3587
3588                 if(is_gob_start){
3589                     s->ptr_lastgob = pbBufPtr(&s->pb);
3590                     s->first_slice_line=1;
3591                     s->resync_mb_x=mb_x;
3592                     s->resync_mb_y=mb_y;
3593                 }
3594             }
3595 #endif
3596
3597             if(  (s->resync_mb_x   == s->mb_x)
3598                && s->resync_mb_y+1 == s->mb_y){
3599                 s->first_slice_line=0; 
3600             }
3601
3602             s->mb_skiped=0;
3603
3604             if(mb_type & (mb_type-1)){ // more than 1 MB type possible
3605                 int next_block=0;
3606                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3607
3608                 copy_context_before_encode(&backup_s, s, -1);
3609                 backup_s.pb= s->pb;
3610                 best_s.data_partitioning= s->data_partitioning;
3611                 best_s.partitioned_frame= s->partitioned_frame;
3612                 if(s->data_partitioning){
3613                     backup_s.pb2= s->pb2;
3614                     backup_s.tex_pb= s->tex_pb;
3615                 }
3616
3617                 if(mb_type&MB_TYPE_INTER){
3618                     s->mv_dir = MV_DIR_FORWARD;
3619                     s->mv_type = MV_TYPE_16X16;
3620                     s->mb_intra= 0;
3621                     s->mv[0][0][0] = s->p_mv_table[xy][0];
3622                     s->mv[0][0][1] = s->p_mv_table[xy][1];
3623                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER, pb, pb2, tex_pb, 
3624                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3625                 }
3626                 if(mb_type&MB_TYPE_INTER4V){                 
3627                     s->mv_dir = MV_DIR_FORWARD;
3628                     s->mv_type = MV_TYPE_8X8;
3629                     s->mb_intra= 0;
3630                     for(i=0; i<4; i++){
3631                         s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3632                         s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3633                     }
3634                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER4V, pb, pb2, tex_pb, 
3635                                  &dmin, &next_block, 0, 0);
3636                 }
3637                 if(mb_type&MB_TYPE_FORWARD){
3638                     s->mv_dir = MV_DIR_FORWARD;
3639                     s->mv_type = MV_TYPE_16X16;
3640                     s->mb_intra= 0;
3641                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3642                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3643                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_FORWARD, pb, pb2, tex_pb, 
3644                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3645                 }
3646                 if(mb_type&MB_TYPE_BACKWARD){
3647                     s->mv_dir = MV_DIR_BACKWARD;
3648                     s->mv_type = MV_TYPE_16X16;
3649                     s->mb_intra= 0;
3650                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3651                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3652                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BACKWARD, pb, pb2, tex_pb, 
3653                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3654                 }
3655                 if(mb_type&MB_TYPE_BIDIR){
3656                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3657                     s->mv_type = MV_TYPE_16X16;
3658                     s->mb_intra= 0;
3659                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3660                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3661                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3662                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3663                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BIDIR, pb, pb2, tex_pb, 
3664                                  &dmin, &next_block, 0, 0);
3665                 }
3666                 if(mb_type&MB_TYPE_DIRECT){
3667                     int mx= s->b_direct_mv_table[xy][0];
3668                     int my= s->b_direct_mv_table[xy][1];
3669                     
3670                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3671                     s->mb_intra= 0;
3672 #ifdef CONFIG_RISKY
3673                     ff_mpeg4_set_direct_mv(s, mx, my);
3674 #endif
3675                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_DIRECT, pb, pb2, tex_pb, 
3676                                  &dmin, &next_block, mx, my);
3677                 }
3678                 if(mb_type&MB_TYPE_INTRA){
3679                     s->mv_dir = 0;
3680                     s->mv_type = MV_TYPE_16X16;
3681                     s->mb_intra= 1;
3682                     s->mv[0][0][0] = 0;
3683                     s->mv[0][0][1] = 0;
3684                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTRA, pb, pb2, tex_pb, 
3685                                  &dmin, &next_block, 0, 0);
3686                     if(s->h263_pred || s->h263_aic){
3687                         if(best_s.mb_intra)
3688                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3689                         else
3690                             ff_clean_intra_table_entries(s); //old mode?
3691                     }
3692                 }
3693                 copy_context_after_encode(s, &best_s, -1);
3694                 
3695                 pb_bits_count= get_bit_count(&s->pb);
3696                 flush_put_bits(&s->pb);
3697                 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3698                 s->pb= backup_s.pb;
3699                 
3700                 if(s->data_partitioning){
3701                     pb2_bits_count= get_bit_count(&s->pb2);
3702                     flush_put_bits(&s->pb2);
3703                     ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3704                     s->pb2= backup_s.pb2;
3705                     
3706                     tex_pb_bits_count= get_bit_count(&s->tex_pb);
3707                     flush_put_bits(&s->tex_pb);
3708                     ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3709                     s->tex_pb= backup_s.tex_pb;
3710                 }
3711                 s->last_bits= get_bit_count(&s->pb);
3712                 
3713                 if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
3714                     ff_h263_update_motion_val(s);
3715         
3716                 if(next_block==0){
3717                     s->dsp.put_pixels_tab[0][0](s->dest[0], s->me.scratchpad     , s->linesize  ,16);
3718                     s->dsp.put_pixels_tab[1][0](s->dest[1], s->me.scratchpad + 16, s->uvlinesize, 8);
3719                     s->dsp.put_pixels_tab[1][0](s->dest[2], s->me.scratchpad + 24, s->uvlinesize, 8);
3720                 }
3721
3722                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3723                     MPV_decode_mb(s, s->block);
3724             } else {
3725                 int motion_x, motion_y;
3726                 int intra_score;
3727                 int inter_score= s->current_picture.mb_cmp_score[mb_x + mb_y*s->mb_stride];
3728                 
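              /* Simple intra/inter decision for P-frames: estimate the intra
                 cost by comparing the source MB against a flat block filled
                 with its mean (luma, and chroma if FF_CMP_CHROMA), add a
                 comparison-function dependent bias, and switch this MB to
                 intra if that beats the inter score from motion estimation. */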
3729               if(s->avctx->mb_decision==FF_MB_DECISION_SIMPLE && s->pict_type==P_TYPE){ //FIXME check if the mess is useful at all
3730                 /* get luma score */
3731                 if((s->avctx->mb_cmp&0xFF)==FF_CMP_SSE){
3732                     intra_score= (s->current_picture.mb_var[mb_x + mb_y*s->mb_stride]<<8) - 500; //FIXME don't scale it down so we don't have to fix it
3733                 }else{
3734                     uint8_t *dest_y;
3735
3736                     int mean= s->current_picture.mb_mean[mb_x + mb_y*s->mb_stride]; //FIXME
3737                     mean*= 0x01010101;
3738                     
3739                     dest_y  = s->new_picture.data[0] + (mb_y * 16 * s->linesize    ) + mb_x * 16;
3740                 
3741                     for(i=0; i<16; i++){
3742                         *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 0]) = mean;
3743                         *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 4]) = mean;
3744                         *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 8]) = mean;
3745                         *(uint32_t*)(&s->me.scratchpad[i*s->linesize+12]) = mean;
3746                     }
3747
3748                     s->mb_intra=1;
3749                     intra_score= s->dsp.mb_cmp[0](s, s->me.scratchpad, dest_y, s->linesize);
3750                                         
3751 /*                    printf("intra:%7d inter:%7d var:%7d mc_var.%7d\n", intra_score>>8, inter_score>>8, 
3752                         s->current_picture.mb_var[mb_x + mb_y*s->mb_stride],
3753                         s->current_picture.mc_mb_var[mb_x + mb_y*s->mb_stride]);*/
3754                 }
3755                 
3756                 /* get chroma score */
3757                 if(s->avctx->mb_cmp&FF_CMP_CHROMA){
3758                     int i;
3759                     
3760                     s->mb_intra=1;
3761                     for(i=1; i<3; i++){
3762                         uint8_t *dest_c;
3763                         int mean;
3764                         
3765                         if(s->out_format == FMT_H263){
3766                             mean= (s->dc_val[i][mb_x + (mb_y+1)*(s->mb_width+2)] + 4)>>3; //FIXME not exact but simple ;)
3767                         }else{
3768                             mean= (s->last_dc[i] + 4)>>3;
3769                         }
3770                         dest_c = s->new_picture.data[i] + (mb_y * 8  * (s->uvlinesize)) + mb_x * 8;
3771                         
3772                         mean*= 0x01010101;
3773                         for(i=0; i<8; i++){
3774                             *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 0]) = mean;
3775                             *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 4]) = mean;
3776                         }
3777                         
3778                         intra_score+= s->dsp.mb_cmp[1](s, s->me.scratchpad, dest_c, s->uvlinesize);
3779                     }                
3780                 }
3781
3782                 /* bias */
3783                 switch(s->avctx->mb_cmp&0xFF){
3784                 default:
3785                 case FF_CMP_SAD:
3786                     intra_score+= 32*s->qscale;
3787                     break;
3788                 case FF_CMP_SSE:
3789                     intra_score+= 24*s->qscale*s->qscale;
3790                     break;
3791                 case FF_CMP_SATD:
3792                     intra_score+= 96*s->qscale;
3793                     break;
3794                 case FF_CMP_DCT:
3795                     intra_score+= 48*s->qscale;
3796                     break;
3797                 case FF_CMP_BIT:
3798                     intra_score+= 16;
3799                     break;
3800                 case FF_CMP_PSNR:
3801                 case FF_CMP_RD:
3802                     intra_score+= (s->qscale*s->qscale*109*8 + 64)>>7;
3803                     break;
3804                 }
3805
3806                 if(intra_score < inter_score)
3807                     mb_type= MB_TYPE_INTRA;
3808               }  
3809                 
3810                 s->mv_type=MV_TYPE_16X16;
3811                 // only one MB-Type possible
3812                 
3813                 switch(mb_type){
3814                 case MB_TYPE_INTRA:
3815                     s->mv_dir = 0;
3816                     s->mb_intra= 1;
3817                     motion_x= s->mv[0][0][0] = 0;
3818                     motion_y= s->mv[0][0][1] = 0;
3819                     break;
3820                 case MB_TYPE_INTER:
3821                     s->mv_dir = MV_DIR_FORWARD;
3822                     s->mb_intra= 0;
3823                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3824                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3825                     break;
3826                 case MB_TYPE_INTER4V:
3827                     s->mv_dir = MV_DIR_FORWARD;
3828                     s->mv_type = MV_TYPE_8X8;
3829                     s->mb_intra= 0;
3830                     for(i=0; i<4; i++){
3831                         s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3832                         s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3833                     }
3834                     motion_x= motion_y= 0;
3835                     break;
3836                 case MB_TYPE_DIRECT:
3837                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3838                     s->mb_intra= 0;
3839                     motion_x=s->b_direct_mv_table[xy][0];
3840                     motion_y=s->b_direct_mv_table[xy][1];
3841 #ifdef CONFIG_RISKY
3842                     ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3843 #endif
3844                     break;
3845                 case MB_TYPE_BIDIR:
3846                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3847                     s->mb_intra= 0;
3848                     motion_x=0;
3849                     motion_y=0;
3850                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3851                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3852                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3853                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3854                     break;
3855                 case MB_TYPE_BACKWARD:
3856                     s->mv_dir = MV_DIR_BACKWARD;
3857                     s->mb_intra= 0;
3858                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3859                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3860                     break;
3861                 case MB_TYPE_FORWARD:
3862                     s->mv_dir = MV_DIR_FORWARD;
3863                     s->mb_intra= 0;
3864                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3865                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3866 //                    printf(" %d %d ", motion_x, motion_y);
3867                     break;
3868                 default:
3869                     motion_x=motion_y=0; //gcc warning fix
3870                     printf("illegal MB type\n");
3871                 }
3872
3873                 encode_mb(s, motion_x, motion_y);
3874
3875                 // RAL: Update last macroblock type
3876                 s->last_mv_dir = s->mv_dir;
3877             
3878                 if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
3879                     ff_h263_update_motion_val(s);
3880
3881                 MPV_decode_mb(s, s->block);
3882             }
3883
3884             /* clean the MV table in I/P/S frames for direct mode in B-frames */
3885             if(s->mb_intra /* && I,P,S_TYPE */){
3886                 s->p_mv_table[xy][0]=0;
3887                 s->p_mv_table[xy][1]=0;
3888             }
3889             
3890             if(s->flags&CODEC_FLAG_PSNR){
3891                 int w= 16;
3892                 int h= 16;
3893
3894                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3895                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3896
3897                 s->current_picture_ptr->error[0] += sse(
3898                     s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3899                     s->dest[0], w, h, s->linesize);
3900                 s->current_picture_ptr->error[1] += sse(
3901                     s, s->new_picture.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
3902                     s->dest[1], w>>1, h>>1, s->uvlinesize);
3903                 s->current_picture_ptr->error[2] += sse(
3904                     s, s->new_picture    .data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
3905                     s->dest[2], w>>1, h>>1, s->uvlinesize);
3906             }
3907 //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, get_bit_count(&s->pb));
3908         }
3909     }
3910     emms_c();
3911
3912 #ifdef CONFIG_RISKY
3913     if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
3914         ff_mpeg4_merge_partitions(s);
3915
3916     if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
3917         msmpeg4_encode_ext_header(s);
3918
3919     if(s->codec_id==CODEC_ID_MPEG4) 
3920         ff_mpeg4_stuffing(&s->pb);
3921 #endif
3922
3923     //if (s->gob_number)
3924     //    fprintf(stderr,"\nNumber of GOB: %d", s->gob_number);
3925     
3926     /* Send the last GOB if RTP */    
3927     if (s->rtp_mode) {
3928         flush_put_bits(&s->pb);
3929         pdif = pbBufPtr(&s->pb) - s->ptr_lastgob;
3930         /* Call the RTP callback to send the last GOB */
3931         if (s->rtp_callback)
3932             s->rtp_callback(s->ptr_lastgob, pdif, s->gob_number);
3933         s->ptr_lastgob = pbBufPtr(&s->pb);
3934         //fprintf(stderr,"\nGOB: %2d size: %d (last)", s->gob_number, pdif);
3935     }
3936 }
3937
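/* Rate-distortion ("trellis") quantization: forward DCT the block, then for
   each coefficient keep up to two nonzero candidate levels (the normally
   rounded value and one less) and search over the possible run/level
   combinations for the one minimizing  distortion + lambda * bits. */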
3938 static int dct_quantize_trellis_c(MpegEncContext *s, 
3939                         DCTELEM *block, int n,
3940                         int qscale, int *overflow){
3941     const int *qmat;
3942     const uint8_t *scantable= s->intra_scantable.scantable;
3943     int max=0;
3944     unsigned int threshold1, threshold2;
3945     int bias=0;
3946     int run_tab[65];
3947     int level_tab[65];
3948     int score_tab[65];
3949     int last_run=0;
3950     int last_level=0;
3951     int last_score= 0;
3952     int last_i= 0;
3953     int coeff[3][64];
3954     int coeff_count[64];
3955     int lambda, qmul, qadd, start_i, last_non_zero, i;
3956     const int esc_length= s->ac_esc_length;
3957     uint8_t * length;
3958     uint8_t * last_length;
3959     int score_limit=0;
3960     int left_limit= 0;
3961         
3962     s->dsp.fdct (block);
3963
3964     qmul= qscale*16;
3965     qadd= ((qscale-1)|1)*8;
3966
3967     if (s->mb_intra) {
3968         int q;
3969         if (!s->h263_aic) {
3970             if (n < 4)
3971                 q = s->y_dc_scale;
3972             else
3973                 q = s->c_dc_scale;
3974             q = q << 3;
3975         } else{
3976             /* For AIC we skip quant/dequant of INTRADC */
3977             q = 1 << 3;
3978             qadd=0;
3979         }
3980             
3981         /* note: block[0] is assumed to be positive */
3982         block[0] = (block[0] + (q >> 1)) / q;
3983         start_i = 1;
3984         last_non_zero = 0;
3985         qmat = s->q_intra_matrix[qscale];
3986         if(s->mpeg_quant || s->codec_id== CODEC_ID_MPEG1VIDEO)
3987             bias= 1<<(QMAT_SHIFT-1);
3988         length     = s->intra_ac_vlc_length;
3989         last_length= s->intra_ac_vlc_last_length;
3990     } else {
3991         start_i = 0;
3992         last_non_zero = -1;
3993         qmat = s->q_inter_matrix[qscale];
3994         length     = s->inter_ac_vlc_length;
3995         last_length= s->inter_ac_vlc_last_length;
3996     }
3997
3998     threshold1= (1<<QMAT_SHIFT) - bias - 1;
3999     threshold2= (threshold1<<1);
4000
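    /* build the candidate levels: coefficients above the dead zone get up to
       two choices (the rounded level and, if it is greater than one, level-1);
       everything below keeps only a +-1 candidate.  Zeroing a coefficient is
       always considered later in the search. */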
4001     for(i=start_i; i<64; i++) {
4002         const int j = scantable[i];
4003         const int k= i-start_i;
4004         int level = block[j];
4005         level = level * qmat[j];
4006
4007 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
4008 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
4009         if(((unsigned)(level+threshold1))>threshold2){
4010             if(level>0){
4011                 level= (bias + level)>>QMAT_SHIFT;
4012                 coeff[0][k]= level;
4013                 coeff[1][k]= level-1;
4014 //                coeff[2][k]= level-2;
4015             }else{
4016                 level= (bias - level)>>QMAT_SHIFT;
4017                 coeff[0][k]= -level;
4018                 coeff[1][k]= -level+1;
4019 //                coeff[2][k]= -level+2;
4020             }
4021             coeff_count[k]= FFMIN(level, 2);
4022             max |=level;
4023             last_non_zero = i;
4024         }else{
4025             coeff[0][k]= (level>>31)|1;
4026             coeff_count[k]= 1;
4027         }
4028     }
4029     
4030     *overflow= s->max_qcoeff < max; //overflow might have happened
4031     
4032     if(last_non_zero < start_i){
4033         memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
4034         return last_non_zero;
4035     }
4036
4037     lambda= (qscale*qscale*64*105 + 64)>>7; //FIXME finetune
4038         
4039     score_tab[0]= 0;
4040     for(i=0; i<=last_non_zero - start_i; i++){
4041         int level_index, run, j;
4042         const int dct_coeff= block[ scantable[i + start_i] ];
4043         const int zero_distoration= dct_coeff*dct_coeff;
4044         int best_score=256*256*256*120;
4045
4046         last_score += zero_distoration;
4047         for(level_index=0; level_index < coeff_count[i]; level_index++){
4048             int distoration;
4049             int level= coeff[level_index][i];
4050             int unquant_coeff;
4051             
4052             assert(level);
4053
4054             if(s->out_format == FMT_H263){
4055                 if(level>0){
4056                     unquant_coeff= level*qmul + qadd;
4057                 }else{
4058                     unquant_coeff= level*qmul - qadd;
4059                 }
4060             }else{ //MPEG1
4061                 j= s->dsp.idct_permutation[ scantable[i + start_i] ]; //FIXME optimize
4062                 if(s->mb_intra){
4063                     if (level < 0) {
4064                         unquant_coeff = (int)((-level) * qscale * s->intra_matrix[j]) >> 3;
4065                         unquant_coeff = -((unquant_coeff - 1) | 1);
4066                     } else {
4067                         unquant_coeff = (int)(  level  * qscale * s->intra_matrix[j]) >> 3;
4068                         unquant_coeff =   (unquant_coeff - 1) | 1;
4069                     }
4070                 }else{
4071                     if (level < 0) {
4072                         unquant_coeff = ((((-level) << 1)&n