qpel encoding
[ffmpeg.git] / libavcodec / mpegvideo.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard.
4  *
5  * This library is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU Lesser General Public
7  * License as published by the Free Software Foundation; either
8  * version 2 of the License, or (at your option) any later version.
9  *
10  * This library is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * Lesser General Public License for more details.
14  *
15  * You should have received a copy of the GNU Lesser General Public
16  * License along with this library; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
18  *
19  * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
20  */
21  
22 #include <ctype.h>
23 #include "avcodec.h"
24 #include "dsputil.h"
25 #include "mpegvideo.h"
26 #include "simple_idct.h"
27
28 #ifdef USE_FASTMEMCPY
29 #include "fastmemcpy.h"
30 #endif
31
32 //#undef NDEBUG
33 //#include <assert.h>
34
35 static void encode_picture(MpegEncContext *s, int picture_number);
36 static void dct_unquantize_mpeg1_c(MpegEncContext *s, 
37                                    DCTELEM *block, int n, int qscale);
38 static void dct_unquantize_mpeg2_c(MpegEncContext *s,
39                                    DCTELEM *block, int n, int qscale);
40 static void dct_unquantize_h263_c(MpegEncContext *s, 
41                                   DCTELEM *block, int n, int qscale);
42 static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w);
43 static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
44
45 void (*draw_edges)(UINT8 *buf, int wrap, int width, int height, int w)= draw_edges_c;
46
47
48 /* enable all paranoid tests for rounding, overflows, etc... */
49 //#define PARANOID
50
51 //#define DEBUG
52
53
54 /* for jpeg fast DCT */
55 #define CONST_BITS 14
56
57 static const uint16_t aanscales[64] = {
58     /* precomputed values scaled up by 14 bits */
59     16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
60     22725, 31521, 29692, 26722, 22725, 17855, 12299,  6270,
61     21407, 29692, 27969, 25172, 21407, 16819, 11585,  5906,
62     19266, 26722, 25172, 22654, 19266, 15137, 10426,  5315,
63     16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
64     12873, 17855, 16819, 15137, 12873, 10114,  6967,  3552,
65     8867 , 12299, 11585, 10426,  8867,  6967,  4799,  2446,
66     4520 ,  6270,  5906,  5315,  4520,  3552,  2446,  1247
67 };
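
/* For reference: the AAN fast DCT (fdct_ifast) skips its final scaling
   multiplies, so its output carries these factors and convert_matrix() below
   divides them out together with the quantizer.  Up to rounding, the table is
   the outer product of the 1-D factors f(0) = 1, f(k) = sqrt(2)*cos(k*pi/16)
   for k = 1..7, scaled by 2^14:

       aanscales[8*u + v] ~= round(16384 * f(u) * f(v))

   e.g. aanscales[1] ~= 16384 * 1.0 * 1.3870 ~= 22725. */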
68
69 /* Input permutation for the simple_idct_mmx */
70 static const uint8_t simple_mmx_permutation[64]={
71         0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D, 
72         0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D, 
73         0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D, 
74         0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F, 
75         0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F, 
76         0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D, 
77         0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F, 
78         0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
79 };
80
81 static const uint8_t h263_chroma_roundtab[16] = {
82     0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
83 };
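
/* This table implements the special rounding H.263/MPEG-4 use when deriving the
   single chroma vector of a 4MV macroblock from the sum of its four luma
   vectors (see the 4MV handling later in this file); roughly, for a
   non-negative sum in half-pel units:

       chroma_mv = ((sum >> 3) & ~1) + h263_chroma_roundtab[sum & 0xf];

   i.e. sum/8 with inexact results biased towards half-pel positions. */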
84
85 static UINT16 (*default_mv_penalty)[MAX_MV*2+1]=NULL;
86 static UINT8 default_fcode_tab[MAX_MV*2+1];
87
88 static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[64], uint16_t (*qmat16_bias)[64],
89                            const UINT16 *quant_matrix, int bias, int qmin, int qmax)
90 {
91     int qscale;
92
93     for(qscale=qmin; qscale<=qmax; qscale++){
94         int i;
95         if (s->fdct == ff_jpeg_fdct_islow) {
96             for(i=0;i<64;i++) {
97                 const int j= s->idct_permutation[i];
98                 /* 16 <= qscale * quant_matrix[i] <= 7905, so
99                    (1<<QMAT_SHIFT)/16 >= qmat[qscale][i] >= (1<<QMAT_SHIFT)/7905;
100                    the aanscales factors only matter for the fdct_ifast path below,
101                    here the divisor is just qscale * quant_matrix[j] */
102                 
103                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / 
104                                 (qscale * quant_matrix[j]));
105             }
106         } else if (s->fdct == fdct_ifast) {
107             for(i=0;i<64;i++) {
108                 const int j= s->idct_permutation[i];
109                 /* 16 <= qscale * quant_matrix[i] <= 7905 */
110                 /* 19952         <= aanscales[i] * qscale * quant_matrix[i]           <= 249205026 */
111                 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
112                 /* 3444240       >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
113                 
114                 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) / 
115                                 (aanscales[i] * qscale * quant_matrix[j]));
116             }
117         } else {
118             for(i=0;i<64;i++) {
119                 const int j= s->idct_permutation[i];
120                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
121                    So 16           <= qscale * quant_matrix[i]             <= 7905
122                    so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
123                    so 32768        >= (1<<19) / (qscale * quant_matrix[i]) >= 67
124                 */
125                 qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
126                 qmat16[qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);
127
128                 if(qmat16[qscale][i]==0 || qmat16[qscale][i]==128*256) qmat16[qscale][i]=128*256-1;
129                 qmat16_bias[qscale][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][i]);
130             }
131         }
132     }
133 }
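
/* The tables built above turn the per-coefficient division of quantization into
   a multiply and a shift.  As a rough sketch of how dct_quantize_c() further
   down consumes them (sign handling omitted):

       level = (ABS(block[j]) * qmat[qscale][j] + bias) >> QMAT_SHIFT;
       // ~ ABS(block[j]) / (qscale * quant_matrix[j])

   where bias is the intra/inter quantizer bias scaled up to QMAT_SHIFT, and the
   MMX quantizer uses the 16 bit qmat16/qmat16_bias variants with
   QMAT_SHIFT_MMX instead. */
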
134 // move into common.c perhaps 
135 #define CHECKED_ALLOCZ(p, size)\
136 {\
137     p= av_mallocz(size);\
138     if(p==NULL){\
139         perror("malloc");\
140         goto fail;\
141     }\
142 }
143
144 void ff_init_scantable(MpegEncContext *s, ScanTable *st, const UINT8 *src_scantable){
145     int i;
146     int end;
147     
148     st->scantable= src_scantable;
149
150     for(i=0; i<64; i++){
151         int j;
152         j = src_scantable[i];
153         st->permutated[i] = s->idct_permutation[j];
154 #ifdef ARCH_POWERPC
155         st->inverse[j] = i;
156 #endif
157     }
158     
159     end=-1;
160     for(i=0; i<64; i++){
161         int j;
162         j = st->permutated[i];
163         if(j>end) end=j;
164         st->raster_end[i]= end;
165     }
166 }
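
/* For illustration: scantable holds the raster index of each scan position as
   given by the spec, permutated additionally applies the IDCT input permutation
   so decoded coefficients can be stored directly in the order the selected IDCT
   expects, and raster_end[i] is the largest permuted index occurring among scan
   positions 0..i.  With FF_NO_IDCT_PERM (identity), permutated[] is simply the
   scan itself and raster_end[63] == 63. */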
167
168 /* XXX: these functions should be removed as soon as all IDCTs are
169    converted */
170 // FIXME: this is an ugly hack using local statics
171 static void (*ff_put_pixels_clamped)(const DCTELEM *block, UINT8 *pixels, int line_size);
172 static void (*ff_add_pixels_clamped)(const DCTELEM *block, UINT8 *pixels, int line_size);
173 static void ff_jref_idct_put(UINT8 *dest, int line_size, DCTELEM *block)
174 {
175     j_rev_dct (block);
176     ff_put_pixels_clamped(block, dest, line_size);
177 }
178 static void ff_jref_idct_add(UINT8 *dest, int line_size, DCTELEM *block)
179 {
180     j_rev_dct (block);
181     ff_add_pixels_clamped(block, dest, line_size);
182 }
183
184 /* init common dct for both encoder and decoder */
185 int DCT_common_init(MpegEncContext *s)
186 {
187     int i;
188
189     ff_put_pixels_clamped = s->dsp.put_pixels_clamped;
190     ff_add_pixels_clamped = s->dsp.add_pixels_clamped;
191
192     s->dct_unquantize_h263 = dct_unquantize_h263_c;
193     s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_c;
194     s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_c;
195     s->dct_quantize= dct_quantize_c;
196
197     if(s->avctx->dct_algo==FF_DCT_FASTINT)
198         s->fdct = fdct_ifast;
199     else
200         s->fdct = ff_jpeg_fdct_islow; //slow/accurate/default
201
202     if(s->avctx->idct_algo==FF_IDCT_INT){
203         s->idct_put= ff_jref_idct_put;
204         s->idct_add= ff_jref_idct_add;
205         s->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
206     }else{ //accurate/default
207         s->idct_put= simple_idct_put;
208         s->idct_add= simple_idct_add;
209         s->idct_permutation_type= FF_NO_IDCT_PERM;
210     }
211         
212 #ifdef HAVE_MMX
213     MPV_common_init_mmx(s);
214 #endif
215 #ifdef ARCH_ALPHA
216     MPV_common_init_axp(s);
217 #endif
218 #ifdef HAVE_MLIB
219     MPV_common_init_mlib(s);
220 #endif
221 #ifdef HAVE_MMI
222     MPV_common_init_mmi(s);
223 #endif
224 #ifdef ARCH_ARMV4L
225     MPV_common_init_armv4l(s);
226 #endif
227 #ifdef ARCH_POWERPC
228     MPV_common_init_ppc(s);
229 #endif
230
231     switch(s->idct_permutation_type){
232     case FF_NO_IDCT_PERM:
233         for(i=0; i<64; i++)
234             s->idct_permutation[i]= i;
235         break;
236     case FF_LIBMPEG2_IDCT_PERM:
237         for(i=0; i<64; i++)
238             s->idct_permutation[i]= (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2);
239         break;
240     case FF_SIMPLE_IDCT_PERM:
241         for(i=0; i<64; i++)
242             s->idct_permutation[i]= simple_mmx_permutation[i];
243         break;
244     case FF_TRANSPOSE_IDCT_PERM:
245         for(i=0; i<64; i++)
246             s->idct_permutation[i]= ((i&7)<<3) | (i>>3);
247         break;
248     default:
249         fprintf(stderr, "Internal error, IDCT permutation not set\n");
250         return -1;
251     }
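
    /* Example of what these permutations do to a raster index i = 8*row + col:
       FF_TRANSPOSE_IDCT_PERM swaps row and column (i=1, row 0/col 1, maps to 8),
       while FF_LIBMPEG2_IDCT_PERM keeps the row bits and rotates the three
       column bits right by one (col 1 -> 4, col 2 -> 1, col 4 -> 2, ...), i.e.
       the order that IDCT wants its input coefficients in. */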
252
253
254     /* load & permute the scantables
255        note: only wmv uses different ones
256     */
257     ff_init_scantable(s, &s->inter_scantable  , ff_zigzag_direct);
258     ff_init_scantable(s, &s->intra_scantable  , ff_zigzag_direct);
259     ff_init_scantable(s, &s->intra_h_scantable, ff_alternate_horizontal_scan);
260     ff_init_scantable(s, &s->intra_v_scantable, ff_alternate_vertical_scan);
261
262     return 0;
263 }
264
265 /**
266  * allocates a Picture
267  * The pixels are allocated/set by calling get_buffer() if shared=0
268  */
269 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
270     
271     if(shared){
272         assert(pic->data[0]);
273         assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
274         pic->type= FF_BUFFER_TYPE_SHARED;
275     }else{
276         int r;
277         
278         assert(!pic->data[0]);
279         
280         r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
281         
282         if(r<0 || !pic->age || !pic->type || !pic->data[0]){
283             fprintf(stderr, "get_buffer() failed (%d %d %d %X)\n", r, pic->age, pic->type, (int)pic->data[0]);
284             return -1;
285         }
286
287         if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
288             fprintf(stderr, "get_buffer() failed (stride changed)\n");
289             return -1;
290         }
291
292         if(pic->linesize[1] != pic->linesize[2]){
293             fprintf(stderr, "get_buffer() failed (uv stride mismatch)\n");
294             return -1;
295         }
296
297         s->linesize  = pic->linesize[0];
298         s->uvlinesize= pic->linesize[1];
299     }
300     
301     if(pic->qscale_table==NULL){
302         if (s->encoding) {        
303             CHECKED_ALLOCZ(pic->mb_var   , s->mb_num * sizeof(INT16))
304             CHECKED_ALLOCZ(pic->mc_mb_var, s->mb_num * sizeof(INT16))
305             CHECKED_ALLOCZ(pic->mb_mean  , s->mb_num * sizeof(INT8))
306         }
307
308         CHECKED_ALLOCZ(pic->mbskip_table , s->mb_num * sizeof(UINT8)+1) //the +1 is for the slice end check
309         CHECKED_ALLOCZ(pic->qscale_table , s->mb_num * sizeof(UINT8))
310         pic->qstride= s->mb_width;
311     }
312     
313     return 0;
314 fail: //for the CHECKED_ALLOCZ macro
315     return -1;
316 }
317
318 /**
319  * deallocates a picture
320  */
321 static void free_picture(MpegEncContext *s, Picture *pic){
322     int i;
323
324     if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
325         s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
326     }
327
328     av_freep(&pic->mb_var);
329     av_freep(&pic->mc_mb_var);
330     av_freep(&pic->mb_mean);
331     av_freep(&pic->mbskip_table);
332     av_freep(&pic->qscale_table);
333     
334     if(pic->type == FF_BUFFER_TYPE_INTERNAL){
335         for(i=0; i<4; i++){
336             av_freep(&pic->base[i]);
337             pic->data[i]= NULL;
338         }
339         av_freep(&pic->opaque);
340         pic->type= 0;
341     }else if(pic->type == FF_BUFFER_TYPE_SHARED){
342         for(i=0; i<4; i++){
343             pic->base[i]=
344             pic->data[i]= NULL;
345         }
346         pic->type= 0;        
347     }
348 }
349
350 /* init common structure for both encoder and decoder */
351 int MPV_common_init(MpegEncContext *s)
352 {
353     int y_size, c_size, yc_size, i;
354
355     dsputil_init(&s->dsp, s->avctx->dsp_mask);
356     DCT_common_init(s);
357
358     s->flags= s->avctx->flags;
359
360     s->mb_width  = (s->width  + 15) / 16;
361     s->mb_height = (s->height + 15) / 16;
362
363     /* set default edge pos, will be overridden in decode_header if needed */
364     s->h_edge_pos= s->mb_width*16;
365     s->v_edge_pos= s->mb_height*16;
366
367     s->mb_num = s->mb_width * s->mb_height;
368
369     y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
370     c_size = (s->mb_width + 2) * (s->mb_height + 2);
371     yc_size = y_size + 2 * c_size;
372
373     /* convert fourcc to upper case */
374     s->avctx->fourcc=   toupper( s->avctx->fourcc     &0xFF)          
375                      + (toupper((s->avctx->fourcc>>8 )&0xFF)<<8 )
376                      + (toupper((s->avctx->fourcc>>16)&0xFF)<<16) 
377                      + (toupper((s->avctx->fourcc>>24)&0xFF)<<24);
378
379     CHECKED_ALLOCZ(s->edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
380
381     s->avctx->coded_frame= (AVFrame*)&s->current_picture;
382
383     if (s->encoding) {
384         int mv_table_size= (s->mb_width+2)*(s->mb_height+2);
385
386         /* Allocate MV tables */
387         CHECKED_ALLOCZ(s->p_mv_table            , mv_table_size * 2 * sizeof(INT16))
388         CHECKED_ALLOCZ(s->b_forw_mv_table       , mv_table_size * 2 * sizeof(INT16))
389         CHECKED_ALLOCZ(s->b_back_mv_table       , mv_table_size * 2 * sizeof(INT16))
390         CHECKED_ALLOCZ(s->b_bidir_forw_mv_table , mv_table_size * 2 * sizeof(INT16))
391         CHECKED_ALLOCZ(s->b_bidir_back_mv_table , mv_table_size * 2 * sizeof(INT16))
392         CHECKED_ALLOCZ(s->b_direct_mv_table     , mv_table_size * 2 * sizeof(INT16))
393
394         //FIXME should be linesize instead of s->width*2 but that isn't known before get_buffer()
395         CHECKED_ALLOCZ(s->me.scratchpad,  s->width*2*16*3*sizeof(uint8_t)) 
396         
397         CHECKED_ALLOCZ(s->me.map      , ME_MAP_SIZE*sizeof(uint32_t))
398         CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
399
400         if(s->codec_id==CODEC_ID_MPEG4){
401             CHECKED_ALLOCZ(s->tex_pb_buffer, PB_BUFFER_SIZE);
402             CHECKED_ALLOCZ(   s->pb2_buffer, PB_BUFFER_SIZE);
403         }
404         
405         if(s->msmpeg4_version){
406             CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
407         }
408         CHECKED_ALLOCZ(s->avctx->stats_out, 256);
409     }
410         
411     CHECKED_ALLOCZ(s->error_status_table, s->mb_num*sizeof(UINT8))
412     
413     if (s->out_format == FMT_H263 || s->encoding) {
414         int size;
415         /* Allocate MB type table */
416         CHECKED_ALLOCZ(s->mb_type  , s->mb_num * sizeof(UINT8))
417
418         /* MV prediction */
419         size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
420         CHECKED_ALLOCZ(s->motion_val, size * 2 * sizeof(INT16));
421     }
422
423     if(s->codec_id==CODEC_ID_MPEG4){
424         /* interlaced direct mode decoding tables */
425         CHECKED_ALLOCZ(s->field_mv_table, s->mb_num*2*2 * sizeof(INT16))
426         CHECKED_ALLOCZ(s->field_select_table, s->mb_num*2* sizeof(INT8))
427     }
428     /* 4mv b frame decoding table */
429     //note this is needed for h263 without b frames too (segfault on damaged streams otherwise)
430     CHECKED_ALLOCZ(s->co_located_type_table, s->mb_num * sizeof(UINT8))
431     if (s->out_format == FMT_H263) {
432         /* ac values */
433         CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(INT16) * 16);
434         s->ac_val[1] = s->ac_val[0] + y_size;
435         s->ac_val[2] = s->ac_val[1] + c_size;
436         
437         /* cbp values */
438         CHECKED_ALLOCZ(s->coded_block, y_size);
439         
440         /* divx501 bitstream reorder buffer */
441         CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
442
443         /* cbp, ac_pred, pred_dir */
444         CHECKED_ALLOCZ(s->cbp_table  , s->mb_num * sizeof(UINT8))
445         CHECKED_ALLOCZ(s->pred_dir_table, s->mb_num * sizeof(UINT8))
446     }
447     
448     if (s->h263_pred || s->h263_plus || !s->encoding) {
449         /* dc values */
450         //MN: we need these for error resilience of intra-frames
451         CHECKED_ALLOCZ(s->dc_val[0], yc_size * sizeof(INT16));
452         s->dc_val[1] = s->dc_val[0] + y_size;
453         s->dc_val[2] = s->dc_val[1] + c_size;
454         for(i=0;i<yc_size;i++)
455             s->dc_val[0][i] = 1024;
456     }
457
458     /* which mb is an intra block */
459     CHECKED_ALLOCZ(s->mbintra_table, s->mb_num);
460     memset(s->mbintra_table, 1, s->mb_num);
461     
462     /* default structure is frame */
463     s->picture_structure = PICT_FRAME;
464     
465     /* init macroblock skip table */
466     CHECKED_ALLOCZ(s->mbskip_table, s->mb_num+1);
467     //Note the +1 is for a quicker mpeg4 slice_end detection
468     
469     s->block= s->blocks[0];
470
471     s->parse_context.state= -1;
472
473     s->context_initialized = 1;
474     return 0;
475  fail:
476     MPV_common_end(s);
477     return -1;
478 }
479
480
481 //extern int sads;
482
483 /* init common structure for both encoder and decoder */
484 void MPV_common_end(MpegEncContext *s)
485 {
486     int i;
487
488     av_freep(&s->mb_type);
489     av_freep(&s->p_mv_table);
490     av_freep(&s->b_forw_mv_table);
491     av_freep(&s->b_back_mv_table);
492     av_freep(&s->b_bidir_forw_mv_table);
493     av_freep(&s->b_bidir_back_mv_table);
494     av_freep(&s->b_direct_mv_table);
495     av_freep(&s->motion_val);
496     av_freep(&s->dc_val[0]);
497     av_freep(&s->ac_val[0]);
498     av_freep(&s->coded_block);
499     av_freep(&s->mbintra_table);
500     av_freep(&s->cbp_table);
501     av_freep(&s->pred_dir_table);
502     av_freep(&s->me.scratchpad);
503     av_freep(&s->me.map);
504     av_freep(&s->me.score_map);
505     
506     av_freep(&s->mbskip_table);
507     av_freep(&s->bitstream_buffer);
508     av_freep(&s->tex_pb_buffer);
509     av_freep(&s->pb2_buffer);
510     av_freep(&s->edge_emu_buffer);
511     av_freep(&s->co_located_type_table);
512     av_freep(&s->field_mv_table);
513     av_freep(&s->field_select_table);
514     av_freep(&s->avctx->stats_out);
515     av_freep(&s->ac_stats);
516     av_freep(&s->error_status_table);
517
518     for(i=0; i<MAX_PICTURE_COUNT; i++){
519         free_picture(s, &s->picture[i]);
520     }
521     s->context_initialized = 0;
522 }
523
524 /* init video encoder */
525 int MPV_encode_init(AVCodecContext *avctx)
526 {
527     MpegEncContext *s = avctx->priv_data;
528     int i;
529
530     avctx->pix_fmt = PIX_FMT_YUV420P;
531
532     s->bit_rate = avctx->bit_rate;
533     s->bit_rate_tolerance = avctx->bit_rate_tolerance;
534     s->frame_rate = avctx->frame_rate;
535     s->width = avctx->width;
536     s->height = avctx->height;
537     if(avctx->gop_size > 600){
538         fprintf(stderr, "Warning: keyframe interval too large, reducing it ...\n");
539         avctx->gop_size=600;
540     }
541     s->gop_size = avctx->gop_size;
542     s->rtp_mode = avctx->rtp_mode;
543     s->rtp_payload_size = avctx->rtp_payload_size;
544     if (avctx->rtp_callback)
545         s->rtp_callback = avctx->rtp_callback;
546     s->qmin= avctx->qmin;
547     s->qmax= avctx->qmax;
548     s->max_qdiff= avctx->max_qdiff;
549     s->qcompress= avctx->qcompress;
550     s->qblur= avctx->qblur;
551     s->avctx = avctx;
552     s->flags= avctx->flags;
553     s->max_b_frames= avctx->max_b_frames;
554     s->b_frame_strategy= avctx->b_frame_strategy;
555     s->codec_id= avctx->codec->id;
556     s->luma_elim_threshold  = avctx->luma_elim_threshold;
557     s->chroma_elim_threshold= avctx->chroma_elim_threshold;
558     s->strict_std_compliance= avctx->strict_std_compliance;
559     s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
560     s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
561     s->mpeg_quant= avctx->mpeg_quant;
562
563     if (s->gop_size <= 1) {
564         s->intra_only = 1;
565         s->gop_size = 12;
566     } else {
567         s->intra_only = 0;
568     }
569
570     s->me_method = avctx->me_method;
571
572     /* Fixed QSCALE */
573     s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE);
574     
575     s->adaptive_quant= (   s->avctx->lumi_masking
576                         || s->avctx->dark_masking
577                         || s->avctx->temporal_cplx_masking 
578                         || s->avctx->spatial_cplx_masking
579                         || s->avctx->p_masking)
580                        && !s->fixed_qscale;
581     
582     s->progressive_sequence= !(avctx->flags & CODEC_FLAG_INTERLACED_DCT);
583
584     switch(avctx->codec->id) {
585     case CODEC_ID_MPEG1VIDEO:
586         s->out_format = FMT_MPEG1;
587         avctx->delay=0; //FIXME not sure, should check the spec
588         break;
589     case CODEC_ID_MJPEG:
590         s->out_format = FMT_MJPEG;
591         s->intra_only = 1; /* force intra only for jpeg */
592         s->mjpeg_write_tables = 1; /* write all tables */
593         s->mjpeg_data_only_frames = 0; /* write all the needed headers */
594         s->mjpeg_vsample[0] = 2; /* set up default sampling factors */
595         s->mjpeg_vsample[1] = 1; /* the only currently supported values */
596         s->mjpeg_vsample[2] = 1; 
597         s->mjpeg_hsample[0] = 2;
598         s->mjpeg_hsample[1] = 1; 
599         s->mjpeg_hsample[2] = 1; 
600         if (mjpeg_init(s) < 0)
601             return -1;
602         avctx->delay=0;
603         s->low_delay=1;
604         break;
605     case CODEC_ID_H263:
606         if (h263_get_picture_format(s->width, s->height) == 7) {
607             printf("Input picture size isn't suitable for h263 codec! try h263+\n");
608             return -1;
609         }
610         s->out_format = FMT_H263;
611         avctx->delay=0;
612         s->low_delay=1;
613         break;
614     case CODEC_ID_H263P:
615         s->out_format = FMT_H263;
616         s->h263_plus = 1;
617         s->unrestricted_mv = 1;
618         s->h263_aic = 1;
619         
620         /* These are just to be sure */
621         s->umvplus = 0;
622         s->umvplus_dec = 0;
623         avctx->delay=0;
624         s->low_delay=1;
625         break;
626     case CODEC_ID_RV10:
627         s->out_format = FMT_H263;
628         s->h263_rv10 = 1;
629         avctx->delay=0;
630         s->low_delay=1;
631         break;
632     case CODEC_ID_MPEG4:
633         s->out_format = FMT_H263;
634         s->h263_pred = 1;
635         s->unrestricted_mv = 1;
636         s->low_delay= s->max_b_frames ? 0 : 1;
637         avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
638         break;
639     case CODEC_ID_MSMPEG4V1:
640         s->out_format = FMT_H263;
641         s->h263_msmpeg4 = 1;
642         s->h263_pred = 1;
643         s->unrestricted_mv = 1;
644         s->msmpeg4_version= 1;
645         avctx->delay=0;
646         s->low_delay=1;
647         break;
648     case CODEC_ID_MSMPEG4V2:
649         s->out_format = FMT_H263;
650         s->h263_msmpeg4 = 1;
651         s->h263_pred = 1;
652         s->unrestricted_mv = 1;
653         s->msmpeg4_version= 2;
654         avctx->delay=0;
655         s->low_delay=1;
656         break;
657     case CODEC_ID_MSMPEG4V3:
658         s->out_format = FMT_H263;
659         s->h263_msmpeg4 = 1;
660         s->h263_pred = 1;
661         s->unrestricted_mv = 1;
662         s->msmpeg4_version= 3;
663         avctx->delay=0;
664         s->low_delay=1;
665         break;
666     case CODEC_ID_WMV1:
667         s->out_format = FMT_H263;
668         s->h263_msmpeg4 = 1;
669         s->h263_pred = 1;
670         s->unrestricted_mv = 1;
671         s->msmpeg4_version= 4;
672         avctx->delay=0;
673         s->low_delay=1;
674         break;
675     case CODEC_ID_WMV2:
676         s->out_format = FMT_H263;
677         s->h263_msmpeg4 = 1;
678         s->h263_pred = 1;
679         s->unrestricted_mv = 1;
680         s->msmpeg4_version= 5;
681         avctx->delay=0;
682         s->low_delay=1;
683         break;
684     default:
685         return -1;
686     }
687     
688     { /* set up some sane defaults, some codecs might override them later */
689         static int done=0;
690         if(!done){
691             int i;
692             done=1;
693
694             default_mv_penalty= av_mallocz( sizeof(UINT16)*(MAX_FCODE+1)*(2*MAX_MV+1) );
695             memset(default_mv_penalty, 0, sizeof(UINT16)*(MAX_FCODE+1)*(2*MAX_MV+1));
696             memset(default_fcode_tab , 0, sizeof(UINT8)*(2*MAX_MV+1));
697
698             for(i=-16; i<16; i++){
699                 default_fcode_tab[i + MAX_MV]= 1;
700             }
701         }
702     }
703     s->me.mv_penalty= default_mv_penalty;
704     s->fcode_tab= default_fcode_tab;
705     s->y_dc_scale_table=
706     s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
707  
708     /* don't use the mv_penalty table for crap MVs as it would be confused */
709     //FIXME remove after fixing / removing old ME
710     if (s->me_method < ME_EPZS) s->me.mv_penalty = default_mv_penalty;
711
712     s->encoding = 1;
713
714     /* init */
715     if (MPV_common_init(s) < 0)
716         return -1;
717     
718     ff_init_me(s);
719
720 #ifdef CONFIG_ENCODERS
721     if (s->out_format == FMT_H263)
722         h263_encode_init(s);
723     else if (s->out_format == FMT_MPEG1)
724         ff_mpeg1_encode_init(s);
725     if(s->msmpeg4_version)
726         ff_msmpeg4_encode_init(s);
727 #endif
728
729     /* init default q matrix */
730     for(i=0;i<64;i++) {
731         int j= s->idct_permutation[i];
732         if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
733             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
734             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
735         }else if(s->out_format == FMT_H263){
736             s->intra_matrix[j] =
737             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
738         }else{ /* mpeg1 */
739             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
740             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
741         }
742     }
743
744     /* precompute matrix */
745     /* for mjpeg, qscale is included in the matrix itself, so nothing to precompute here */
746     if (s->out_format != FMT_MJPEG) {
747         convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, s->q_intra_matrix16_bias, 
748                        s->intra_matrix, s->intra_quant_bias, 1, 31);
749         convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16, s->q_inter_matrix16_bias, 
750                        s->inter_matrix, s->inter_quant_bias, 1, 31);
751     }
752
753     if(ff_rate_control_init(s) < 0)
754         return -1;
755
756     s->picture_number = 0;
757     s->picture_in_gop_number = 0;
758     s->fake_picture_number = 0;
759     /* motion detector init */
760     s->f_code = 1;
761     s->b_code = 1;
762
763     return 0;
764 }
765
766 int MPV_encode_end(AVCodecContext *avctx)
767 {
768     MpegEncContext *s = avctx->priv_data;
769
770 #ifdef STATS
771     print_stats();
772 #endif
773
774     ff_rate_control_uninit(s);
775
776     MPV_common_end(s);
777     if (s->out_format == FMT_MJPEG)
778         mjpeg_close(s);
779       
780     return 0;
781 }
782
783 /* draw the edges of width 'w' of an image of size width, height */
784 //FIXME check that this is ok for mpeg4 interlaced
785 static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w)
786 {
787     UINT8 *ptr, *last_line;
788     int i;
789
790     last_line = buf + (height - 1) * wrap;
791     for(i=0;i<w;i++) {
792         /* top and bottom */
793         memcpy(buf - (i + 1) * wrap, buf, width);
794         memcpy(last_line + (i + 1) * wrap, last_line, width);
795     }
796     /* left and right */
797     ptr = buf;
798     for(i=0;i<height;i++) {
799         memset(ptr - w, ptr[0], w);
800         memset(ptr + width, ptr[width-1], w);
801         ptr += wrap;
802     }
803     /* corners */
804     for(i=0;i<w;i++) {
805         memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
806         memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
807         memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
808         memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
809     }
810 }
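
/* Usage sketch, mirroring the calls in MPV_frame_end() below (pic standing for
   the just reconstructed reference picture): after a frame is coded/decoded,
   its borders are replicated so motion compensation with unrestricted MVs can
   read up to EDGE_WIDTH pixels outside the picture without per-access clipping;
   the buffers are assumed to have that much padding around them.

       draw_edges(pic->data[0], s->linesize,   s->h_edge_pos,    s->v_edge_pos,    EDGE_WIDTH  );
       draw_edges(pic->data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
       draw_edges(pic->data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
*/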
811
812 static int find_unused_picture(MpegEncContext *s, int shared){
813     int i;
814     
815     if(shared){
816         for(i=0; i<MAX_PICTURE_COUNT; i++){
817             if(s->picture[i].data[0]==NULL && s->picture[i].type==0) break;
818         }
819     }else{
820         for(i=0; i<MAX_PICTURE_COUNT; i++){
821             if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) break;
822         }
823         for(i=0; i<MAX_PICTURE_COUNT; i++){
824             if(s->picture[i].data[0]==NULL) break;
825         }
826     }
827
828     assert(i<MAX_PICTURE_COUNT);
829     return i;
830 }
831
832 /* generic function for encode/decode called before a frame is coded/decoded */
833 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
834 {
835     int i;
836     AVFrame *pic;
837
838     s->mb_skiped = 0;
839     
840     /* mark&release old frames */
841     if (s->pict_type != B_TYPE && s->last_picture.data[0]) {
842         for(i=0; i<MAX_PICTURE_COUNT; i++){
843 //printf("%8X %d %d %X %X\n", s->picture[i].data[0], s->picture[i].type, i, s->next_picture.data[0], s->last_picture.data[0]);
844             if(s->picture[i].data[0] == s->last_picture.data[0]){
845 //                s->picture[i].reference=0;
846                 avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
847                 break;
848             }    
849         }
850         assert(i<MAX_PICTURE_COUNT);
851
852         /* release forgotten pictures */
853         /* if(mpeg124/h263) */
854         if(!s->encoding){
855             for(i=0; i<MAX_PICTURE_COUNT; i++){
856                 if(s->picture[i].data[0] && s->picture[i].data[0] != s->next_picture.data[0] && s->picture[i].reference){
857                     fprintf(stderr, "releasing zombie picture\n");
858                     avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);                
859                 }
860             }
861         }
862     }
863 alloc:
864     if(!s->encoding){
865         i= find_unused_picture(s, 0);
866     
867         pic= (AVFrame*)&s->picture[i];
868         pic->reference= s->pict_type != B_TYPE;
869         pic->coded_picture_number= s->current_picture.coded_picture_number+1;
870         
871         alloc_picture(s, (Picture*)pic, 0);
872
873         s->current_picture= s->picture[i];
874     }
875
876     if (s->pict_type != B_TYPE) {
877         s->last_picture= s->next_picture;
878         s->next_picture= s->current_picture;
879     }
880     
881     if(s->pict_type != I_TYPE && s->last_picture.data[0]==NULL){
882         fprintf(stderr, "warning: first frame is not a keyframe\n");
883         assert(s->pict_type != B_TYPE); //these should have been dropped if we don't have a reference
884         goto alloc;
885     }
886    
887     s->hurry_up= s->avctx->hurry_up;
888     s->error_resilience= avctx->error_resilience;
889
890     /* set the dequantizer; we can't do it during init as it might change for mpeg4
891        and we can't do it in the header decode as init isn't called for mpeg4 there yet */
892     if(s->out_format == FMT_H263){
893         if(s->mpeg_quant)
894             s->dct_unquantize = s->dct_unquantize_mpeg2;
895         else
896             s->dct_unquantize = s->dct_unquantize_h263;
897     }else 
898         s->dct_unquantize = s->dct_unquantize_mpeg1;
899
900     return 0;
901 }
902
903 /* generic function for encode/decode called after a frame has been coded/decoded */
904 void MPV_frame_end(MpegEncContext *s)
905 {
906     int i;
907     /* draw edges so that motion prediction referencing outside the picture works */
908     if(s->codec_id!=CODEC_ID_SVQ1){
909         if (s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
910             draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
911             draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
912             draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
913         }
914     }
915     emms_c();
916     
917     s->last_pict_type    = s->pict_type;
918     if(s->pict_type!=B_TYPE){
919         s->last_non_b_pict_type= s->pict_type;
920     }
921     
922     s->current_picture.quality= s->qscale; //FIXME get average of qscale_table
923     s->current_picture.pict_type= s->pict_type;
924     s->current_picture.key_frame= s->pict_type == I_TYPE;
925     
926     /* copy back current_picture variables */
927     for(i=0; i<MAX_PICTURE_COUNT; i++){
928         if(s->picture[i].data[0] == s->current_picture.data[0]){
929             s->picture[i]= s->current_picture;
930             break;
931         }    
932     }
933     assert(i<MAX_PICTURE_COUNT);
934
935     /* release non-reference frames */
936     for(i=0; i<MAX_PICTURE_COUNT; i++){
937         if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/)
938             s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
939     }
940     if(s->avctx->debug&FF_DEBUG_SKIP){
941         int x,y;        
942         for(y=0; y<s->mb_height; y++){
943             for(x=0; x<s->mb_width; x++){
944                 int count= s->mbskip_table[x + y*s->mb_width];
945                 if(count>9) count=9;
946                 printf(" %1d", count);
947             }
948             printf("\n");
949         }
950         printf("pict type: %d\n", s->pict_type);
951     }
952 }
953
954 static int get_sae(uint8_t *src, int ref, int stride){
955     int x,y;
956     int acc=0;
957     
958     for(y=0; y<16; y++){
959         for(x=0; x<16; x++){
960             acc+= ABS(src[x+y*stride] - ref);
961         }
962     }
963     
964     return acc;
965 }
966
967 static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
968     int x, y, w, h;
969     int acc=0;
970     
971     w= s->width &~15;
972     h= s->height&~15;
973     
974     for(y=0; y<h; y+=16){
975         for(x=0; x<w; x+=16){
976             int offset= x + y*stride;
977             int sad = s->dsp.pix_abs16x16(src + offset, ref + offset, stride);
978             int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
979             int sae = get_sae(src + offset, mean, stride);
980             
981             acc+= sae + 500 < sad;
982         }
983     }
984     return acc;
985 }
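
/* Rough reading of the two helpers above, as used by b_frame_strategy == 1 in
   select_input_picture(): get_sae() measures how far a 16x16 block is from its
   own mean (a cheap flatness/intra cost estimate), and a macroblock counts as
   "would rather be intra coded" when predicting it from the previous input
   frame is clearly worse than that:

       sad(cur, prev) > sae(cur, mean(cur)) + 500

   A candidate whose count exceeds about mb_num/40 terminates the run of
   B-frames in select_input_picture() below. */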
986
987
988 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
989     AVFrame *pic;
990     int i;
991     const int encoding_delay= s->max_b_frames;
992     int direct=1;
993
994     if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
995     if(pic_arg->linesize[0] != s->linesize) direct=0;
996     if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
997     if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
998   
999 //    printf("%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
1000     
1001     if(direct){
1002         i= find_unused_picture(s, 1);
1003
1004         pic= (AVFrame*)&s->picture[i];
1005         pic->reference= 1;
1006     
1007         for(i=0; i<4; i++){
1008             pic->data[i]= pic_arg->data[i];
1009             pic->linesize[i]= pic_arg->linesize[i];
1010         }
1011         alloc_picture(s, (Picture*)pic, 1);
1012     }else{
1013         i= find_unused_picture(s, 0);
1014
1015         pic= (AVFrame*)&s->picture[i];
1016         pic->reference= 1;
1017
1018         alloc_picture(s, (Picture*)pic, 0);
1019
1020         if(   pic->data[0] == pic_arg->data[0] 
1021            && pic->data[1] == pic_arg->data[1]
1022            && pic->data[2] == pic_arg->data[2]){
1023        // empty
1024         }else{
1025             int h_chroma_shift, v_chroma_shift;
1026         
1027             avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1028         
1029             for(i=0; i<3; i++){
1030                 int src_stride= pic_arg->linesize[i];
1031                 int dst_stride= i ? s->uvlinesize : s->linesize;
1032                 int h_shift= i ? h_chroma_shift : 0;
1033                 int v_shift= i ? v_chroma_shift : 0;
1034                 int w= s->width >>h_shift;
1035                 int h= s->height>>v_shift;
1036                 uint8_t *src= pic_arg->data[i];
1037                 uint8_t *dst= pic->data[i];
1038             
1039                 if(src_stride==dst_stride)
1040                     memcpy(dst, src, src_stride*h);
1041                 else{
1042                     while(h--){
1043                         memcpy(dst, src, w);
1044                         dst += dst_stride;
1045                         src += src_stride;
1046                     }
1047                 }
1048             }
1049         }
1050     }
1051     pic->quality= pic_arg->quality;
1052     pic->pict_type= pic_arg->pict_type;
1053     
1054     if(s->input_picture[encoding_delay])
1055         pic->display_picture_number= s->input_picture[encoding_delay]->display_picture_number + 1;
1056
1057     /* shift buffer entries */
1058     for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
1059         s->input_picture[i-1]= s->input_picture[i];
1060         
1061     s->input_picture[encoding_delay]= (Picture*)pic;
1062
1063     return 0;
1064 }
1065
1066 static void select_input_picture(MpegEncContext *s){
1067     int i;
1068     const int encoding_delay= s->max_b_frames;
1069     int coded_pic_num=0;    
1070
1071     if(s->reordered_input_picture[0])
1072         coded_pic_num= s->reordered_input_picture[0]->coded_picture_number + 1;
1073
1074     for(i=1; i<MAX_PICTURE_COUNT; i++)
1075         s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
1076     s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
1077
1078     /* set next picture types & ordering */
1079     if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
1080         if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture.data[0]==NULL || s->intra_only){
1081             s->reordered_input_picture[0]= s->input_picture[0];
1082             s->reordered_input_picture[0]->pict_type= I_TYPE;
1083             s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
1084         }else{
1085             int b_frames;
1086             
1087             if(s->flags&CODEC_FLAG_PASS2){
1088                 for(i=0; i<s->max_b_frames+1; i++){
1089                     int pict_num= s->input_picture[0]->display_picture_number + i;
1090                     int pict_type= s->rc_context.entry[pict_num].new_pict_type;
1091                     s->input_picture[i]->pict_type= pict_type;
1092                     
1093                     if(i + 1 >= s->rc_context.num_entries) break;
1094                 }
1095             }
1096
1097             if(s->input_picture[0]->pict_type){
1098                 /* user selected pict_type */
1099                 for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){
1100                     if(s->input_picture[b_frames]->pict_type!=B_TYPE) break;
1101                 }
1102             
1103                 if(b_frames > s->max_b_frames){
1104                     fprintf(stderr, "warning, too many bframes in a row\n");
1105                     b_frames = s->max_b_frames;
1106                 }
1107             }else if(s->b_frame_strategy==0){
1108                 b_frames= s->max_b_frames;
1109             }else if(s->b_frame_strategy==1){
1110                 for(i=1; i<s->max_b_frames+1; i++){
1111                     if(s->input_picture[i]->b_frame_score==0){
1112                         s->input_picture[i]->b_frame_score= 
1113                             get_intra_count(s, s->input_picture[i  ]->data[0], 
1114                                                s->input_picture[i-1]->data[0], s->linesize) + 1;
1115                     }
1116                 }
1117                 for(i=0; i<s->max_b_frames; i++){
1118                     if(s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
1119                 }
1120                                 
1121                 b_frames= FFMAX(0, i-1);
1122                 
1123                 /* reset scores */
1124                 for(i=0; i<b_frames+1; i++){
1125                     s->input_picture[i]->b_frame_score=0;
1126                 }
1127             }else{
1128                 fprintf(stderr, "illegal b frame strategy\n");
1129                 b_frames=0;
1130             }
1131
1132             emms_c();
1133 //static int b_count=0;
1134 //b_count+= b_frames;
1135 //printf("b_frames: %d\n", b_count);
1136                         
1137             s->reordered_input_picture[0]= s->input_picture[b_frames];
1138             if(   s->picture_in_gop_number + b_frames >= s->gop_size 
1139                || s->reordered_input_picture[0]->pict_type== I_TYPE)
1140                 s->reordered_input_picture[0]->pict_type= I_TYPE;
1141             else
1142                 s->reordered_input_picture[0]->pict_type= P_TYPE;
1143             s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
1144             for(i=0; i<b_frames; i++){
1145                 coded_pic_num++;
1146                 s->reordered_input_picture[i+1]= s->input_picture[i];
1147                 s->reordered_input_picture[i+1]->pict_type= B_TYPE;
1148                 s->reordered_input_picture[i+1]->coded_picture_number= coded_pic_num;
1149             }
1150         }
1151     }
1152     
1153     if(s->reordered_input_picture[0]){
1154        s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE;
1155
1156         if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
1157             int i= find_unused_picture(s, 0);
1158             Picture *pic= &s->picture[i];
1159
1160             s->new_picture= *s->reordered_input_picture[0];
1161
1162             /* mark us unused / free shared pic */
1163             for(i=0; i<4; i++)
1164                 s->reordered_input_picture[0]->data[i]= NULL;
1165             s->reordered_input_picture[0]->type= 0;
1166             
1167             pic->pict_type = s->reordered_input_picture[0]->pict_type;
1168             pic->quality   = s->reordered_input_picture[0]->quality;
1169             pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number;
1170             pic->reference = s->reordered_input_picture[0]->reference;
1171             
1172             alloc_picture(s, pic, 0);
1173
1174             s->current_picture= *pic;
1175         }else{
1176             assert(   s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER 
1177                    || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
1178             
1179             s->new_picture= *s->reordered_input_picture[0];
1180
1181             for(i=0; i<4; i++){
1182                 s->reordered_input_picture[0]->data[i]-=16; //FIXME dirty
1183             }
1184             s->current_picture= *s->reordered_input_picture[0];
1185         }
1186     
1187         s->picture_number= s->new_picture.display_picture_number;
1188 //printf("dpn:%d\n", s->picture_number);
1189     }else{
1190        memset(&s->new_picture, 0, sizeof(Picture));
1191     }
1192 }
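
/* Reordering example, assuming max_b_frames == 2 and the default strategy:
   input (display) order I0 B1 B2 P3 B4 B5 P6 ... is emitted in coded order
   I0 P3 B1 B2 P6 B4 B5 ..., i.e. each reference frame is moved ahead of the
   B-frames that predict from it.  coded_picture_number follows that emission
   order while display_picture_number keeps the input order, which is also why
   the encoder needs an encoding_delay of max_b_frames input frames before it
   can start producing output. */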
1193
1194 int MPV_encode_picture(AVCodecContext *avctx,
1195                        unsigned char *buf, int buf_size, void *data)
1196 {
1197     MpegEncContext *s = avctx->priv_data;
1198     AVFrame *pic_arg = data;
1199     int i;
1200
1201     init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
1202
1203     s->picture_in_gop_number++;
1204
1205     load_input_picture(s, pic_arg);
1206     
1207     select_input_picture(s);
1208     
1209     /* output? */
1210     if(s->new_picture.data[0]){
1211
1212         s->pict_type= s->new_picture.pict_type;
1213         if (s->fixed_qscale){ /* the ratecontrol needs the last qscale so we don't touch it for CBR */
1214             s->qscale= (int)(s->new_picture.quality+0.5);
1215             assert(s->qscale);
1216         }
1217 //emms_c();
1218 //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
1219         MPV_frame_start(s, avctx);
1220
1221         encode_picture(s, s->picture_number);
1222         
1223         avctx->real_pict_num  = s->picture_number;
1224         avctx->header_bits = s->header_bits;
1225         avctx->mv_bits     = s->mv_bits;
1226         avctx->misc_bits   = s->misc_bits;
1227         avctx->i_tex_bits  = s->i_tex_bits;
1228         avctx->p_tex_bits  = s->p_tex_bits;
1229         avctx->i_count     = s->i_count;
1230         avctx->p_count     = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
1231         avctx->skip_count  = s->skip_count;
1232
1233         MPV_frame_end(s);
1234
1235         if (s->out_format == FMT_MJPEG)
1236             mjpeg_picture_trailer(s);
1237         
1238         if(s->flags&CODEC_FLAG_PASS1)
1239             ff_write_pass1_stats(s);
1240     }
1241
1242     s->input_picture_number++;
1243
1244     flush_put_bits(&s->pb);
1245     s->frame_bits  = (pbBufPtr(&s->pb) - s->pb.buf) * 8;
1246     
1247     s->total_bits += s->frame_bits;
1248     avctx->frame_bits  = s->frame_bits;
1249
1250     for(i=0; i<4; i++){
1251         avctx->error[i] += s->current_picture.error[i];
1252     }
1253     
1254     return pbBufPtr(&s->pb) - s->pb.buf;
1255 }
1256
1257 static inline void gmc1_motion(MpegEncContext *s,
1258                                UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1259                                int dest_offset,
1260                                UINT8 **ref_picture, int src_offset)
1261 {
1262     UINT8 *ptr;
1263     int offset, src_x, src_y, linesize, uvlinesize;
1264     int motion_x, motion_y;
1265     int emu=0;
1266
1267     motion_x= s->sprite_offset[0][0];
1268     motion_y= s->sprite_offset[0][1];
1269     src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
1270     src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
1271     motion_x<<=(3-s->sprite_warping_accuracy);
1272     motion_y<<=(3-s->sprite_warping_accuracy);
1273     src_x = clip(src_x, -16, s->width);
1274     if (src_x == s->width)
1275         motion_x =0;
1276     src_y = clip(src_y, -16, s->height);
1277     if (src_y == s->height)
1278         motion_y =0;
1279
1280     linesize = s->linesize;
1281     uvlinesize = s->uvlinesize;
1282     
1283     ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1284
1285     dest_y+=dest_offset;
1286     if(s->flags&CODEC_FLAG_EMU_EDGE){
1287         if(src_x<0 || src_y<0 || src_x + (motion_x&15) + 16 > s->h_edge_pos
1288                               || src_y + (motion_y&15) + 16 > s->v_edge_pos){
1289             ff_emulated_edge_mc(s, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1290             ptr= s->edge_emu_buffer;
1291             emu=1;
1292         }
1293     }
1294     
1295     if((motion_x|motion_y)&7){
1296         s->dsp.gmc1(dest_y  , ptr  , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1297         s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1298     }else{
1299         int dxy;
1300         
1301         dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
1302         if (s->no_rounding){
1303             s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
1304         }else{
1305             s->dsp.put_pixels_tab       [0][dxy](dest_y, ptr, linesize, 16);
1306         }
1307     }
1308     
1309     if(s->flags&CODEC_FLAG_GRAY) return;
1310
1311     motion_x= s->sprite_offset[1][0];
1312     motion_y= s->sprite_offset[1][1];
1313     src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
1314     src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
1315     motion_x<<=(3-s->sprite_warping_accuracy);
1316     motion_y<<=(3-s->sprite_warping_accuracy);
1317     src_x = clip(src_x, -8, s->width>>1);
1318     if (src_x == s->width>>1)
1319         motion_x =0;
1320     src_y = clip(src_y, -8, s->height>>1);
1321     if (src_y == s->height>>1)
1322         motion_y =0;
1323
1324     offset = (src_y * uvlinesize) + src_x + (src_offset>>1);
1325     ptr = ref_picture[1] + offset;
1326     if(emu){
1327         ff_emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1328         ptr= s->edge_emu_buffer;
1329     }
1330     s->dsp.gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
1331     
1332     ptr = ref_picture[2] + offset;
1333     if(emu){
1334         ff_emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1335         ptr= s->edge_emu_buffer;
1336     }
1337     s->dsp.gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
1338     
1339     return;
1340 }
1341
1342 static inline void gmc_motion(MpegEncContext *s,
1343                                UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1344                                int dest_offset,
1345                                UINT8 **ref_picture, int src_offset)
1346 {
1347     UINT8 *ptr;
1348     int linesize, uvlinesize;
1349     const int a= s->sprite_warping_accuracy;
1350     int ox, oy;
1351
1352     linesize = s->linesize;
1353     uvlinesize = s->uvlinesize;
1354
1355     ptr = ref_picture[0] + src_offset;
1356
1357     dest_y+=dest_offset;
1358     
1359     ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
1360     oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;
1361
1362     s->dsp.gmc(dest_y, ptr, linesize, 16,
1363            ox, 
1364            oy, 
1365            s->sprite_delta[0][0], s->sprite_delta[0][1],
1366            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1367            a+1, (1<<(2*a+1)) - s->no_rounding,
1368            s->h_edge_pos, s->v_edge_pos);
1369     s->dsp.gmc(dest_y+8, ptr, linesize, 16,
1370            ox + s->sprite_delta[0][0]*8, 
1371            oy + s->sprite_delta[1][0]*8, 
1372            s->sprite_delta[0][0], s->sprite_delta[0][1],
1373            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1374            a+1, (1<<(2*a+1)) - s->no_rounding,
1375            s->h_edge_pos, s->v_edge_pos);
1376
1377     if(s->flags&CODEC_FLAG_GRAY) return;
1378
1379
1380     dest_cb+=dest_offset>>1;
1381     dest_cr+=dest_offset>>1;
1382     
1383     ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
1384     oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;
1385
1386     ptr = ref_picture[1] + (src_offset>>1);
1387     s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
1388            ox, 
1389            oy, 
1390            s->sprite_delta[0][0], s->sprite_delta[0][1],
1391            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1392            a+1, (1<<(2*a+1)) - s->no_rounding,
1393            s->h_edge_pos>>1, s->v_edge_pos>>1);
1394     
1395     ptr = ref_picture[2] + (src_offset>>1);
1396     s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
1397            ox, 
1398            oy, 
1399            s->sprite_delta[0][0], s->sprite_delta[0][1],
1400            s->sprite_delta[1][0], s->sprite_delta[1][1], 
1401            a+1, (1<<(2*a+1)) - s->no_rounding,
1402            s->h_edge_pos>>1, s->v_edge_pos>>1);
1403 }
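
/* The global motion compensation above evaluates, for each destination pixel
   (x,y) of the block, an affine source position with a+1 fractional bits
   (a = sprite_warping_accuracy), roughly:

       src_x = (ox + x*sprite_delta[0][0] + y*sprite_delta[0][1]) >> (a+1)
       src_y = (oy + x*sprite_delta[1][0] + y*sprite_delta[1][1]) >> (a+1)

   plus bilinear interpolation from the dropped fraction and the rounding term
   passed to gmc(); ox/oy already include the macroblock position.  gmc1_motion()
   above is the one-warp-point special case, which degenerates to a plain
   translation handled by gmc1(). */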
1404
1405
1406 void ff_emulated_edge_mc(MpegEncContext *s, UINT8 *src, int linesize, int block_w, int block_h, 
1407                                     int src_x, int src_y, int w, int h){
1408     int x, y;
1409     int start_y, start_x, end_y, end_x;
1410     UINT8 *buf= s->edge_emu_buffer;
1411
1412     if(src_y>= h){
1413         src+= (h-1-src_y)*linesize;
1414         src_y=h-1;
1415     }else if(src_y<=-block_h){
1416         src+= (1-block_h-src_y)*linesize;
1417         src_y=1-block_h;
1418     }
1419     if(src_x>= w){
1420         src+= (w-1-src_x);
1421         src_x=w-1;
1422     }else if(src_x<=-block_w){
1423         src+= (1-block_w-src_x);
1424         src_x=1-block_w;
1425     }
1426
1427     start_y= FFMAX(0, -src_y);
1428     start_x= FFMAX(0, -src_x);
1429     end_y= FFMIN(block_h, h-src_y);
1430     end_x= FFMIN(block_w, w-src_x);
1431
1432     // copy existing part
1433     for(y=start_y; y<end_y; y++){
1434         for(x=start_x; x<end_x; x++){
1435             buf[x + y*linesize]= src[x + y*linesize];
1436         }
1437     }
1438
1439     //top
1440     for(y=0; y<start_y; y++){
1441         for(x=start_x; x<end_x; x++){
1442             buf[x + y*linesize]= buf[x + start_y*linesize];
1443         }
1444     }
1445
1446     //bottom
1447     for(y=end_y; y<block_h; y++){
1448         for(x=start_x; x<end_x; x++){
1449             buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
1450         }
1451     }
1452                                     
1453     for(y=0; y<block_h; y++){
1454        //left
1455         for(x=0; x<start_x; x++){
1456             buf[x + y*linesize]= buf[start_x + y*linesize];
1457         }
1458        
1459        //right
1460         for(x=end_x; x<block_w; x++){
1461             buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
1462         }
1463     }
1464 }
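
/* Usage sketch, as in gmc1_motion()/mpeg_motion() above: when
   CODEC_FLAG_EMU_EDGE is set the reference frames have no replicated border,
   so a block read that would touch pixels outside the picture is first copied
   into edge_emu_buffer with the missing samples filled by edge replication:

       ff_emulated_edge_mc(s, ptr, linesize, 17, 17, src_x, src_y,
                           s->h_edge_pos, s->v_edge_pos);
       ptr = s->edge_emu_buffer;

   17x17 rather than 16x16 because half-pel interpolation reads one extra row
   and column. */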
1465
1466
1467 /* apply one mpeg motion vector to the three components */
1468 static inline void mpeg_motion(MpegEncContext *s,
1469                                UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1470                                int dest_offset,
1471                                UINT8 **ref_picture, int src_offset,
1472                                int field_based, op_pixels_func (*pix_op)[4],
1473                                int motion_x, int motion_y, int h)
1474 {
1475     UINT8 *ptr;
1476     int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
1477     int emu=0;
1478 #if 0    
1479 if(s->quarter_sample)
1480 {
1481     motion_x>>=1;
1482     motion_y>>=1;
1483 }
1484 #endif
1485     dxy = ((motion_y & 1) << 1) | (motion_x & 1);
1486     src_x = s->mb_x * 16 + (motion_x >> 1);
1487     src_y = s->mb_y * (16 >> field_based) + (motion_y >> 1);
1488                 
1489     /* WARNING: do not forget half pels */
1490     height = s->height >> field_based;
1491     v_edge_pos = s->v_edge_pos >> field_based;
1492     src_x = clip(src_x, -16, s->width);
1493     if (src_x == s->width)
1494         dxy &= ~1;
1495     src_y = clip(src_y, -16, height);
1496     if (src_y == height)
1497         dxy &= ~2;
1498     linesize   = s->linesize << field_based;
1499     uvlinesize = s->uvlinesize << field_based;
1500     ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset;
1501     dest_y += dest_offset;
1502
1503     if(s->flags&CODEC_FLAG_EMU_EDGE){
1504         if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 16 > s->h_edge_pos
1505                               || src_y + (motion_y&1) + h  > v_edge_pos){
1506             ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based, 
1507                              src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
1508             ptr= s->edge_emu_buffer + src_offset;
1509             emu=1;
1510         }
1511     }
1512     pix_op[0][dxy](dest_y, ptr, linesize, h);
1513
1514     if(s->flags&CODEC_FLAG_GRAY) return;
1515
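    /* derive the chroma motion vector from the luma one: for h263-style
       formats any fractional chroma position becomes a half-pel sample,
       while for MPEG-1/2 the luma vector is simply halved (truncating
       towards zero) before the half-pel bit is extracted */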
1516     if (s->out_format == FMT_H263) {
1517         dxy = 0;
1518         if ((motion_x & 3) != 0)
1519             dxy |= 1;
1520         if ((motion_y & 3) != 0)
1521             dxy |= 2;
1522         mx = motion_x >> 2;
1523         my = motion_y >> 2;
1524     } else {
1525         mx = motion_x / 2;
1526         my = motion_y / 2;
1527         dxy = ((my & 1) << 1) | (mx & 1);
1528         mx >>= 1;
1529         my >>= 1;
1530     }
1531     
1532     src_x = s->mb_x * 8 + mx;
1533     src_y = s->mb_y * (8 >> field_based) + my;
1534     src_x = clip(src_x, -8, s->width >> 1);
1535     if (src_x == (s->width >> 1))
1536         dxy &= ~1;
1537     src_y = clip(src_y, -8, height >> 1);
1538     if (src_y == (height >> 1))
1539         dxy &= ~2;
1540     offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
1541     ptr = ref_picture[1] + offset;
1542     if(emu){
1543         ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, 
1544                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1545         ptr= s->edge_emu_buffer + (src_offset >> 1);
1546     }
1547     pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1548
1549     ptr = ref_picture[2] + offset;
1550     if(emu){
1551         ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, 
1552                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1553         ptr= s->edge_emu_buffer + (src_offset >> 1);
1554     }
1555     pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1556 }
1557
1558 static inline void qpel_motion(MpegEncContext *s,
1559                                UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1560                                int dest_offset,
1561                                UINT8 **ref_picture, int src_offset,
1562                                int field_based, op_pixels_func (*pix_op)[4],
1563                                qpel_mc_func (*qpix_op)[16],
1564                                int motion_x, int motion_y, int h)
1565 {
1566     UINT8 *ptr;
1567     int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
1568     int emu=0;
1569
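    /* dxy packs the quarter-pel fractional part of the motion vector
       (2 bits per dimension) and selects one of the 16 interpolation
       functions in qpix_op */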
1570     dxy = ((motion_y & 3) << 2) | (motion_x & 3);
1571     src_x = s->mb_x * 16 + (motion_x >> 2);
1572     src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
1573
1574     height = s->height >> field_based;
1575     v_edge_pos = s->v_edge_pos >> field_based;
1576     src_x = clip(src_x, -16, s->width);
1577     if (src_x == s->width)
1578         dxy &= ~3;
1579     src_y = clip(src_y, -16, height);
1580     if (src_y == height)
1581         dxy &= ~12;
1582     linesize = s->linesize << field_based;
1583     uvlinesize = s->uvlinesize << field_based;
1584     ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1585     dest_y += dest_offset;
1586 //printf("%d %d %d\n", src_x, src_y, dxy);
1587     
1588     if(s->flags&CODEC_FLAG_EMU_EDGE){
1589         if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 16 > s->h_edge_pos
1590                               || src_y + (motion_y&3) + h  > v_edge_pos){
1591             ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based, 
1592                              src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
1593             ptr= s->edge_emu_buffer + src_offset;
1594             emu=1;
1595         }
1596     }
1597     if(!field_based)
1598         qpix_op[0][dxy](dest_y, ptr, linesize);
1599     else{
1600         //damn interlaced mode
1601         //FIXME boundary mirroring is not exactly correct here
1602         qpix_op[1][dxy](dest_y  , ptr  , linesize);
1603         qpix_op[1][dxy](dest_y+8, ptr+8, linesize);
1604     }
1605
1606     if(s->flags&CODEC_FLAG_GRAY) return;
1607
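    /* chroma is only half-pel, so the chroma vector is derived from the
       quarter-pel luma vector; the FF_BUG_QPEL_CHROMA path reproduces the
       different rounding of buggy encoders so their streams decode as intended */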
1608     if(field_based){
1609         mx= motion_x/2;
1610         my= motion_y>>1;
1611     }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
1612         mx= (motion_x>>1)|(motion_x&1);
1613         my= (motion_y>>1)|(motion_y&1);
1614     }else{
1615         mx= motion_x/2;
1616         my= motion_y/2;
1617     }
1618     mx= (mx>>1)|(mx&1);
1619     my= (my>>1)|(my&1);
1620     dxy= (mx&1) | ((my&1)<<1);
1621     mx>>=1;
1622     my>>=1;
1623
1624     src_x = s->mb_x * 8 + mx;
1625     src_y = s->mb_y * (8 >> field_based) + my;
1626     src_x = clip(src_x, -8, s->width >> 1);
1627     if (src_x == (s->width >> 1))
1628         dxy &= ~1;
1629     src_y = clip(src_y, -8, height >> 1);
1630     if (src_y == (height >> 1))
1631         dxy &= ~2;
1632
1633     offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
1634     ptr = ref_picture[1] + offset;
1635     if(emu){
1636         ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, 
1637                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1638         ptr= s->edge_emu_buffer + (src_offset >> 1);
1639     }
1640     pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr,  uvlinesize, h >> 1);
1641     
1642     ptr = ref_picture[2] + offset;
1643     if(emu){
1644         ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, 
1645                          src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1646         ptr= s->edge_emu_buffer + (src_offset >> 1);
1647     }
1648     pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr,  uvlinesize, h >> 1);
1649 }
1650
1651
1652 static inline void MPV_motion(MpegEncContext *s, 
1653                               UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1654                               int dir, UINT8 **ref_picture, 
1655                               op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
1656 {
1657     int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y;
1658     int mb_x, mb_y, i;
1659     UINT8 *ptr, *dest;
1660     int emu=0;
1661
1662     mb_x = s->mb_x;
1663     mb_y = s->mb_y;
1664
1665     switch(s->mv_type) {
1666     case MV_TYPE_16X16:
1667         if(s->mcsel){
1668             if(s->real_sprite_warping_points==1){
1669                 gmc1_motion(s, dest_y, dest_cb, dest_cr, 0,
1670                             ref_picture, 0);
1671             }else{
1672                 gmc_motion(s, dest_y, dest_cb, dest_cr, 0,
1673                             ref_picture, 0);
1674             }
1675         }else if(s->quarter_sample){
1676             qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
1677                         ref_picture, 0,
1678                         0, pix_op, qpix_op,
1679                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
1680         }else if(s->mspel){
1681             ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
1682                         ref_picture, pix_op,
1683                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
1684         }else{
1685             mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
1686                         ref_picture, 0,
1687                         0, pix_op,
1688                         s->mv[dir][0][0], s->mv[dir][0][1], 16);
1689         }           
1690         break;
1691     case MV_TYPE_8X8:
1692         mx = 0;
1693         my = 0;
1694         if(s->quarter_sample){
1695             for(i=0;i<4;i++) {
1696                 motion_x = s->mv[dir][i][0];
1697                 motion_y = s->mv[dir][i][1];
1698
1699                 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
1700                 src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
1701                 src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;
1702                     
1703                 /* WARNING: do not forget half pels */
1704                 src_x = clip(src_x, -16, s->width);
1705                 if (src_x == s->width)
1706                     dxy &= ~3;
1707                 src_y = clip(src_y, -16, s->height);
1708                 if (src_y == s->height)
1709                     dxy &= ~12;
1710                     
1711                 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
1712                 if(s->flags&CODEC_FLAG_EMU_EDGE){
1713                     if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 8 > s->h_edge_pos
1714                                           || src_y + (motion_y&3) + 8 > s->v_edge_pos){
1715                         ff_emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1716                         ptr= s->edge_emu_buffer;
1717                     }
1718                 }
1719                 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
1720                 qpix_op[1][dxy](dest, ptr, s->linesize);
1721
1722                 mx += s->mv[dir][i][0]/2;
1723                 my += s->mv[dir][i][1]/2;
1724             }
1725         }else{
1726             for(i=0;i<4;i++) {
1727                 motion_x = s->mv[dir][i][0];
1728                 motion_y = s->mv[dir][i][1];
1729
1730                 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
1731                 src_x = mb_x * 16 + (motion_x >> 1) + (i & 1) * 8;
1732                 src_y = mb_y * 16 + (motion_y >> 1) + (i >>1) * 8;
1733                     
1734                 /* WARNING: do not forget half pels */
1735                 src_x = clip(src_x, -16, s->width);
1736                 if (src_x == s->width)
1737                     dxy &= ~1;
1738                 src_y = clip(src_y, -16, s->height);
1739                 if (src_y == s->height)
1740                     dxy &= ~2;
1741                     
1742                 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
1743                 if(s->flags&CODEC_FLAG_EMU_EDGE){
1744                     if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 8 > s->h_edge_pos
1745                                           || src_y + (motion_y&1) + 8 > s->v_edge_pos){
1746                         ff_emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1747                         ptr= s->edge_emu_buffer;
1748                     }
1749                 }
1750                 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
1751                 pix_op[1][dxy](dest, ptr, s->linesize, 8);
1752
1753                 mx += s->mv[dir][i][0];
1754                 my += s->mv[dir][i][1];
1755             }
1756         }
1757
1758         if(s->flags&CODEC_FLAG_GRAY) break;
1759         /* In case of 8X8, we construct a single chroma motion vector
1760            with a special rounding */
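        /* mx/my hold the sum of the four luma vectors (halved first in the
           quarter-pel case); h263_chroma_roundtab folds the low 4 bits of
           that sum into the half-pel part of the chroma vector */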
1763         if (mx >= 0)
1764             mx = (h263_chroma_roundtab[mx & 0xf] + ((mx >> 3) & ~1));
1765         else {
1766             mx = -mx;
1767             mx = -(h263_chroma_roundtab[mx & 0xf] + ((mx >> 3) & ~1));
1768         }
1769         if (my >= 0)
1770             my = (h263_chroma_roundtab[my & 0xf] + ((my >> 3) & ~1));
1771         else {
1772             my = -my;
1773             my = -(h263_chroma_roundtab[my & 0xf] + ((my >> 3) & ~1));
1774         }
1775         dxy = ((my & 1) << 1) | (mx & 1);
1776         mx >>= 1;
1777         my >>= 1;
1778
1779         src_x = mb_x * 8 + mx;
1780         src_y = mb_y * 8 + my;
1781         src_x = clip(src_x, -8, s->width/2);
1782         if (src_x == s->width/2)
1783             dxy &= ~1;
1784         src_y = clip(src_y, -8, s->height/2);
1785         if (src_y == s->height/2)
1786             dxy &= ~2;
1787         
1788         offset = (src_y * (s->uvlinesize)) + src_x;
1789         ptr = ref_picture[1] + offset;
1790         if(s->flags&CODEC_FLAG_EMU_EDGE){
1791                 if(src_x<0 || src_y<0 || src_x + (dxy &1) + 8 > s->h_edge_pos>>1
1792                                       || src_y + (dxy>>1) + 8 > s->v_edge_pos>>1){
1793                     ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1794                     ptr= s->edge_emu_buffer;
1795                     emu=1;
1796                 }
1797             }
1798         pix_op[1][dxy](dest_cb, ptr, s->uvlinesize, 8);
1799
1800         ptr = ref_picture[2] + offset;
1801         if(emu){
1802             ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1803             ptr= s->edge_emu_buffer;
1804         }
1805         pix_op[1][dxy](dest_cr, ptr, s->uvlinesize, 8);
1806         break;
1807     case MV_TYPE_FIELD:
1808         if (s->picture_structure == PICT_FRAME) {
1809             if(s->quarter_sample){
1810                 /* top field */
1811                 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
1812                             ref_picture, s->field_select[dir][0] ? s->linesize : 0,
1813                             1, pix_op, qpix_op,
1814                             s->mv[dir][0][0], s->mv[dir][0][1], 8);
1815                 /* bottom field */
1816                 qpel_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
1817                             ref_picture, s->field_select[dir][1] ? s->linesize : 0,
1818                             1, pix_op, qpix_op,
1819                             s->mv[dir][1][0], s->mv[dir][1][1], 8);
1820             }else{
1821                 /* top field */       
1822                 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
1823                             ref_picture, s->field_select[dir][0] ? s->linesize : 0,
1824                             1, pix_op,
1825                             s->mv[dir][0][0], s->mv[dir][0][1], 8);
1826                 /* bottom field */
1827                 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
1828                             ref_picture, s->field_select[dir][1] ? s->linesize : 0,
1829                             1, pix_op,
1830                             s->mv[dir][1][0], s->mv[dir][1][1], 8);
1831             }
1832         } else {
1833             
1834
1835         }
1836         break;
1837     }
1838 }
1839
1840
1841 /* put block[] to dest[] */
1842 static inline void put_dct(MpegEncContext *s, 
1843                            DCTELEM *block, int i, UINT8 *dest, int line_size)
1844 {
1845     s->dct_unquantize(s, block, i, s->qscale);
1846     s->idct_put (dest, line_size, block);
1847 }
1848
1849 /* add block[] to dest[] */
1850 static inline void add_dct(MpegEncContext *s, 
1851                            DCTELEM *block, int i, UINT8 *dest, int line_size)
1852 {
1853     if (s->block_last_index[i] >= 0) {
1854         s->idct_add (dest, line_size, block);
1855     }
1856 }
1857
1858 static inline void add_dequant_dct(MpegEncContext *s, 
1859                            DCTELEM *block, int i, UINT8 *dest, int line_size)
1860 {
1861     if (s->block_last_index[i] >= 0) {
1862         s->dct_unquantize(s, block, i, s->qscale);
1863
1864         s->idct_add (dest, line_size, block);
1865     }
1866 }
1867
1868 /**
1869  * cleans the dc, ac and coded_block predictors for the current non-intra MB
1870  */
1871 void ff_clean_intra_table_entries(MpegEncContext *s)
1872 {
1873     int wrap = s->block_wrap[0];
1874     int xy = s->block_index[0];
1875     
1876     s->dc_val[0][xy           ] = 
1877     s->dc_val[0][xy + 1       ] = 
1878     s->dc_val[0][xy     + wrap] =
1879     s->dc_val[0][xy + 1 + wrap] = 1024;
1880     /* ac pred */
1881     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(INT16));
1882     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(INT16));
1883     if (s->msmpeg4_version>=3) {
1884         s->coded_block[xy           ] =
1885         s->coded_block[xy + 1       ] =
1886         s->coded_block[xy     + wrap] =
1887         s->coded_block[xy + 1 + wrap] = 0;
1888     }
1889     /* chroma */
1890     wrap = s->block_wrap[4];
1891     xy = s->mb_x + 1 + (s->mb_y + 1) * wrap;
1892     s->dc_val[1][xy] =
1893     s->dc_val[2][xy] = 1024;
1894     /* ac pred */
1895     memset(s->ac_val[1][xy], 0, 16 * sizeof(INT16));
1896     memset(s->ac_val[2][xy], 0, 16 * sizeof(INT16));
1897     
1898     s->mbintra_table[s->mb_x + s->mb_y*s->mb_width]= 0;
1899 }
1900
1901 /* generic function called after a macroblock has been parsed by the
1902    decoder or after it has been encoded by the encoder.
1903
1904    Important variables used:
1905    s->mb_intra : true if intra macroblock
1906    s->mv_dir   : motion vector direction
1907    s->mv_type  : motion vector type
1908    s->mv       : motion vector
1909    s->interlaced_dct : true if interlaced dct used (mpeg2)
1910  */
1911 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
1912 {
1913     int mb_x, mb_y;
1914     const int mb_xy = s->mb_y * s->mb_width + s->mb_x;
1915
1916     mb_x = s->mb_x;
1917     mb_y = s->mb_y;
1918
1919     s->current_picture.qscale_table[mb_xy]= s->qscale;
1920
1921     /* update DC predictors for P macroblocks */
1922     if (!s->mb_intra) {
1923         if (s->h263_pred || s->h263_aic) {
1924             if(s->mbintra_table[mb_xy])
1925                 ff_clean_intra_table_entries(s);
1926         } else {
1927             s->last_dc[0] =
1928             s->last_dc[1] =
1929             s->last_dc[2] = 128 << s->intra_dc_precision;
1930         }
1931     }
1932     else if (s->h263_pred || s->h263_aic)
1933         s->mbintra_table[mb_xy]=1;
1934
1935     /* update motion predictor, not for B-frames as they need the motion_val from the last P/S-Frame */
1936     if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE) { //FIXME move into h263.c if possible, format specific stuff shouldn't be here
1937         //FIXME a lot of this is only needed for !low_delay
1938         const int wrap = s->block_wrap[0];
1939         const int xy = s->block_index[0];
1940         const int mb_index= s->mb_x + s->mb_y*s->mb_width;
1941         if(s->mv_type == MV_TYPE_8X8){
1942             s->co_located_type_table[mb_index]= CO_LOCATED_TYPE_4MV;
1943         } else {
1944             int motion_x, motion_y;
1945             if (s->mb_intra) {
1946                 motion_x = 0;
1947                 motion_y = 0;
1948                 if(s->co_located_type_table)
1949                     s->co_located_type_table[mb_index]= 0;
1950             } else if (s->mv_type == MV_TYPE_16X16) {
1951                 motion_x = s->mv[0][0][0];
1952                 motion_y = s->mv[0][0][1];
1953                 if(s->co_located_type_table)
1954                     s->co_located_type_table[mb_index]= 0;
1955             } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
1956                 int i;
1957                 motion_x = s->mv[0][0][0] + s->mv[0][1][0];
1958                 motion_y = s->mv[0][0][1] + s->mv[0][1][1];
1959                 motion_x = (motion_x>>1) | (motion_x&1);
1960                 for(i=0; i<2; i++){
1961                     s->field_mv_table[mb_index][i][0]= s->mv[0][i][0];
1962                     s->field_mv_table[mb_index][i][1]= s->mv[0][i][1];
1963                     s->field_select_table[mb_index][i]= s->field_select[0][i];
1964                 }
1965                 s->co_located_type_table[mb_index]= CO_LOCATED_TYPE_FIELDMV;
1966             }
1967             /* no update if 8X8 because it has been done during parsing */
1968             s->motion_val[xy][0] = motion_x;
1969             s->motion_val[xy][1] = motion_y;
1970             s->motion_val[xy + 1][0] = motion_x;
1971             s->motion_val[xy + 1][1] = motion_y;
1972             s->motion_val[xy + wrap][0] = motion_x;
1973             s->motion_val[xy + wrap][1] = motion_y;
1974             s->motion_val[xy + 1 + wrap][0] = motion_x;
1975             s->motion_val[xy + 1 + wrap][1] = motion_y;
1976         }
1977     }
1978     
1979     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
1980         UINT8 *dest_y, *dest_cb, *dest_cr;
1981         int dct_linesize, dct_offset;
1982         op_pixels_func (*op_pix)[4];
1983         qpel_mc_func (*op_qpix)[16];
1984
1985         /* avoid copy if macroblock skipped in last frame too */
1986         if (s->pict_type != B_TYPE) {
1987             s->current_picture.mbskip_table[mb_xy]= s->mb_skiped;
1988         }
1989
1990         /* skip only during decoding, as during encoding we might trash the buffers a bit */
1991         if(!s->encoding){
1992             UINT8 *mbskip_ptr = &s->mbskip_table[mb_xy];
1993             const int age= s->current_picture.age;
1994
1995             assert(age);
1996
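            /* mbskip_table counts for how many consecutive frames this MB has
               been skipped; once that count reaches the age of the reference
               picture, the destination already contains the right pixels and
               the copy/IDCT below can be skipped completely */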
1997             if (s->mb_skiped) {
1998                 s->mb_skiped= 0;
1999                 assert(s->pict_type!=I_TYPE);
2000  
2001                 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2002                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2003
2004                 /* if previous was skipped too, then nothing to do !  */
2005                 if (*mbskip_ptr >= age){
2006 //if(s->pict_type!=B_TYPE && s->mb_x==0) printf("\n");
2007 //if(s->pict_type!=B_TYPE) printf("%d%d ", *mbskip_ptr, age);
2008                     if(s->pict_type!=B_TYPE) return;
2009                     if(s->avctx->draw_horiz_band==NULL && *mbskip_ptr > age) return; 
2010                     /* we don't draw complete frames here, so we can't skip */
2011                 }
2012             } else {
2013                 *mbskip_ptr = 0; /* not skipped */
2014             }
2015         }else
2016             s->mb_skiped= 0;
2017
2018         if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band){
2019             dest_y  = s->current_picture.data[0] + mb_x * 16;
2020             dest_cb = s->current_picture.data[1] + mb_x * 8;
2021             dest_cr = s->current_picture.data[2] + mb_x * 8;
2022         }else{
2023             dest_y  = s->current_picture.data[0] + (mb_y * 16* s->linesize  ) + mb_x * 16;
2024             dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
2025             dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
2026         }
2027
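        /* with interlaced DCT the four luma blocks cover alternating lines of
           the two fields, so the stride is doubled and the lower blocks start
           one line (instead of eight lines) below the upper ones */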
2028         if (s->interlaced_dct) {
2029             dct_linesize = s->linesize * 2;
2030             dct_offset = s->linesize;
2031         } else {
2032             dct_linesize = s->linesize;
2033             dct_offset = s->linesize * 8;
2034         }
2035
2036         if (!s->mb_intra) {
2037             /* motion handling */
2038             /* decoding or more than one mb_type (MC was already done otherwise) */
2039             if((!s->encoding) || (s->mb_type[mb_xy]&(s->mb_type[mb_xy]-1))){
2040                 if ((!s->no_rounding) || s->pict_type==B_TYPE){                
2041                     op_pix = s->dsp.put_pixels_tab;
2042                     op_qpix= s->dsp.put_qpel_pixels_tab;
2043                 }else{
2044                     op_pix = s->dsp.put_no_rnd_pixels_tab;
2045                     op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
2046                 }
2047
2048                 if (s->mv_dir & MV_DIR_FORWARD) {
2049                     MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2050                     op_pix = s->dsp.avg_pixels_tab;
2051                     op_qpix= s->dsp.avg_qpel_pixels_tab;
2052                 }
2053                 if (s->mv_dir & MV_DIR_BACKWARD) {
2054                     MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2055                 }
2056             }
2057
2058             /* skip dequant / idct if we are really late ;) */
2059             if(s->hurry_up>1) return;
2060
2061             /* add dct residue */
2062             if(s->encoding || !(   s->mpeg2 || s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO 
2063                                 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2064                 add_dequant_dct(s, block[0], 0, dest_y, dct_linesize);
2065                 add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2066                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2067                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2068
2069                 if(!(s->flags&CODEC_FLAG_GRAY)){
2070                     add_dequant_dct(s, block[4], 4, dest_cb, s->uvlinesize);
2071                     add_dequant_dct(s, block[5], 5, dest_cr, s->uvlinesize);
2072                 }
2073             } else if(s->codec_id != CODEC_ID_WMV2){
2074                 add_dct(s, block[0], 0, dest_y, dct_linesize);
2075                 add_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2076                 add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2077                 add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2078
2079                 if(!(s->flags&CODEC_FLAG_GRAY)){
2080                     add_dct(s, block[4], 4, dest_cb, s->uvlinesize);
2081                     add_dct(s, block[5], 5, dest_cr, s->uvlinesize);
2082                 }
2083             } else{
2084                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2085             }
2086         } else {
2087             /* dct only in intra block */
2088             if(s->encoding || !(s->mpeg2 || s->codec_id==CODEC_ID_MPEG1VIDEO)){
2089                 put_dct(s, block[0], 0, dest_y, dct_linesize);
2090                 put_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2091                 put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2092                 put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2093
2094                 if(!(s->flags&CODEC_FLAG_GRAY)){
2095                     put_dct(s, block[4], 4, dest_cb, s->uvlinesize);
2096                     put_dct(s, block[5], 5, dest_cr, s->uvlinesize);
2097                 }
2098             }else{
2099                 s->idct_put(dest_y                 , dct_linesize, block[0]);
2100                 s->idct_put(dest_y              + 8, dct_linesize, block[1]);
2101                 s->idct_put(dest_y + dct_offset    , dct_linesize, block[2]);
2102                 s->idct_put(dest_y + dct_offset + 8, dct_linesize, block[3]);
2103
2104                 if(!(s->flags&CODEC_FLAG_GRAY)){
2105                     s->idct_put(dest_cb, s->uvlinesize, block[4]);
2106                     s->idct_put(dest_cr, s->uvlinesize, block[5]);
2107                 }
2108             }
2109         }
2110     }
2111 }
2112
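/**
 * Descriptive note (added): zeroes the whole block if it only contains a few
 * isolated +-1 coefficients: each such coefficient is scored via tab[] according
 * to the zero run before it, any larger coefficient keeps the block, and if the
 * total score stays below the threshold all considered coefficients are cleared.
 * A negative threshold additionally allows the intra DC coefficient to be eliminated.
 */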
2113 static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
2114 {
2115     static const char tab[64]=
2116         {3,2,2,1,1,1,1,1,
2117          1,1,1,1,1,1,1,1,
2118          1,1,1,1,1,1,1,1,
2119          0,0,0,0,0,0,0,0,
2120          0,0,0,0,0,0,0,0,
2121          0,0,0,0,0,0,0,0,
2122          0,0,0,0,0,0,0,0,
2123          0,0,0,0,0,0,0,0};
2124     int score=0;
2125     int run=0;
2126     int i;
2127     DCTELEM *block= s->block[n];
2128     const int last_index= s->block_last_index[n];
2129     int skip_dc;
2130
2131     if(threshold<0){
2132         skip_dc=0;
2133         threshold= -threshold;
2134     }else
2135         skip_dc=1;
2136
2137     /* are all the coefficients which we could set to zero already zero? */
2138     if(last_index<=skip_dc - 1) return;
2139
2140     for(i=0; i<=last_index; i++){
2141         const int j = s->intra_scantable.permutated[i];
2142         const int level = ABS(block[j]);
2143         if(level==1){
2144             if(skip_dc && i==0) continue;
2145             score+= tab[run];
2146             run=0;
2147         }else if(level>1){
2148             return;
2149         }else{
2150             run++;
2151         }
2152     }
2153     if(score >= threshold) return;
2154     for(i=skip_dc; i<=last_index; i++){
2155         const int j = s->intra_scantable.permutated[i];
2156         block[j]=0;
2157     }
2158     if(block[0]) s->block_last_index[n]= 0;
2159     else         s->block_last_index[n]= -1;
2160 }
2161
2162 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index)
2163 {
2164     int i;
2165     const int maxlevel= s->max_qcoeff;
2166     const int minlevel= s->min_qcoeff;
2167     
2168     if(s->mb_intra){
2169         i=1; //skip clipping of intra dc
2170     }else
2171         i=0;
2172     
2173     for(;i<=last_index; i++){
2174         const int j= s->intra_scantable.permutated[i];
2175         int level = block[j];
2176        
2177         if     (level>maxlevel) level=maxlevel;
2178         else if(level<minlevel) level=minlevel;
2179         block[j]= level;
2180     }
2181 }
2182
2183 static inline void requantize_coeffs(MpegEncContext *s, DCTELEM block[64], int oldq, int newq, int n)
2184 {
2185     int i;
2186
2187     if(s->mb_intra){
2188         i=1; //skip clipping of intra dc
2189          //FIXME requantize, note (mpeg1/h263/h263p-aic don't need it,...)
2190     }else
2191         i=0;
2192     
2193     for(;i<=s->block_last_index[n]; i++){
2194         const int j = s->intra_scantable.permutated[i];
2195         int level = block[j];
2196         
2197         block[j]= ROUNDED_DIV(level*oldq, newq);
2198     }
2199
2200     for(i=s->block_last_index[n]; i>=0; i--){
2201         const int j = s->intra_scantable.permutated[i];
2202         if(block[j]) break;
2203     }
2204     s->block_last_index[n]= i;
2205 }
2206
2207 static inline void auto_requantize_coeffs(MpegEncContext *s, DCTELEM block[6][64])
2208 {
2209     int i,n, newq;
2210     const int maxlevel= s->max_qcoeff;
2211     const int minlevel= s->min_qcoeff;
2212     int largest=0, smallest=0;
2213
2214     assert(s->adaptive_quant);
2215     
2216     for(n=0; n<6; n++){
2217         if(s->mb_intra){
2218             i=1; //skip clipping of intra dc
2219              //FIXME requantize, note (mpeg1/h263/h263p-aic don't need it,...)
2220         }else
2221             i=0;
2222
2223         for(;i<=s->block_last_index[n]; i++){
2224             const int j = s->intra_scantable.permutated[i];
2225             int level = block[n][j];
2226             if(largest  < level) largest = level;
2227             if(smallest > level) smallest= level;
2228         }
2229     }
2230     
2231     for(newq=s->qscale+1; newq<32; newq++){
2232         if(   ROUNDED_DIV(smallest*s->qscale, newq) >= minlevel
2233            && ROUNDED_DIV(largest *s->qscale, newq) <= maxlevel) 
2234             break;
2235     }
2236         
2237     if(s->out_format==FMT_H263){
2238         /* h263-like formats cannot easily change qscale by more than 2 */
2239         if(s->avctx->qmin + 2 < newq)
2240             newq= s->avctx->qmin + 2;
2241     }
2242
2243     for(n=0; n<6; n++){
2244         requantize_coeffs(s, block[n], s->qscale, newq, n);
2245         clip_coeffs(s, block[n], s->block_last_index[n]);
2246     }
2247      
2248     s->dquant+= newq - s->qscale;
2249     s->qscale= newq;
2250 }
2251 #if 0
2252 static int pix_vcmp16x8(UINT8 *s, int stride){ //FIXME move to dsputil & optimize
2253     int score=0;
2254     int x,y;
2255     
2256     for(y=0; y<7; y++){
2257         for(x=0; x<16; x+=4){
2258             score+= ABS(s[x  ] - s[x  +stride]) + ABS(s[x+1] - s[x+1+stride]) 
2259                    +ABS(s[x+2] - s[x+2+stride]) + ABS(s[x+3] - s[x+3+stride]);
2260         }
2261         s+= stride;
2262     }
2263     
2264     return score;
2265 }
2266
2267 static int pix_diff_vcmp16x8(UINT8 *s1, UINT8*s2, int stride){ //FIXME move to dsputil & optimize
2268     int score=0;
2269     int x,y;
2270     
2271     for(y=0; y<7; y++){
2272         for(x=0; x<16; x++){
2273             score+= ABS(s1[x  ] - s2[x ] - s1[x  +stride] + s2[x +stride]);
2274         }
2275         s1+= stride;
2276         s2+= stride;
2277     }
2278     
2279     return score;
2280 }
2281 #else
2282 #define SQ(a) ((a)*(a))
2283
2284 static int pix_vcmp16x8(UINT8 *s, int stride){ //FIXME move to dsputil & optimize
2285     int score=0;
2286     int x,y;
2287     
2288     for(y=0; y<7; y++){
2289         for(x=0; x<16; x+=4){
2290             score+= SQ(s[x  ] - s[x  +stride]) + SQ(s[x+1] - s[x+1+stride]) 
2291                    +SQ(s[x+2] - s[x+2+stride]) + SQ(s[x+3] - s[x+3+stride]);
2292         }
2293         s+= stride;
2294     }
2295     
2296     return score;
2297 }
2298
2299 static int pix_diff_vcmp16x8(UINT8 *s1, UINT8*s2, int stride){ //FIXME move to dsputil & optimize
2300     int score=0;
2301     int x,y;
2302     
2303     for(y=0; y<7; y++){
2304         for(x=0; x<16; x++){
2305             score+= SQ(s1[x  ] - s2[x ] - s1[x  +stride] + s2[x +stride]);
2306         }
2307         s1+= stride;
2308         s2+= stride;
2309     }
2310     
2311     return score;
2312 }
2313
2314 #endif
2315
2316 void ff_draw_horiz_band(MpegEncContext *s){
2317     if (    s->avctx->draw_horiz_band 
2318         && (s->last_picture.data[0] || s->low_delay) ) {
2319         UINT8 *src_ptr[3];
2320         int y, h, offset;
2321         y = s->mb_y * 16;
2322         h = s->height - y;
2323         if (h > 16)
2324             h = 16;
2325
2326         if(s->pict_type==B_TYPE)
2327             offset = 0;
2328         else
2329             offset = y * s->linesize;
2330
2331         if(s->pict_type==B_TYPE || s->low_delay){
2332             src_ptr[0] = s->current_picture.data[0] + offset;
2333             src_ptr[1] = s->current_picture.data[1] + (offset >> 2);
2334             src_ptr[2] = s->current_picture.data[2] + (offset >> 2);
2335         } else {
2336             src_ptr[0] = s->last_picture.data[0] + offset;
2337             src_ptr[1] = s->last_picture.data[1] + (offset >> 2);
2338             src_ptr[2] = s->last_picture.data[2] + (offset >> 2);
2339         }
2340         emms_c();
2341
2342         s->avctx->draw_horiz_band(s->avctx, src_ptr, s->linesize,
2343                                y, s->width, h);
2344     }
2345 }
2346
2347 static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2348 {
2349     const int mb_x= s->mb_x;
2350     const int mb_y= s->mb_y;
2351     int i;
2352     int skip_dct[6];
2353     int dct_offset   = s->linesize*8; //default for progressive frames
2354     
2355     for(i=0; i<6; i++) skip_dct[i]=0;
2356     
2357     if(s->adaptive_quant){
2358         s->dquant= s->current_picture.qscale_table[mb_x + mb_y*s->mb_width] - s->qscale;
2359
2360         if(s->out_format==FMT_H263){
2361             if     (s->dquant> 2) s->dquant= 2;
2362             else if(s->dquant<-2) s->dquant=-2;
2363         }
2364             
2365         if(s->codec_id==CODEC_ID_MPEG4){        
2366             if(!s->mb_intra){
2367                 assert(s->dquant==0 || s->mv_type!=MV_TYPE_8X8);
2368
2369                 if(s->mv_dir&MV_DIRECT)
2370                     s->dquant=0;
2371             }
2372         }
2373         s->qscale+= s->dquant;
2374         s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
2375         s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
2376     }
2377
2378     if (s->mb_intra) {
2379         UINT8 *ptr;
2380         int wrap_y;
2381         int emu=0;
2382
2383         wrap_y = s->linesize;
2384         ptr = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
2385
2386         if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2387             ff_emulated_edge_mc(s, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2388             ptr= s->edge_emu_buffer;
2389             emu=1;
2390         }
2391         
2392         if(s->flags&CODEC_FLAG_INTERLACED_DCT){
2393             int progressive_score, interlaced_score;
2394             
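            /* compare the vertical correlation of frame lines against that of
               field lines; if coding the fields separately looks clearly
               cheaper, use interlaced DCT for this macroblock */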
2395             progressive_score= pix_vcmp16x8(ptr, wrap_y  ) + pix_vcmp16x8(ptr + wrap_y*8, wrap_y );
2396             interlaced_score = pix_vcmp16x8(ptr, wrap_y*2) + pix_vcmp16x8(ptr + wrap_y  , wrap_y*2);
2397             
2398             if(progressive_score > interlaced_score + 100){
2399                 s->interlaced_dct=1;
2400             
2401                 dct_offset= wrap_y;
2402                 wrap_y<<=1;
2403             }else
2404                 s->interlaced_dct=0;
2405         }
2406         
2407         s->dsp.get_pixels(s->block[0], ptr                 , wrap_y);
2408         s->dsp.get_pixels(s->block[1], ptr              + 8, wrap_y);
2409         s->dsp.get_pixels(s->block[2], ptr + dct_offset    , wrap_y);
2410         s->dsp.get_pixels(s->block[3], ptr + dct_offset + 8, wrap_y);
2411
2412         if(s->flags&CODEC_FLAG_GRAY){
2413             skip_dct[4]= 1;
2414             skip_dct[5]= 1;
2415         }else{
2416             int wrap_c = s->uvlinesize;
2417             ptr = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2418             if(emu){
2419                 ff_emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2420                 ptr= s->edge_emu_buffer;
2421             }
2422             s->dsp.get_pixels(s->block[4], ptr, wrap_c);
2423
2424             ptr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
2425             if(emu){
2426                 ff_emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2427                 ptr= s->edge_emu_buffer;
2428             }
2429             s->dsp.get_pixels(s->block[5], ptr, wrap_c);
2430         }
2431     }else{
2432         op_pixels_func (*op_pix)[4];
2433         qpel_mc_func (*op_qpix)[16];
2434         UINT8 *dest_y, *dest_cb, *dest_cr;
2435         UINT8 *ptr_y, *ptr_cb, *ptr_cr;
2436         int wrap_y, wrap_c;
2437         int emu=0;
2438
2439         dest_y  = s->current_picture.data[0] + (mb_y * 16 * s->linesize    ) + mb_x * 16;
2440         dest_cb = s->current_picture.data[1] + (mb_y * 8  * (s->uvlinesize)) + mb_x * 8;
2441         dest_cr = s->current_picture.data[2] + (mb_y * 8  * (s->uvlinesize)) + mb_x * 8;
2442         wrap_y = s->linesize;
2443         wrap_c = s->uvlinesize;
2444         ptr_y  = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
2445         ptr_cb = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2446         ptr_cr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
2447
2448         if ((!s->no_rounding) || s->pict_type==B_TYPE){
2449             op_pix = s->dsp.put_pixels_tab;
2450             op_qpix= s->dsp.put_qpel_pixels_tab;
2451         }else{
2452             op_pix = s->dsp.put_no_rnd_pixels_tab;
2453             op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
2454         }
2455
2456         if (s->mv_dir & MV_DIR_FORWARD) {
2457             MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2458             op_pix = s->dsp.avg_pixels_tab;
2459             op_qpix= s->dsp.avg_qpel_pixels_tab;
2460         }
2461         if (s->mv_dir & MV_DIR_BACKWARD) {
2462             MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2463         }
2464
2465         if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2466             ff_emulated_edge_mc(s, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2467             ptr_y= s->edge_emu_buffer;
2468             emu=1;
2469         }
2470         
2471         if(s->flags&CODEC_FLAG_INTERLACED_DCT){
2472             int progressive_score, interlaced_score;
2473             
2474             progressive_score= pix_diff_vcmp16x8(ptr_y           , dest_y           , wrap_y  ) 
2475                              + pix_diff_vcmp16x8(ptr_y + wrap_y*8, dest_y + wrap_y*8, wrap_y  );
2476             interlaced_score = pix_diff_vcmp16x8(ptr_y           , dest_y           , wrap_y*2)
2477                              + pix_diff_vcmp16x8(ptr_y + wrap_y  , dest_y + wrap_y  , wrap_y*2);
2478             
2479             if(progressive_score > interlaced_score + 600){
2480                 s->interlaced_dct=1;
2481             
2482                 dct_offset= wrap_y;
2483                 wrap_y<<=1;
2484             }else
2485                 s->interlaced_dct=0;
2486         }
2487         
2488         s->dsp.diff_pixels(s->block[0], ptr_y                 , dest_y                 , wrap_y);
2489         s->dsp.diff_pixels(s->block[1], ptr_y              + 8, dest_y              + 8, wrap_y);
2490         s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset    , dest_y + dct_offset    , wrap_y);
2491         s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y);
2492         
2493         if(s->flags&CODEC_FLAG_GRAY){
2494             skip_dct[4]= 1;
2495             skip_dct[5]= 1;
2496         }else{
2497             if(emu){
2498                 ff_emulated_edge_mc(s, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2499                 ptr_cb= s->edge_emu_buffer;
2500             }
2501             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2502             if(emu){
2503                 ff_emulated_edge_mc(s, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2504                 ptr_cr= s->edge_emu_buffer;
2505             }
2506             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2507         }
2508         /* pre-quantization: skip the DCT for blocks whose prediction error is small enough to quantize to zero anyway */
2509         if(s->current_picture.mc_mb_var[s->mb_width*mb_y+ mb_x]<2*s->qscale*s->qscale){
2510             //FIXME optimize
2511             if(s->dsp.pix_abs8x8(ptr_y               , dest_y               , wrap_y) < 20*s->qscale) skip_dct[0]= 1;
2512             if(s->dsp.pix_abs8x8(ptr_y            + 8, dest_y            + 8, wrap_y) < 20*s->qscale) skip_dct[1]= 1;
2513             if(s->dsp.pix_abs8x8(ptr_y +dct_offset   , dest_y +dct_offset   , wrap_y) < 20*s->qscale) skip_dct[2]= 1;
2514             if(s->dsp.pix_abs8x8(ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y) < 20*s->qscale) skip_dct[3]= 1;
2515             if(s->dsp.pix_abs8x8(ptr_cb              , dest_cb              , wrap_c) < 20*s->qscale) skip_dct[4]= 1;
2516             if(s->dsp.pix_abs8x8(ptr_cr              , dest_cr              , wrap_c) < 20*s->qscale) skip_dct[5]= 1;
2517 #if 0
2518 {
2519  static int stat[7];
2520  int num=0;
2521  for(i=0; i<6; i++)
2522   if(skip_dct[i]) num++;
2523  stat[num]++;
2524  
2525  if(s->mb_x==0 && s->mb_y==0){
2526   for(i=0; i<7; i++){
2527    printf("%6d %1d\n", stat[i], i);
2528   }
2529  }
2530 }
2531 #endif
2532         }
2533
2534     }
2535             
2536 #if 0
2537             {
2538                 float adap_parm;
2539                 
2540                 adap_parm = ((s->avg_mb_var << 1) + s->mb_var[s->mb_width*mb_y+mb_x] + 1.0) /
2541                             ((s->mb_var[s->mb_width*mb_y+mb_x] << 1) + s->avg_mb_var + 1.0);
2542             
2543                 printf("\ntype=%c qscale=%2d adap=%0.2f dquant=%4.2f var=%4d avgvar=%4d", 
2544                         (s->mb_type[s->mb_width*mb_y+mb_x] > 0) ? 'I' : 'P', 
2545                         s->qscale, adap_parm, s->qscale*adap_parm,
2546                         s->mb_var[s->mb_width*mb_y+mb_x], s->avg_mb_var);
2547             }
2548 #endif
2549     /* DCT & quantize */
2550     if(s->out_format==FMT_MJPEG){
2551         for(i=0;i<6;i++) {
2552             int overflow;
2553             s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, 8, &overflow);
2554             if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
2555         }
2556     }else{
2557         for(i=0;i<6;i++) {
2558             if(!skip_dct[i]){
2559                 int overflow;
2560                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2561             // FIXME we could decide to change the quantizer instead of clipping
2562             // JS: I don't think that would be a good idea, it could lower quality instead
2563             //     of improving it. Only INTRADC clipping deserves a change of quantizer
2564                 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
2565             }else
2566                 s->block_last_index[i]= -1;
2567         }
2568         if(s->luma_elim_threshold && !s->mb_intra)
2569             for(i=0; i<4; i++)
2570                 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2571         if(s->chroma_elim_threshold && !s->mb_intra)
2572             for(i=4; i<6; i++)
2573                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2574     }
2575
2576     if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
2577         s->block_last_index[4]=
2578         s->block_last_index[5]= 0;
2579         s->block[4][0]=
2580         s->block[5][0]= 128;
2581     }
2582
2583 #ifdef CONFIG_ENCODERS
2584     /* huffman encode */
2585     switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2586     case CODEC_ID_MPEG1VIDEO:
2587         mpeg1_encode_mb(s, s->block, motion_x, motion_y); break;
2588     case CODEC_ID_MPEG4:
2589         mpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
2590     case CODEC_ID_MSMPEG4V2:
2591     case CODEC_ID_MSMPEG4V3:
2592     case CODEC_ID_WMV1:
2593         msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
2594     case CODEC_ID_WMV2:
2595          ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break;
2596     case CODEC_ID_MJPEG:
2597         mjpeg_encode_mb(s, s->block); break;
2598     case CODEC_ID_H263:
2599     case CODEC_ID_H263P:
2600     case CODEC_ID_RV10:
2601         h263_encode_mb(s, s->block, motion_x, motion_y); break;
2602     default:
2603         assert(0);
2604     }
2605 #endif
2606 }
2607
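/**
 * Descriptive note (added): appends the first 'length' bits of the byte stream
 * src to pb, copying 16 bits at a time (src is read as big-endian 16-bit words).
 */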
2608 void ff_copy_bits(PutBitContext *pb, UINT8 *src, int length)
2609 {
2610     int bytes= length>>4;
2611     int bits= length&15;
2612     int i;
2613
2614     if(length==0) return;
2615
2616     for(i=0; i<bytes; i++) put_bits(pb, 16, be2me_16(((uint16_t*)src)[i]));
2617     put_bits(pb, bits, be2me_16(((uint16_t*)src)[i])>>(16-bits));
2618 }
2619
2620 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2621     int i;
2622
2623     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2624
2625     /* mpeg1 */
2626     d->mb_incr= s->mb_incr;
2627     for(i=0; i<3; i++)
2628         d->last_dc[i]= s->last_dc[i];
2629     
2630     /* statistics */
2631     d->mv_bits= s->mv_bits;
2632     d->i_tex_bits= s->i_tex_bits;
2633     d->p_tex_bits= s->p_tex_bits;
2634     d->i_count= s->i_count;
2635     d->f_count= s->f_count;
2636     d->b_count= s->b_count;
2637     d->skip_count= s->skip_count;
2638     d->misc_bits= s->misc_bits;
2639     d->last_bits= 0;
2640
2641     d->mb_skiped= s->mb_skiped;
2642     d->qscale= s->qscale;
2643 }
2644
2645 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2646     int i;
2647
2648     memcpy(d->mv, s->mv, 2*4*2*sizeof(int)); 
2649     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2650     
2651     /* mpeg1 */
2652     d->mb_incr= s->mb_incr;
2653     for(i=0; i<3; i++)
2654         d->last_dc[i]= s->last_dc[i];
2655     
2656     /* statistics */
2657     d->mv_bits= s->mv_bits;
2658     d->i_tex_bits= s->i_tex_bits;
2659     d->p_tex_bits= s->p_tex_bits;
2660     d->i_count= s->i_count;
2661     d->f_count= s->f_count;
2662     d->b_count= s->b_count;
2663     d->skip_count= s->skip_count;
2664     d->misc_bits= s->misc_bits;
2665
2666     d->mb_intra= s->mb_intra;
2667     d->mb_skiped= s->mb_skiped;
2668     d->mv_type= s->mv_type;
2669     d->mv_dir= s->mv_dir;
2670     d->pb= s->pb;
2671     if(s->data_partitioning){
2672         d->pb2= s->pb2;
2673         d->tex_pb= s->tex_pb;
2674     }
2675     d->block= s->block;
2676     for(i=0; i<6; i++)
2677         d->block_last_index[i]= s->block_last_index[i];
2678     d->interlaced_dct= s->interlaced_dct;
2679     d->qscale= s->qscale;
2680 }
2681
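/**
 * Descriptive note (added): encodes the current macroblock with the given
 * type/motion into one of two scratch bitstreams; if it is cheaper (fewer bits)
 * than the best candidate so far, the coding context is copied into 'best' and
 * next_block is flipped so the following candidate overwrites the losing buffer.
 */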
2682 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, 
2683                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2684                            int *dmin, int *next_block, int motion_x, int motion_y)
2685 {
2686     int bits_count;
2687     
2688     copy_context_before_encode(s, backup, type);
2689
2690     s->block= s->blocks[*next_block];
2691     s->pb= pb[*next_block];
2692     if(s->data_partitioning){
2693         s->pb2   = pb2   [*next_block];
2694         s->tex_pb= tex_pb[*next_block];
2695     }
2696
2697     encode_mb(s, motion_x, motion_y);
2698
2699     bits_count= get_bit_count(&s->pb);
2700     if(s->data_partitioning){
2701         bits_count+= get_bit_count(&s->pb2);
2702         bits_count+= get_bit_count(&s->tex_pb);
2703     }
2704
2705     if(bits_count<*dmin){
2706         *dmin= bits_count;
2707         *next_block^=1;
2708
2709         copy_context_after_encode(best, s, type);
2710     }
2711 }
2712                 
2713 static inline int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2714     uint32_t *sq = squareTbl + 256;
2715     int acc=0;
2716     int x,y;
2717     
2718     if(w==16 && h==16) 
2719         return s->dsp.sse[0](NULL, src1, src2, stride);
2720     else if(w==8 && h==8)
2721         return s->dsp.sse[1](NULL, src1, src2, stride);
2722     
2723     for(y=0; y<h; y++){
2724         for(x=0; x<w; x++){
2725             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2726         } 
2727     }
2728     
2729     assert(acc>=0);
2730     
2731     return acc;
2732 }
2733
2734 static void encode_picture(MpegEncContext *s, int picture_number)
2735 {
2736     int mb_x, mb_y, pdif = 0;
2737     int i;
2738     int bits;
2739     MpegEncContext best_s, backup_s;
2740     UINT8 bit_buf[2][3000];
2741     UINT8 bit_buf2[2][3000];
2742     UINT8 bit_buf_tex[2][3000];
2743     PutBitContext pb[2], pb2[2], tex_pb[2];
2744
2745     for(i=0; i<2; i++){
2746         init_put_bits(&pb    [i], bit_buf    [i], 3000, NULL, NULL);
2747         init_put_bits(&pb2   [i], bit_buf2   [i], 3000, NULL, NULL);
2748         init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000, NULL, NULL);
2749     }
2750
2751     s->picture_number = picture_number;
2752
2753     s->block_wrap[0]=
2754     s->block_wrap[1]=
2755     s->block_wrap[2]=
2756     s->block_wrap[3]= s->mb_width*2 + 2;
2757     s->block_wrap[4]=
2758     s->block_wrap[5]= s->mb_width + 2;
2759     
2760     /* Reset the average MB variance */
2761     s->current_picture.mb_var_sum = 0;
2762     s->current_picture.mc_mb_var_sum = 0;
2763
2764     /* we need to initialize some time vars before we can encode b-frames */
2765     if (s->h263_pred && !s->h263_msmpeg4)
2766         ff_set_mpeg4_time(s, s->picture_number); 
2767
2768     s->scene_change_score=0;
2769     
2770     s->qscale= (int)(s->frame_qscale + 0.5); //FIXME qscale / ... stuff for ME rate distortion
2771     
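    /* rounding control: alternate no_rounding between non-B-frames so the bias
       of the no-rounding halfpel interpolation does not accumulate over
       successive predictions (MSMPEG4 toggles only if flipflop_rounding is set) */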
2772     if(s->msmpeg4_version){
2773         if(s->pict_type==I_TYPE)
2774             s->no_rounding=1;
2775         else if(s->flipflop_rounding)
2776             s->no_rounding ^= 1;          
2777     }else{
2778         if(s->pict_type==I_TYPE)
2779             s->no_rounding=0;
2780         else if(s->pict_type!=B_TYPE)
2781             s->no_rounding ^= 1;          
2782     }
2783
2784     /* Estimate motion for every MB */
2785     if(s->pict_type != I_TYPE){
2786         for(mb_y=0; mb_y < s->mb_height; mb_y++) {
2787             s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
2788             s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
2789             s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
2790             s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
2791             for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2792                 s->mb_x = mb_x;
2793                 s->mb_y = mb_y;
2794                 s->block_index[0]+=2;
2795                 s->block_index[1]+=2;
2796                 s->block_index[2]+=2;
2797                 s->block_index[3]+=2;
2798
2799                 /* compute motion vector & mb_type and store in context */
2800                 if(s->pict_type==B_TYPE)
2801                     ff_estimate_b_frame_motion(s, mb_x, mb_y);
2802                 else
2803                     ff_estimate_p_frame_motion(s, mb_x, mb_y);
2804             }
2805         }
2806     }else /* if(s->pict_type == I_TYPE) */{
2807         /* I-Frame */
2808         //FIXME do we need to zero them?
2809         memset(s->motion_val[0], 0, sizeof(INT16)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2);
2810         memset(s->p_mv_table   , 0, sizeof(INT16)*(s->mb_width+2)*(s->mb_height+2)*2);
2811         memset(s->mb_type      , MB_TYPE_INTRA, sizeof(UINT8)*s->mb_width*s->mb_height);
2812         
2813         if(!s->fixed_qscale){
2814             /* finding spatial complexity for I-frame rate control */
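            /* mb_var approximates the per-MB variance: sum of squared pixels
               minus (sum of pixels)^2/256, scaled down by 256 */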
2815             for(mb_y=0; mb_y < s->mb_height; mb_y++) {
2816                 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2817                     int xx = mb_x * 16;
2818                     int yy = mb_y * 16;
2819                     uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
2820                     int varc;
2821                     int sum = s->dsp.pix_sum(pix, s->linesize);
2822     
2823                     varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
2824
2825                     s->current_picture.mb_var [s->mb_width * mb_y + mb_x] = varc;
2826                     s->current_picture.mb_mean[s->mb_width * mb_y + mb_x] = (sum+128)>>8;
2827                     s->current_picture.mb_var_sum    += varc;
2828                 }
2829             }
2830         }
2831     }
2832     emms_c();
2833
2834     if(s->scene_change_score > 0 && s->pict_type == P_TYPE){
2835         s->pict_type= I_TYPE;
2836         memset(s->mb_type   , MB_TYPE_INTRA, sizeof(UINT8)*s->mb_width*s->mb_height);
2837 //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
2838     }
2839
2840     if(s->pict_type==P_TYPE || s->pict_type==S_TYPE){
2841         s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER);
2842         ff_fix_long_p_mvs(s);
     }
2843     if(s->pict_type==B_TYPE){
2844         s->f_code= ff_get_best_fcode(s, s->b_forw_mv_table, MB_TYPE_FORWARD);
2845         s->b_code= ff_get_best_fcode(s, s->b_back_mv_table, MB_TYPE_BACKWARD);
2846
2847         ff_fix_long_b_mvs(s, s->b_forw_mv_table, s->f_code, MB_TYPE_FORWARD);
2848         ff_fix_long_b_mvs(s, s->b_back_mv_table, s->b_code, MB_TYPE_BACKWARD);
2849         ff_fix_long_b_mvs(s, s->b_bidir_forw_mv_table, s->f_code, MB_TYPE_BIDIR);
2850         ff_fix_long_b_mvs(s, s->b_bidir_back_mv_table, s->b_code, MB_TYPE_BIDIR);
2851     }
2852     
2853     if (s->fixed_qscale) 
2854         s->frame_qscale = s->current_picture.quality;
2855     else
2856         s->frame_qscale = ff_rate_estimate_qscale(s);
2857
2858     if(s->adaptive_quant){
2859         switch(s->codec_id){
2860         case CODEC_ID_MPEG4:
2861             ff_clean_mpeg4_qscales(s);
2862             break;
2863         case CODEC_ID_H263:
2864         case CODEC_ID_H263P:
2865             ff_clean_h263_qscales(s);
2866             break;
2867         }
2868
2869         s->qscale= s->current_picture.qscale_table[0];
2870     }else
2871         s->qscale= (int)(s->frame_qscale + 0.5);
2872         
2873     if (s->out_format == FMT_MJPEG) {
2874         /* for mjpeg, we do include qscale in the matrix */
2875         s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
2876         for(i=1;i<64;i++){
2877             int j= s->idct_permutation[i];
2878
2879             s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
2880         }
2881         convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, 
2882                        s->q_intra_matrix16_bias, s->intra_matrix, s->intra_quant_bias, 8, 8);
2883     }
2884     
2885     //FIXME var duplication
2886     s->current_picture.key_frame= s->pict_type == I_TYPE;
2887     s->current_picture.pict_type= s->pict_type;
2888
2889     if(s->current_picture.key_frame)
2890         s->picture_in_gop_number=0;
2891
2892     s->last_bits= get_bit_count(&s->pb);
2893     switch(s->out_format) {
2894     case FMT_MJPEG:
2895         mjpeg_picture_header(s);
2896         break;
2897     case FMT_H263:
2898         if (s->codec_id == CODEC_ID_WMV2) 
2899             ff_wmv2_encode_picture_header(s, picture_number);
2900         else if (s->h263_msmpeg4) 
2901             msmpeg4_encode_picture_header(s, picture_number);
2902         else if (s->h263_pred)
2903             mpeg4_encode_picture_header(s, picture_number);
2904         else if (s->h263_rv10) 
2905             rv10_encode_picture_header(s, picture_number);
2906         else
2907             h263_encode_picture_header(s, picture_number);
2908         break;
2909     case FMT_MPEG1:
2910         mpeg1_encode_picture_header(s, picture_number);
2911         break;
2912     }
2913     bits= get_bit_count(&s->pb);
2914     s->header_bits= bits - s->last_bits;
2915     s->last_bits= bits;
2916     s->mv_bits=0;
2917     s->misc_bits=0;
2918     s->i_tex_bits=0;
2919     s->p_tex_bits=0;
2920     s->i_count=0;
2921     s->f_count=0;
2922     s->b_count=0;
2923     s->skip_count=0;
2924
2925     for(i=0; i<3; i++){
2926         /* init last dc values */
2927         /* note: quant matrix value (8) is implied here */
2928         s->last_dc[i] = 128;
2929         
2930         s->current_picture.error[i] = 0;
2931     }
2932     s->mb_incr = 1;
2933     s->last_mv[0][0][0] = 0;
2934     s->last_mv[0][0][1] = 0;
2935
2936     if (s->codec_id==CODEC_ID_H263 || s->codec_id==CODEC_ID_H263P)
2937         s->gob_index = ff_h263_get_gob_height(s);
2938
2939     if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
2940         ff_mpeg4_init_partitions(s);
2941
2942     s->resync_mb_x=0;
2943     s->resync_mb_y=0;
2944     s->first_slice_line = 1;
2945     s->ptr_lastgob = s->pb.buf;
2946     s->ptr_last_mb_line = s->pb.buf;
2947     for(mb_y=0; mb_y < s->mb_height; mb_y++) {
2948         s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
2949         s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
2950         
2951         s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
2952         s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
2953         s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
2954         s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
2955         s->block_index[4]= s->block_wrap[4]*(mb_y + 1)                    + s->block_wrap[0]*(s->mb_height*2 + 2);
2956         s->block_index[5]= s->block_wrap[4]*(mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2);
2957         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2958             const int mb_type= s->mb_type[mb_y * s->mb_width + mb_x];
2959             const int xy= (mb_y+1) * (s->mb_width+2) + mb_x + 1;
2960 //            int d;
2961             int dmin=10000000;
2962
2963             s->mb_x = mb_x;
2964             s->mb_y = mb_y;
2965             s->block_index[0]+=2;
2966             s->block_index[1]+=2;
2967             s->block_index[2]+=2;
2968             s->block_index[3]+=2;
2969             s->block_index[4]++;
2970             s->block_index[5]++;
2971
2972             /* write gob / video packet header  */
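                 /* note: in RTP mode the encoder tracks how many bytes were written since
                    the last resync point and, once that plus an estimate of the upcoming
                    row's size would exceed rtp_payload_size, it emits a resync header so
                    each packet stays roughly below the payload limit.  Sketch of the test
                    below ("estimated_next_row" is an illustrative name only):

                        bytes_since_resync = pbBufPtr(&s->pb) - s->ptr_lastgob;
                        if (bytes_since_resync + estimated_next_row >= s->rtp_payload_size)
                            ... emit GOB header or MPEG-4 video packet header ...
                 */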
2973             if(s->rtp_mode){
2974                 int current_packet_size, is_gob_start;
2975                 
2976                 current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob;
2977                 is_gob_start=0;
2978                 
2979                 if(s->codec_id==CODEC_ID_MPEG4){
2980                     if(current_packet_size + s->mb_line_avgsize/s->mb_width >= s->rtp_payload_size
2981                        && s->mb_y + s->mb_x>0){
2982
2983                         if(s->partitioned_frame){
2984                             ff_mpeg4_merge_partitions(s);
2985                             ff_mpeg4_init_partitions(s);
2986                         }
2987                         ff_mpeg4_encode_video_packet_header(s);
2988
2989                         if(s->flags&CODEC_FLAG_PASS1){
2990                             int bits= get_bit_count(&s->pb);
2991                             s->misc_bits+= bits - s->last_bits;
2992                             s->last_bits= bits;
2993                         }
2994                         ff_mpeg4_clean_buffers(s);
2995                         is_gob_start=1;
2996                     }
2997                 }else{
2998                     if(current_packet_size + s->mb_line_avgsize*s->gob_index >= s->rtp_payload_size
2999                        && s->mb_x==0 && s->mb_y>0 && s->mb_y%s->gob_index==0){
3000                        
3001                         h263_encode_gob_header(s, mb_y);                       
3002                         is_gob_start=1;
3003                     }
3004                 }
3005
3006                 if(is_gob_start){
3007                     s->ptr_lastgob = pbBufPtr(&s->pb);
3008                     s->first_slice_line=1;
3009                     s->resync_mb_x=mb_x;
3010                     s->resync_mb_y=mb_y;
3011                 }
3012             }
3013
3014             if(  (s->resync_mb_x   == s->mb_x)
3015                && s->resync_mb_y+1 == s->mb_y){
3016                 s->first_slice_line=0; 
3017             }
3018
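                 /* note: mb_type is a bitmask of candidate coding modes for this
                    macroblock.  (x & (x-1)) clears the lowest set bit, so it is non-zero
                    exactly when more than one bit is set, i.e. more than one candidate
                    mode exists; each candidate is then trial-encoded below and the one
                    with the smallest dmin is kept.

                        // e.g. MB_TYPE_INTER|MB_TYPE_INTRA has two bits set:
                        // (mb_type & (mb_type-1)) != 0  ->  run the candidate loop
                 */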
3019             if(mb_type & (mb_type-1)){ // more than 1 MB type possible
3020                 int next_block=0;
3021                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3022
3023                 copy_context_before_encode(&backup_s, s, -1);
3024                 backup_s.pb= s->pb;
3025                 best_s.data_partitioning= s->data_partitioning;
3026                 best_s.partitioned_frame= s->partitioned_frame;
3027                 if(s->data_partitioning){
3028                     backup_s.pb2= s->pb2;
3029                     backup_s.tex_pb= s->tex_pb;
3030                 }
3031
3032                 if(mb_type&MB_TYPE_INTER){
3033                     s->mv_dir = MV_DIR_FORWARD;
3034                     s->mv_type = MV_TYPE_16X16;
3035                     s->mb_intra= 0;
3036                     s->mv[0][0][0] = s->p_mv_table[xy][0];
3037                     s->mv[0][0][1] = s->p_mv_table[xy][1];
3038                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER, pb, pb2, tex_pb, 
3039                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3040                 }
3041                 if(mb_type&MB_TYPE_INTER4V){                 
3042                     s->mv_dir = MV_DIR_FORWARD;
3043                     s->mv_type = MV_TYPE_8X8;
3044                     s->mb_intra= 0;
3045                     for(i=0; i<4; i++){
3046                         s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3047                         s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3048                     }
3049                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER4V, pb, pb2, tex_pb, 
3050                                  &dmin, &next_block, 0, 0);
3051                 }
3052                 if(mb_type&MB_TYPE_FORWARD){
3053                     s->mv_dir = MV_DIR_FORWARD;
3054                     s->mv_type = MV_TYPE_16X16;
3055                     s->mb_intra= 0;
3056                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3057                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3058                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_FORWARD, pb, pb2, tex_pb, 
3059                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3060                 }
3061                 if(mb_type&MB_TYPE_BACKWARD){
3062                     s->mv_dir = MV_DIR_BACKWARD;
3063                     s->mv_type = MV_TYPE_16X16;
3064                     s->mb_intra= 0;
3065                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3066                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3067                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BACKWARD, pb, pb2, tex_pb, 
3068                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3069                 }
3070                 if(mb_type&MB_TYPE_BIDIR){
3071                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3072                     s->mv_type = MV_TYPE_16X16;
3073                     s->mb_intra= 0;
3074                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3075                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3076                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3077                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3078                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BIDIR, pb, pb2, tex_pb, 
3079                                  &dmin, &next_block, 0, 0);
3080                 }
3081                 if(mb_type&MB_TYPE_DIRECT){
3082                     int mx= s->b_direct_mv_table[xy][0];
3083                     int my= s->b_direct_mv_table[xy][1];
3084                     
3085                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3086                     s->mb_intra= 0;
3087                     ff_mpeg4_set_direct_mv(s, mx, my);
3088                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_DIRECT, pb, pb2, tex_pb, 
3089                                  &dmin, &next_block, mx, my);
3090                 }
3091                 if(mb_type&MB_TYPE_INTRA){
3092                     s->mv_dir = MV_DIR_FORWARD;
3093                     s->mv_type = MV_TYPE_16X16;
3094                     s->mb_intra= 1;
3095                     s->mv[0][0][0] = 0;
3096                     s->mv[0][0][1] = 0;
3097                     encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTRA, pb, pb2, tex_pb, 
3098                                  &dmin, &next_block, 0, 0);
3099                     /* force cleaning of the AC/DC prediction state if needed ... */
3100                     if(s->h263_pred || s->h263_aic)
3101                         s->mbintra_table[mb_x + mb_y*s->mb_width]=1;
3102                 }
3103                 copy_context_after_encode(s, &best_s, -1);
3104                 
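                     /* note: encode_mb_hq() writes each candidate into one of two scratch
                        PutBit buffers and toggles next_block whenever a candidate becomes
                        the new best, so (as far as that toggling goes) the winning
                        macroblock's bits end up in bit_buf[next_block^1]; they are copied
                        into the real bitstream here and the saved PutBitContext restored. */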
3105                 pb_bits_count= get_bit_count(&s->pb);
3106                 flush_put_bits(&s->pb);
3107                 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3108                 s->pb= backup_s.pb;
3109                 
3110                 if(s->data_partitioning){
3111                     pb2_bits_count= get_bit_count(&s->pb2);
3112                     flush_put_bits(&s->pb2);
3113                     ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3114                     s->pb2= backup_s.pb2;
3115                     
3116                     tex_pb_bits_count= get_bit_count(&s->tex_pb);
3117                     flush_put_bits(&s->tex_pb);
3118                     ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3119                     s->tex_pb= backup_s.tex_pb;
3120                 }
3121                 s->last_bits= get_bit_count(&s->pb);
3122             } else {
3123                 int motion_x, motion_y;
3124                 s->mv_type=MV_TYPE_16X16;
3125                 // only one MB-Type possible
3126                 switch(mb_type){
3127                 case MB_TYPE_INTRA:
3128                     s->mv_dir = MV_DIR_FORWARD;
3129                     s->mb_intra= 1;
3130                     motion_x= s->mv[0][0][0] = 0;
3131                     motion_y= s->mv[0][0][1] = 0;
3132                     break;
3133                 case MB_TYPE_INTER:
3134                     s->mv_dir = MV_DIR_FORWARD;
3135                     s->mb_intra= 0;
3136                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3137                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3138                     break;
3139                 case MB_TYPE_INTER4V:
3140                     s->mv_dir = MV_DIR_FORWARD;
3141                     s->mv_type = MV_TYPE_8X8;
3142                     s->mb_intra= 0;
3143                     for(i=0; i<4; i++){
3144                         s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3145                         s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3146                     }
3147                     motion_x= motion_y= 0;
3148                     break;
3149                 case MB_TYPE_DIRECT:
3150                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3151                     s->mb_intra= 0;
3152                     motion_x=s->b_direct_mv_table[xy][0];
3153                     motion_y=s->b_direct_mv_table[xy][1];
3154                     ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3155                     break;
3156                 case MB_TYPE_BIDIR:
3157                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3158                     s->mb_intra= 0;
3159                     motion_x=0;
3160                     motion_y=0;
3161                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3162                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3163                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3164                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3165                     break;
3166                 case MB_TYPE_BACKWARD:
3167                     s->mv_dir = MV_DIR_BACKWARD;
3168                     s->mb_intra= 0;
3169                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3170                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3171                     break;
3172                 case MB_TYPE_FORWARD:
3173                     s->mv_dir = MV_DIR_FORWARD;
3174                     s->mb_intra= 0;
3175                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3176                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3177 //                    printf(" %d %d ", motion_x, motion_y);
3178                     break;
3179                 default:
3180                     motion_x=motion_y=0; //gcc warning fix
3181                     fprintf(stderr, "illegal MB type\n");
3182                 }
3183                 encode_mb(s, motion_x, motion_y);
3184             }
3185             /* clean the MV table in I/P/S-frames; direct mode in B-frames needs it */
3186             if(s->mb_intra /* && I,P,S_TYPE */){
3187                 s->p_mv_table[xy][0]=0;
3188                 s->p_mv_table[xy][1]=0;
3189             }
3190
3191             MPV_decode_mb(s, s->block);
3192             
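                 /* note: with CODEC_FLAG_PSNR the per-plane squared reconstruction error
                    is accumulated in current_picture.error[]; w/h are clipped at the
                    right/bottom border so samples outside the picture are not counted.
                    The PSNR itself is derived from these sums elsewhere, in the usual way
                    (illustrative formula, "pixel_count" is a placeholder):

                        psnr = 10.0 * log10(255.0 * 255.0 * pixel_count / (double)sse);
                 */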
3193             if(s->flags&CODEC_FLAG_PSNR){
3194                 int w= 16;
3195                 int h= 16;
3196
3197                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3198                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3199
3200                 s->current_picture.error[0] += sse(
3201                     s,
3202                     s->new_picture    .data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3203                     s->current_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3204                     w, h, s->linesize);
3205                 s->current_picture.error[1] += sse(
3206                     s,
3207                     s->new_picture    .data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
3208                     s->current_picture.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
3209                     w>>1, h>>1, s->uvlinesize);
3210                 s->current_picture.error[2] += sse(
3211                     s,
3212                     s->new_picture    .data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
3213                     s->current_picture.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
3214                     w>>1, h>>1, s->uvlinesize);
3215             }
3216 //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_width, get_bit_count(&s->pb));
3217         }
3218
3219
3220         /* Obtain average mb_row size for RTP */
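             /* note: mb_line_avgsize is a simple running average of the coded size of one
                macroblock row, avg = (avg + current_row_size) / 2; the RTP packetization
                code above uses it to estimate whether the next row still fits in the
                current packet. */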
3221         if (s->rtp_mode) {
3222             if (mb_y==0)
3223                 s->mb_line_avgsize = pbBufPtr(&s->pb) - s->ptr_last_mb_line;
3224             else {    
3225                 s->mb_line_avgsize = (s->mb_line_avgsize + pbBufPtr(&s->pb) - s->ptr_last_mb_line) >> 1;
3226             }
3227             s->ptr_last_mb_line = pbBufPtr(&s->pb);
3228         }
3229     }
3230     emms_c();
3231
3232     if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
3233         ff_mpeg4_merge_partitions(s);
3234
3235     if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
3236         msmpeg4_encode_ext_header(s);
3237
3238     if(s->codec_id==CODEC_ID_MPEG4) 
3239         ff_mpeg4_stuffing(&s->pb);
3240
3241     //if (s->gob_number)
3242     //    fprintf(stderr,"\nNumber of GOB: %d", s->gob_number);
3243     
3244     /* Send the last GOB if RTP */    
3245     if (s->rtp_mode) {
3246         flush_put_bits(&s->pb);
3247         pdif = pbBufPtr(&s->pb) - s->ptr_lastgob;
3248         /* Call the RTP callback to send the last GOB */
3249         if (s->rtp_callback)
3250             s->rtp_callback(s->ptr_lastgob, pdif, s->gob_number);
3251         s->ptr_lastgob = pbBufPtr(&s->pb);
3252         //fprintf(stderr,"\nGOB: %2d size: %d (last)", s->gob_number, pdif);
3253     }
3254 }
3255
3256 static int dct_quantize_c(MpegEncContext *s, 
3257                         DCTELEM *block, int n,
3258                         int qscale, int *overflow)
3259 {
3260     int i, j, level, last_non_zero, q;
3261     const int *qmat;
3262     const UINT8 *scantable= s->intra_scantable.scantable;
3263     int bias;
3264     int max=0;
3265     unsigned int threshold1, threshold2;
3266
3267     s->fdct (block);
3268
3269     if (s->mb_intra) {
3270         if (!s->h263_aic) {
3271             if (n < 4)
3272                 q = s->y_dc_scale;
3273             else
3274                 q = s->c_dc_scale;
3275             q = q << 3;
3276         } else
3277             /* For AIC we skip quant/dequant of INTRADC */
3278             q = 1 << 3;
3279             
3280         /* note: block[0] is assumed to be positive */
3281         block[0] = (block[0] + (q >> 1)) / q;
3282         i = 1;
3283         last_non_zero = 0;
3284         qmat = s->q_intra_matrix[qscale];
3285         bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
3286     } else {
3287         i = 0;
3288         last_non_zero = -1;
3289         qmat = s->q_inter_matrix[qscale];
3290         bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
3291     }
3292     threshold1= (1<<QMAT_SHIFT) - bias - 1;
3293     threshold2= (threshold1<<1);
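         /* note: inside the loop below, "level" is block[j]*qmat[j], i.e. the coefficient
            times the reciprocal quantizer step in QMAT_SHIFT-bit fixed point.  The
            unsigned compare is a branch-free test for |level| > threshold1:

                (unsigned)(level + threshold1) > threshold2
                    <=>  level < -threshold1  ||  level > threshold1

            and |level| > threshold1 = (1<<QMAT_SHIFT) - bias - 1 is exactly the condition
            under which (bias + |level|) >> QMAT_SHIFT is non-zero, so coefficients that
            would quantize to zero are cleared without doing the add and shift. */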
3294
3295     for(;i<64;i++) {
3296         j = scantable[i];
3297         level = block[j];
3298         level = level * qmat[j];
3299
3300 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
3301 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
3302         if(((unsigned)(level+threshold1))>threshold2){
3303             if(level>0){
3304                 level= (bias + level)>>QMAT_SHIFT;
3305                 block[j]= level;
3306             }else{
3307                 level= (bias - level)>>QMAT_SHIFT;
3308                 block[j]= -level;
3309             }
3310             max |=level;
3311             last_non_zero = i;
3312         }else{
3313             block[j]=0;
3314         }
3315     }
3316     *overflow= s->max_qcoeff < max; // overflow might have happened
3317     
3318     /* permute the coefficients into the order the IDCT expects; only the nonzero elements need to be permuted */
3319     if (s->idct_permutation_type != FF_NO_IDCT_PERM)
3320         ff_block_permute(block, s->idct_permutation, scantable, last_non_zero);
3321
3322     return last_non_zero;
3323 }
3324
3325 static void dct_unquantize_mpeg1_c(MpegEncContext *s, 
3326                                    DCTELEM *block, int n, int qscale)
3327 {
3328     int i, level, nCoeffs;
3329     const UINT16 *quant_matrix;
3330
3331     nCoeffs= s->block_last_index[n];
3332     
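         /* note: this is MPEG-1 style inverse quantization: intra AC coefficients are
            reconstructed roughly as (|level|*qscale*W[j])/8 and inter ones as
            ((2*|level|+1)*qscale*W[j])/16, with the sign restored afterwards.  The
            "(level - 1) | 1" step forces the result odd (MPEG-1 mismatch control),
            e.g. 6 becomes 5 while 7 stays 7. */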
3333     if (s->mb_intra) {
3334         if (n < 4) 
3335             block[0] = block[0] * s->y_dc_scale;
3336         else
3337             block[0] = block[0] * s->c_dc_scale;
3338         /* XXX: only mpeg1 */
3339         quant_matrix = s->intra_matrix;
3340         for(i=1;i<=nCoeffs;i++) {
3341             int j= s->intra_scantable.permutated[i];
3342             level = block[j];
3343             if (level) {
3344                 if (level < 0) {
3345                     level = -level;
3346                     level = (int)(level * qscale * quant_matrix[j]) >> 3;
3347                     level = (level - 1) | 1;
3348                     level = -level;
3349                 } else {
3350                     level = (int)(level * qscale * quant_matrix[j]) >> 3;
3351                     level = (level - 1) | 1;
3352                 }
3353 #ifdef PARANOID
3354                 if (level < -2048 || level > 2047)
3355                     fprintf(stderr, "unquant error %d %d\n", i, level);
3356 #endif
3357                 block[j] = level;
3358             }
3359         }
3360     } else {
3361         i = 0;
3362         quant_matrix = s->inter_matrix;
3363         for(;i<=nCoeffs;i++) {
3364             int j= s->intra_scantable.permutated[i];
3365             level = block[j];
3366             if (level) {
3367                 if (level < 0) {
3368                     level = -level;
3369                     level = (((level << 1) + 1) * qscale *
3370                              ((int) (quant_matrix[j]))) >> 4;
3371                     level = (level - 1) | 1;
3372                     level = -level;
3373                 } else {
3374                     level = (((level << 1) + 1) * qscale *
3375                              ((int) (quant_matrix[j]))) >> 4;
3376                     level = (level - 1) | 1;
3377                 }
3378 #ifdef PARANOID
3379                 if (level < -2048 || level > 2047)
3380                     fprintf(stderr, "unquant error %d %d\n", i, level);
3381 #endif
3382                 block[j] = level;
3383             }
3384         }
3385     }
3386 }
3387
3388 static void dct_unquantize_mpeg2_c(MpegEncContext *s, 
3389                                    DCTELEM *block, int n, int qscale)
3390 {
3391     int i, level, nCoeffs;
3392     const UINT16 *quant_matrix;
3393
3394     if(s->alternate_scan) nCoeffs= 63;
3395     else nCoeffs= s->block_last_index[n];
3396     
3397     if (s->mb_intra) {
3398         if (n < 4) 
3399             block[0] = block[0] * s->y_dc_scale;
3400         else
3401             block[0] = block[0] * s->c_dc_scale;
3402         quant_matrix = s->intra_matrix;
3403         for(i=1;i<=nCoeffs;i++) {
3404             int j= s->intra_scantable.permutated[i];
3405             level = block[j];
3406             if (level) {
3407                 if (level < 0) {
3408                     level = -level;
3409                     level = (int)(level * qscale * quant_matrix[j]) >> 3;
3410                     level = -level;
3411                 } else {
3412                     level = (int)(level * qscale * quant_matrix[j]) >> 3;
3413                 }
3414 #ifdef PARANOID
3415                 if (level < -2048 || level > 2047)
3416                     fprintf(stderr, "unquant error %d %d\n", i, level);
3417 #endif
3418                 block[j] = level;
3419             }
3420         }
3421     } else {
3422         int sum=-1;
3423         i = 0;
3424         quant_matrix = s->inter_matrix;
3425         for(;i<=nCoeffs;i++) {
3426             int j= s->intra_scantable.permutated[i];
3427             level = block[j];
3428             if (level) {
3429                 if (level < 0) {
3430                     level = -level;
3431                     level = (((level << 1) + 1) * qscale *
3432                              ((int) (quant_matrix[j]))) >> 4;
3433                     level = -level;
3434                 } else {
3435                     level = (((level << 1) + 1) * qscale *
3436                              ((int) (quant_matrix[j]))) >> 4;
3437                 }
3438 #ifdef PARANOID
3439                 if (level < -2048 || level > 2047)
3440                     fprintf(stderr, "unquant error %d %d\n", i, level);
3441 #endif
3442                 block[j] = level;
3443                 sum+=level;
3444             }
3445         }
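             /* note: MPEG-2 mismatch control -- the sum of all reconstructed coefficients
                must be odd.  "sum" starts at -1 and accumulates the non-zero levels, so
                sum&1 is 1 exactly when the coefficient sum is even, and the LSB of the
                last coefficient is toggled to fix the parity. */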
3446         block[63]^=sum&1;
3447     }
3448 }
3449
3450
3451 static void dct_unquantize_h263_c(MpegEncContext *s, 
3452                                   DCTELEM *block, int n, int qscale)
3453 {
3454     int i, level, qmul, qadd;
3455     int nCoeffs;
3456     
3457     assert(s->block_last_index[n]>=0);
3458     
3459     qadd = (qscale - 1) | 1;
3460     qmul = qscale << 1;
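         /* note: this matches the H.263 reconstruction rule |rec| = qscale*(2*|level|+1),
            minus 1 when qscale is even: qmul = 2*qscale, and qadd = (qscale-1)|1 equals
            qscale for odd qscale and qscale-1 for even qscale, so
            |level|*qmul + qadd == qscale*(2*|level|+1) - (qscale&1 ? 0 : 1).
            (For AIC intra blocks qadd is zeroed below.) */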
3461     
3462     if (s->mb_intra) {
3463         if (!s->h263_aic) {
3464             if (n < 4) 
3465                 block[0] = block[0] * s->y_dc_scale;
3466             else
3467                 block[0] = block[0] * s->c_dc_scale;
3468         }else
3469             qadd = 0;
3470         i = 1;
3471         nCoeffs= 63; // does not always use the zigzag table
3472     } else {
3473         i = 0;
3474         nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
3475     }
3476
3477     for(;i<=nCoeffs;i++) {
3478         level = block[i];
3479         if (level) {
3480             if (level < 0) {
3481                 level = level * qmul - qadd;
3482             } else {
3483                 level = level * qmul + qadd;
3484             }
3485 #ifdef PARANOID
3486             if (level < -2048 || level > 2047)
3487                 fprintf(stderr, "unquant error %d %d\n", i, level);
3488 #endif
3489             block[i] = level;
3490         }
3491     }