/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 */

/**
 * @file mpegvideo.c
 * The simplest mpeg encoder (well, it was the simplest!).
 */
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include "faandct.h"
#include <limits.h>
#ifdef CONFIG_ENCODERS
static int encode_picture(MpegEncContext *s, int picture_number);
#endif //CONFIG_ENCODERS
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale);
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
#ifdef CONFIG_ENCODERS
static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
static int sse_mb(MpegEncContext *s);
static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
#endif //CONFIG_ENCODERS
#ifdef HAVE_XVMC
extern int  XVMC_field_start(MpegEncContext*s, AVCodecContext *avctx);
extern void XVMC_field_end(MpegEncContext *s);
extern void XVMC_decode_mb(MpegEncContext *s);
#endif

void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;

/* enable all paranoid tests for rounding, overflows, etc... */
#ifdef CONFIG_ENCODERS
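/* Each entry below is round(2^14 * a[i]*a[j]), where a[0]=1 and
 * a[k]=cos(k*pi/16)*sqrt(2) are the AAN fDCT postscale factors; e.g. entry
 * (0,1) is 16384*1.38704 = 22725. convert_matrix() further down divides by
 * these to fold the missing AAN postscale into the quantization multiplier. */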
static const uint16_t aanscales[64] = {
    /* precomputed values scaled up by 14 bits */
    16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
    22725, 31521, 29692, 26722, 22725, 17855, 12299,  6270,
    21407, 29692, 27969, 25172, 21407, 16819, 11585,  5906,
    19266, 26722, 25172, 22654, 19266, 15137, 10426,  5315,
    16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
    12873, 17855, 16819, 15137, 12873, 10114,  6967,  3552,
     8867, 12299, 11585, 10426,  8867,  6967,  4799,  2446,
     4520,  6270,  5906,  5315,  4520,  3552,  2446,  1247
};
#endif //CONFIG_ENCODERS
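/* Chroma MV rounding table for H.263-style codecs: indexed by the fractional
 * part (in sixteenths of a pel) of the summed luma vectors, it gives the
 * rounded half-pel contribution, biased towards the half-pel position (1). */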
static const uint8_t h263_chroma_roundtab[16] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
};
static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};
#ifdef CONFIG_ENCODERS
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_MV*2+1];
static uint8_t default_fcode_tab[MAX_MV*2+1];

enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
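/* convert_matrix() turns each quantizer step (qscale * quant_matrix[j]) into
 * a fixed-point reciprocal, so that quantizing a coefficient becomes a
 * multiply and a shift, roughly level = (coef * qmat[qscale][i]) >> QMAT_SHIFT,
 * instead of a per-coefficient division. qmat16 holds a 16-bit variant,
 * (1<<QMAT_SHIFT_MMX)/step plus a precomputed rounding bias, for the MMX
 * quantizer. */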
static void convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
                           const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
{
    int qscale;
    int shift=0;

    for(qscale=qmin; qscale<=qmax; qscale++){
        int i;
        if (dsp->fdct == ff_jpeg_fdct_islow
#ifdef FAAN_POSTSCALE
            || dsp->fdct == ff_faandct
#endif
            ) {
            for(i=0;i<64;i++) {
                const int j= dsp->idct_permutation[i];
                /* 16 <= qscale * quant_matrix[i] <= 7905 */
                /* 19952         <= aanscales[i] * qscale * quant_matrix[i]           <= 249205026 */
                /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
                /* 3444240       >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */

                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
                                        (qscale * quant_matrix[j]));
            }
        } else if (dsp->fdct == fdct_ifast
#ifndef FAAN_POSTSCALE
                   || dsp->fdct == ff_faandct
#endif
                   ) {
            for(i=0;i<64;i++) {
                const int j= dsp->idct_permutation[i];
                /* 16 <= qscale * quant_matrix[i] <= 7905 */
                /* 19952         <= aanscales[i] * qscale * quant_matrix[i]           <= 249205026 */
                /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
                /* 3444240       >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */

                qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
                                        (aanscales[i] * qscale * quant_matrix[j]));
            }
        } else {
            for(i=0;i<64;i++) {
                const int j= dsp->idct_permutation[i];
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
                   So 16           <= qscale * quant_matrix[i]             <= 7905
                   so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
                   so 32768        >= (1<<19) / (qscale * quant_matrix[i]) >= 67
                */
                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
//                qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
                qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);

                if(qmat16[qscale][0][i]==0 || qmat16[qscale][0][i]==128*256) qmat16[qscale][0][i]=128*256-1;
                qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]);
            }
        }

        for(i=intra; i<64; i++){
            int64_t max= 8191;
            if (dsp->fdct == fdct_ifast
#ifndef FAAN_POSTSCALE
                || dsp->fdct == ff_faandct
#endif
                ) {
                max= (8191LL*aanscales[i]) >> 14;
            }
            while(((max * qmat[qscale][i]) >> shift) > INT_MAX){
                shift++;
            }
        }
    }
    if(shift){
        av_log(NULL, AV_LOG_INFO, "Warning, QMAT_SHIFT is larger than %d, overflows possible\n", QMAT_SHIFT - shift);
    }
}
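/* update_qscale() below maps the current lagrangian multiplier back to an
 * integer quantizer: with FF_LAMBDA_SHIFT==7 the first line is roughly
 * qscale = lambda*139/2^14, i.e. about lambda/118 (the inverse of the usual
 * lambda = FF_QP2LAMBDA*qp relation), clipped to [qmin,qmax]. */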
static inline void update_qscale(MpegEncContext *s){
    s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
    s->qscale= av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);

    s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
}
#endif //CONFIG_ENCODERS
void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
    int i;
    int end;

    st->scantable= src_scantable;

    for(i=0; i<64; i++){
        int j;
        j = src_scantable[i];
        st->permutated[i] = permutation[j];
#ifdef ARCH_POWERPC
        st->inverse[j] = i;
#endif
    }

    end=-1;
    for(i=0; i<64; i++){
        int j;
        j = st->permutated[i];
        if(j > end) end=j;
        st->raster_end[i]= end;
    }
}
#ifdef CONFIG_ENCODERS
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix){
    int i;

    if(matrix){
        put_bits(pb, 1, 1);
        for(i=0;i<64;i++) {
            put_bits(pb, 8, matrix[ ff_zigzag_direct[i] ]);
        }
    }else
        put_bits(pb, 1, 0);
}
#endif //CONFIG_ENCODERS
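/* Scans for an MPEG start code (the byte sequence 0x00 0x00 0x01 xx).
 * *state carries the last four bytes across calls, so a start code that
 * straddles two input buffers is still found; the return value points just
 * past the start code, or to end if none was found. */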
const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
    int i;

    assert(p<=end);
    if(p>=end)
        return end;

    for(i=0; i<3; i++){
        uint32_t tmp= *state << 8;
        *state= tmp + *(p++);
        if(tmp == 0x100 || p==end)
            return p;
    }

    while(p<end){
        if     (p[-1] > 1      ) p+= 3;
        else if(p[-2]          ) p+= 2;
        else if(p[-3]|(p[-1]-1)) p++;
        else{
            p++;
            break;
        }
    }

    p= FFMIN(p, end)-4;
    *state= be2me_32(unaligned32(p));

    return p+4;
}
/* init common dct for both encoder and decoder */
int DCT_common_init(MpegEncContext *s)
{
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if(s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

#if defined(HAVE_MMX)
    MPV_common_init_mmx(s);
#elif defined(ARCH_ALPHA)
    MPV_common_init_axp(s);
#elif defined(HAVE_MLIB)
    MPV_common_init_mlib(s);
#elif defined(HAVE_MMI)
    MPV_common_init_mmi(s);
#elif defined(ARCH_ARMV4L)
    MPV_common_init_armv4l(s);
#elif defined(ARCH_POWERPC)
    MPV_common_init_ppc(s);
#elif defined(ARCH_BFIN)
    MPV_common_init_bfin(s);
#endif

#ifdef CONFIG_ENCODERS
    s->dct_quantize= dct_quantize_c;
    s->denoise_dct= denoise_dct_c;
    s->fast_dct_quantize= s->dct_quantize;

    if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
        s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
    }
#endif //CONFIG_ENCODERS

    /* load & permute scantables
       note: only wmv uses different ones
    */
    if(s->alternate_scan){
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    }else{
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
static void copy_picture(Picture *dst, Picture *src){
    *dst = *src;
    dst->type= FF_BUFFER_TYPE_COPY;
}
#ifdef CONFIG_ENCODERS
static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst, AVFrame *src){
    int i;

    dst->pict_type              = src->pict_type;
    dst->quality                = src->quality;
    dst->coded_picture_number   = src->coded_picture_number;
    dst->display_picture_number = src->display_picture_number;
//    dst->reference              = src->reference;
    dst->pts                    = src->pts;
    dst->interlaced_frame       = src->interlaced_frame;
    dst->top_field_first        = src->top_field_first;

    if(s->avctx->me_threshold){
        if(!src->motion_val[0])
            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
        if(!src->mb_type)
            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
        if(!src->ref_index[0])
            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
        if(src->motion_subsample_log2 != dst->motion_subsample_log2)
            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
                   src->motion_subsample_log2, dst->motion_subsample_log2);

        memcpy(dst->mb_type, src->mb_type, s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));

        for(i=0; i<2; i++){
            int stride= ((16*s->mb_width )>>src->motion_subsample_log2) + 1;
            int height= ((16*s->mb_height)>>src->motion_subsample_log2);

            if(src->motion_val[i] && src->motion_val[i] != dst->motion_val[i]){
                memcpy(dst->motion_val[i], src->motion_val[i], 2*stride*height*sizeof(int16_t));
            }
            if(src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]){
                memcpy(dst->ref_index[i], src->ref_index[i], s->b8_stride*2*s->mb_height*sizeof(int8_t));
            }
        }
    }
}
#endif //CONFIG_ENCODERS
/**
 * allocates a Picture
 * The pixels are allocated/set by calling get_buffer() if shared=0
 */
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
    const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
    const int mb_array_size= s->mb_stride*s->mb_height;
    const int b8_array_size= s->b8_stride*s->mb_height*2;
    const int b4_array_size= s->b4_stride*s->mb_height*4;
    int i;

    if(shared){
        assert(pic->data[0]);
        assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
        pic->type= FF_BUFFER_TYPE_SHARED;
    }else{
        int r;

        assert(!pic->data[0]);

        r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);

        if(r<0 || !pic->age || !pic->type || !pic->data[0]){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
            return -1;
        }

        if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
            return -1;
        }

        if(pic->linesize[1] != pic->linesize[2]){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
            return -1;
        }

        s->linesize = pic->linesize[0];
        s->uvlinesize= pic->linesize[1];
    }

    if(pic->qscale_table==NULL){
        if (s->encoding) {
            CHECKED_ALLOCZ(pic->mb_var   , mb_array_size * sizeof(int16_t))
            CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
            CHECKED_ALLOCZ(pic->mb_mean  , mb_array_size * sizeof(int8_t))
        }

        CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
        CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
        CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num    * sizeof(uint32_t))
        pic->mb_type= pic->mb_type_base + s->mb_stride+1;
        if(s->out_format == FMT_H264){
            for(i=0; i<2; i++){
                CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t))
                pic->motion_val[i]= pic->motion_val_base[i]+4;
                CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
            }
            pic->motion_subsample_log2= 2;
        }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
            for(i=0; i<2; i++){
                CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t))
                pic->motion_val[i]= pic->motion_val_base[i]+4;
                CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
            }
            pic->motion_subsample_log2= 3;
        }
        if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
            CHECKED_ALLOCZ(pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6)
        }
        pic->qstride= s->mb_stride;
        CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan))
    }

    /* It might be nicer if the application would keep track of these
     * but it would require an API change. */
    memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
    s->prev_pict_types[0]= s->pict_type;
    if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
        pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.

    return 0;
fail: //for the CHECKED_ALLOCZ macro
    return -1;
}
/**
 * deallocates a picture
 */
static void free_picture(MpegEncContext *s, Picture *pic){
    int i;

    if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
        s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
    }

    av_freep(&pic->mb_var);
    av_freep(&pic->mc_mb_var);
    av_freep(&pic->mb_mean);
    av_freep(&pic->mbskip_table);
    av_freep(&pic->qscale_table);
    av_freep(&pic->mb_type_base);
    av_freep(&pic->dct_coeff);
    av_freep(&pic->pan_scan);
    pic->mb_type= NULL;
    for(i=0; i<2; i++){
        av_freep(&pic->motion_val_base[i]);
        av_freep(&pic->ref_index[i]);
    }

    if(pic->type == FF_BUFFER_TYPE_SHARED){
        for(i=0; i<4; i++){
            pic->base[i]=
            pic->data[i]= NULL;
        }
        pic->type= 0;
    }
}
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
    int i;

    // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
    CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*21*2); //(width + edge + align)*interlaced*MBsize*tolerance
    s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;

    //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
    CHECKED_ALLOCZ(s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t))
    s->rd_scratchpad=   s->me.scratchpad;
    s->b_scratchpad=    s->me.scratchpad;
    s->obmc_scratchpad= s->me.scratchpad + 16;
    if (s->encoding) {
        CHECKED_ALLOCZ(s->me.map      , ME_MAP_SIZE*sizeof(uint32_t))
        CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
        if(s->avctx->noise_reduction){
            CHECKED_ALLOCZ(s->dct_error_sum, 2 * 64 * sizeof(int))
        }
    }
    CHECKED_ALLOCZ(s->blocks, 64*12*2 * sizeof(DCTELEM))
    s->block= s->blocks[0];

    for(i=0;i<12;i++){
        s->pblocks[i] = (short *)(&s->block[i]);
    }
    return 0;
fail:
    return -1; //free() through MPV_common_end()
}
static void free_duplicate_context(MpegEncContext *s){
    if(s==NULL) return;

    av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
    av_freep(&s->me.scratchpad);
    s->rd_scratchpad=
    s->b_scratchpad=
    s->obmc_scratchpad= NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    s->block= NULL;
}
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
#define COPY(a) bak->a= src->a
    COPY(allocated_edge_emu_buffer);
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
#undef COPY
}
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
    MpegEncContext bak;
    int i;
    //FIXME copy only needed parts
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    for(i=0;i<12;i++){
        dst->pblocks[i] = (short *)(&dst->block[i]);
    }
//STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
}
#ifdef CONFIG_ENCODERS
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src){
#define COPY(a) dst->a= src->a
    COPY(pict_type);
    COPY(current_picture);
    COPY(f_code);
    COPY(b_code);
    COPY(qscale);
    COPY(lambda);
    COPY(lambda2);
    COPY(picture_in_gop_number);
    COPY(gop_picture_number);
    COPY(frame_pred_frame_dct); //FIXME don't set in encode_header
    COPY(progressive_frame); //FIXME don't set in encode_header
    COPY(partitioned_frame); //FIXME don't set in encode_header
#undef COPY
}
#endif //CONFIG_ENCODERS
/**
 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
 * the changed fields will not depend upon the prior state of the MpegEncContext.
 */
static void MPV_common_defaults(MpegEncContext *s){
    s->y_dc_scale_table=
    s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table= ff_default_chroma_qscale_table;
    s->progressive_frame= 1;
    s->progressive_sequence= 1;
    s->picture_structure= PICT_FRAME;

    s->coded_picture_number = 0;
    s->picture_number = 0;
    s->input_picture_number = 0;

    s->picture_in_gop_number = 0;

    s->f_code = 1;
    s->b_code = 1;
}
/**
 * sets the given MpegEncContext to defaults for decoding.
 * the changed fields will not depend upon the prior state of the MpegEncContext.
 */
void MPV_decode_defaults(MpegEncContext *s){
    MPV_common_defaults(s);
}
/**
 * sets the given MpegEncContext to defaults for encoding.
 * the changed fields will not depend upon the prior state of the MpegEncContext.
 */
#ifdef CONFIG_ENCODERS
static void MPV_encode_defaults(MpegEncContext *s){
    int i;
    MPV_common_defaults(s);

    for(i=-16; i<16; i++){
        default_fcode_tab[i + MAX_MV]= 1;
    }
    s->me.mv_penalty= default_mv_penalty;
    s->fcode_tab= default_fcode_tab;
}
#endif //CONFIG_ENCODERS
/**
 * init common structure for both encoder and decoder.
 * this assumes that some variables like width/height are already set
 */
int MPV_common_init(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    s->mb_height = (s->height + 15) / 16;

    if(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height)){
        av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
        return -1;
    }

    if((s->width || s->height) && avcodec_check_dimensions(s->avctx, s->width, s->height))
        return -1;

    dsputil_init(&s->dsp, s->avctx);
    DCT_common_init(s);

    s->flags= s->avctx->flags;
    s->flags2= s->avctx->flags2;

    s->mb_width  = (s->width  + 15) / 16;
    s->mb_stride = s->mb_width + 1;
    s->b8_stride = s->mb_width*2 + 1;
    s->b4_stride = s->mb_width*4 + 1;
    mb_array_size= s->mb_height * s->mb_stride;
    mv_table_size= (s->mb_height+2) * s->mb_stride + 1;

    /* set chroma shifts */
    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
                                                    &(s->chroma_y_shift) );

    /* set default edge pos, will be overridden in decode_header if needed */
    s->h_edge_pos= s->mb_width*16;
    s->v_edge_pos= s->mb_height*16;

    s->mb_num = s->mb_width * s->mb_height;

    s->block_wrap[0]=
    s->block_wrap[1]=
    s->block_wrap[2]=
    s->block_wrap[3]= s->b8_stride;
    s->block_wrap[4]=
    s->block_wrap[5]= s->mb_stride;

    y_size = s->b8_stride * (2 * s->mb_height + 1);
    c_size = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    /* convert fourcc to upper case */
    s->codec_tag=          toupper( s->avctx->codec_tag     &0xFF)
                        + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
                        + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
                        + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);

    s->stream_codec_tag=          toupper( s->avctx->stream_codec_tag     &0xFF)
                               + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
                               + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16)
                               + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);

    s->avctx->coded_frame= (AVFrame*)&s->current_picture;

    CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error resilience code looks cleaner with this
    for(y=0; y<s->mb_height; y++){
        for(x=0; x<s->mb_width; x++){
            s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
        }
    }
    s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables */
        CHECKED_ALLOCZ(s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t))
        s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;

        if(s->msmpeg4_version){
            CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
        }
        CHECKED_ALLOCZ(s->avctx->stats_out, 256);

        /* Allocate MB type table */
        CHECKED_ALLOCZ(s->mb_type  , mb_array_size * sizeof(uint16_t)) //needed for encoding

        CHECKED_ALLOCZ(s->lambda_table, mb_array_size * sizeof(int))

        CHECKED_ALLOCZ(s->q_intra_matrix, 64*32 * sizeof(int))
        CHECKED_ALLOCZ(s->q_inter_matrix, 64*32 * sizeof(int))
        CHECKED_ALLOCZ(s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t))
        CHECKED_ALLOCZ(s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t))
        CHECKED_ALLOCZ(s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
        CHECKED_ALLOCZ(s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))

        if(s->avctx->noise_reduction){
            CHECKED_ALLOCZ(s->dct_offset, 2 * 64 * sizeof(uint16_t))
        }
    }
    CHECKED_ALLOCZ(s->picture, MAX_PICTURE_COUNT * sizeof(Picture))

    CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))

    if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
        /* interlaced direct mode decoding tables */
        for(i=0; i<2; i++){
            int j, k;
            for(j=0; j<2; j++){
                for(k=0; k<2; k++){
                    CHECKED_ALLOCZ(s->b_field_mv_table_base[i][j][k]     , mv_table_size * 2 * sizeof(int16_t))
                    s->b_field_mv_table[i][j][k]    = s->b_field_mv_table_base[i][j][k]     + s->mb_stride + 1;
                }
                CHECKED_ALLOCZ(s->b_field_select_table[i][j]     , mb_array_size * 2 * sizeof(uint8_t))
                CHECKED_ALLOCZ(s->p_field_mv_table_base[i][j]     , mv_table_size * 2 * sizeof(int16_t))
                s->p_field_mv_table[i][j]    = s->p_field_mv_table_base[i][j]     + s->mb_stride + 1;
            }
            CHECKED_ALLOCZ(s->p_field_select_table[i]      , mb_array_size * 2 * sizeof(uint8_t))
        }
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        CHECKED_ALLOCZ(s->ac_val_base, yc_size * sizeof(int16_t) * 16);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;

        /* cbp values */
        CHECKED_ALLOCZ(s->coded_block_base, y_size);
        s->coded_block= s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        CHECKED_ALLOCZ(s->cbp_table  , mb_array_size * sizeof(uint8_t))
        CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        //MN: we need these for error resilience of intra-frames
        CHECKED_ALLOCZ(s->dc_val_base, yc_size * sizeof(int16_t));
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for(i=0;i<yc_size;i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is an intra block */
    CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
    //Note the +1 is for a quicker mpeg4 slice_end detection
    CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);

    s->parse_context.state= -1;
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
        s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
        s->visualization_buffer[1] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH);
        s->visualization_buffer[2] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH);
    }

    s->context_initialized = 1;

    s->thread_context[0]= s;
    for(i=1; i<s->avctx->thread_count; i++){
        s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
        memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
    }
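    /* Split the picture into one horizontal slice per thread: the rounded
     * division below hands each thread about mb_height/thread_count rows,
     * e.g. 36 rows on 8 threads become rows 0-4, 5-8, 9-13, ..., 32-35. */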
    for(i=0; i<s->avctx->thread_count; i++){
        if(init_duplicate_context(s->thread_context[i], s) < 0)
            goto fail;
        s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
        s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
    }

    return 0;
 fail:
    MPV_common_end(s);
    return -1;
}
/* init common structure for both encoder and decoder */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    for(i=0; i<s->avctx->thread_count; i++){
        free_duplicate_context(s->thread_context[i]);
    }
    for(i=1; i<s->avctx->thread_count; i++){
        av_freep(&s->thread_context[i]);
    }

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size=0;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table= NULL;
    s->b_forw_mv_table= NULL;
    s->b_back_mv_table= NULL;
    s->b_bidir_forw_mv_table= NULL;
    s->b_bidir_back_mv_table= NULL;
    s->b_direct_mv_table= NULL;
    for(i=0; i<2; i++){
        for(j=0; j<2; j++){
            for(k=0; k<2; k++){
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k]=NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j]=NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->ac_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size=0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    if(s->picture){
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized = 0;
    s->last_picture_ptr=
    s->next_picture_ptr=
    s->current_picture_ptr= NULL;
    s->linesize= s->uvlinesize= 0;

    for(i=0; i<3; i++)
        av_freep(&s->visualization_buffer[i]);

    avcodec_default_free_buffers(s->avctx);
}
#ifdef CONFIG_ENCODERS

/* init video encoder */
int MPV_encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int i;
    int chroma_h_shift, chroma_v_shift;

    MPV_encode_defaults(s);

    switch (avctx->codec_id) {
    case CODEC_ID_MPEG2VIDEO:
        if(avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUV422P){
            av_log(avctx, AV_LOG_ERROR, "only YUV420 and YUV422 are supported\n");
            return -1;
        }
        break;
    case CODEC_ID_LJPEG:
    case CODEC_ID_MJPEG:
        if(avctx->pix_fmt != PIX_FMT_YUVJ420P && avctx->pix_fmt != PIX_FMT_YUVJ422P &&
           ((avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUV422P) || avctx->strict_std_compliance>FF_COMPLIANCE_INOFFICIAL)){
            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
            return -1;
        }
        break;
    default:
        if(avctx->pix_fmt != PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
            return -1;
        }
    }

    switch (avctx->pix_fmt) {
    case PIX_FMT_YUVJ422P:
    case PIX_FMT_YUV422P:
        s->chroma_format = CHROMA_422;
        break;
    case PIX_FMT_YUVJ420P:
    case PIX_FMT_YUV420P:
    default:
        s->chroma_format = CHROMA_420;
        break;
    }

    s->bit_rate = avctx->bit_rate;
    s->width = avctx->width;
    s->height = avctx->height;
    if(avctx->gop_size > 600 && avctx->strict_std_compliance>FF_COMPLIANCE_EXPERIMENTAL){
        av_log(avctx, AV_LOG_ERROR, "Warning keyframe interval too large! reducing it ...\n");
        avctx->gop_size=600;
    }
    s->gop_size = avctx->gop_size;
    s->avctx = avctx;
    s->flags= avctx->flags;
    s->flags2= avctx->flags2;
    s->max_b_frames= avctx->max_b_frames;
    s->codec_id= avctx->codec->id;
    s->luma_elim_threshold  = avctx->luma_elim_threshold;
    s->chroma_elim_threshold= avctx->chroma_elim_threshold;
    s->strict_std_compliance= avctx->strict_std_compliance;
    s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
    s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
    s->mpeg_quant= avctx->mpeg_quant;
    s->rtp_mode= !!avctx->rtp_payload_size;
    s->intra_dc_precision= avctx->intra_dc_precision;
    s->user_specified_pts = AV_NOPTS_VALUE;

    if (s->gop_size <= 1) {
        s->intra_only = 1;
        s->gop_size = 12;
    } else {
        s->intra_only = 0;
    }

    s->me_method = avctx->me_method;

    /* Fixed QSCALE */
    s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);

    s->adaptive_quant= (   s->avctx->lumi_masking
                        || s->avctx->dark_masking
                        || s->avctx->temporal_cplx_masking
                        || s->avctx->spatial_cplx_masking
                        || s->avctx->p_masking
                        || s->avctx->border_masking
                        || (s->flags&CODEC_FLAG_QP_RD))
                       && !s->fixed_qscale;

    s->obmc= !!(s->flags & CODEC_FLAG_OBMC);
    s->loop_filter= !!(s->flags & CODEC_FLAG_LOOP_FILTER);
    s->alternate_scan= !!(s->flags & CODEC_FLAG_ALT_SCAN);
    s->intra_vlc_format= !!(s->flags2 & CODEC_FLAG2_INTRA_VLC);
    s->q_scale_type= !!(s->flags2 & CODEC_FLAG2_NON_LINEAR_QUANT);

    if(avctx->rc_max_rate && !avctx->rc_buffer_size){
        av_log(avctx, AV_LOG_ERROR, "a vbv buffer size is needed, for encoding with a maximum bitrate\n");
        return -1;
    }

    if(avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate){
        av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
    }

    if(avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate){
        av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
        return -1;
    }

    if(avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate){
        av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
        return -1;
    }

    if(avctx->rc_buffer_size && avctx->bit_rate*av_q2d(avctx->time_base) > avctx->rc_buffer_size){
        av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
        return -1;
    }

    if(avctx->bit_rate*av_q2d(avctx->time_base) > avctx->bit_rate_tolerance){
        av_log(avctx, AV_LOG_ERROR, "bitrate tolerance too small for bitrate\n");
        return -1;
    }

    if(   s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate
       && (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO)
       && 90000LL * (avctx->rc_buffer_size-1) > s->avctx->rc_max_rate*0xFFFFLL){

        av_log(avctx, AV_LOG_INFO, "Warning vbv_delay will be set to 0xFFFF (=VBR) as the specified vbv buffer is too large for the given bitrate!\n");
    }

    if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4
       && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P && s->codec_id != CODEC_ID_FLV1){
        av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
        return -1;
    }

    if(s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE){
        av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decision\n");
        return -1;
    }

    if(s->obmc && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){
        av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with H263(+)\n");
        return -1;
    }

    if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){
        av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
        return -1;
    }

    if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){
        av_log(avctx, AV_LOG_ERROR, "data partitioning not supported by codec\n");
        return -1;
    }

    if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO){
        av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
        return -1;
    }

    if((s->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN))
       && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG2VIDEO){
        av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
        return -1;
    }

    if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
        av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supported by codec\n");
        return -1;
    }

    if((s->flags & CODEC_FLAG_CBP_RD) && !(s->flags & CODEC_FLAG_TRELLIS_QUANT)){
        av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
        return -1;
    }

    if((s->flags & CODEC_FLAG_QP_RD) && s->avctx->mb_decision != FF_MB_DECISION_RD){
        av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
        return -1;
    }

    if(s->avctx->scenechange_threshold < 1000000000 && (s->flags & CODEC_FLAG_CLOSED_GOP)){
        av_log(avctx, AV_LOG_ERROR, "closed gop with scene change detection aren't supported yet, set threshold to 1000000000\n");
        return -1;
    }

    if((s->flags2 & CODEC_FLAG2_INTRA_VLC) && s->codec_id != CODEC_ID_MPEG2VIDEO){
        av_log(avctx, AV_LOG_ERROR, "intra vlc table not supported by codec\n");
        return -1;
    }

    if(s->flags & CODEC_FLAG_LOW_DELAY){
        if (s->codec_id != CODEC_ID_MPEG2VIDEO && s->codec_id != CODEC_ID_MPEG1VIDEO){
            av_log(avctx, AV_LOG_ERROR, "low delay forcing is only available for mpeg1/2\n");
            return -1;
        }
        if (s->max_b_frames != 0){
            av_log(avctx, AV_LOG_ERROR, "b frames cannot be used with low delay\n");
            return -1;
        }
    }

    if(s->q_scale_type == 1){
        if(s->codec_id != CODEC_ID_MPEG2VIDEO){
            av_log(avctx, AV_LOG_ERROR, "non linear quant is only available for mpeg2\n");
            return -1;
        }
        if(avctx->qmax > 12){
            av_log(avctx, AV_LOG_ERROR, "non linear quant only supports qmax <= 12 currently\n");
            return -1;
        }
    }

    if(s->avctx->thread_count > 1 && s->codec_id != CODEC_ID_MPEG4
       && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO
       && (s->codec_id != CODEC_ID_H263P || !(s->flags & CODEC_FLAG_H263P_SLICE_STRUCT))){
        av_log(avctx, AV_LOG_ERROR, "multi threaded encoding not supported by codec\n");
        return -1;
    }

    if(s->avctx->thread_count > 1)
        s->rtp_mode= 1;

    if(!avctx->time_base.den || !avctx->time_base.num){
        av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
        return -1;
    }

    i= (INT_MAX/2+128)>>8;
    if(avctx->me_threshold >= i){
        av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n", i - 1);
        return -1;
    }
    if(avctx->mb_threshold >= i){
        av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n", i - 1);
        return -1;
    }

    if(avctx->b_frame_strategy && (avctx->flags&CODEC_FLAG_PASS2)){
        av_log(avctx, AV_LOG_INFO, "notice: b_frame_strategy only affects the first pass\n");
        avctx->b_frame_strategy = 0;
    }

    i= ff_gcd(avctx->time_base.den, avctx->time_base.num);
    if(i > 1){
        av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
        avctx->time_base.den /= i;
        avctx->time_base.num /= i;
//        return -1;
    }

    if(s->codec_id==CODEC_ID_MJPEG){
        s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
        s->inter_quant_bias= 0;
    }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO){
        s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
        s->inter_quant_bias= 0;
    }else{
        s->intra_quant_bias=0;
        s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x
    }

    if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->intra_quant_bias= avctx->intra_quant_bias;
    if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->inter_quant_bias= avctx->inter_quant_bias;

    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);

    if(avctx->codec_id == CODEC_ID_MPEG4 && s->avctx->time_base.den > (1<<16)-1){
        av_log(avctx, AV_LOG_ERROR, "timebase not supported by mpeg 4 standard\n");
        return -1;
    }
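    /* number of bits needed to represent time_base.den-1 time increments;
       e.g. den=25 gives av_log2(24)+1 = 5 bits (the values 0..24 fit) */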
    s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
    switch(avctx->codec->id) {
    case CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay= !!(s->flags & CODEC_FLAG_LOW_DELAY);
        avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
        break;
    case CODEC_ID_MPEG2VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay= !!(s->flags & CODEC_FLAG_LOW_DELAY);
        avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
        s->rtp_mode= 1;
        break;
    case CODEC_ID_LJPEG:
    case CODEC_ID_MJPEG:
        s->out_format = FMT_MJPEG;
        s->intra_only = 1; /* force intra only for jpeg */
        s->mjpeg_vsample[0] = 2;
        s->mjpeg_vsample[1] = 2>>chroma_v_shift;
        s->mjpeg_vsample[2] = 2>>chroma_v_shift;
        s->mjpeg_hsample[0] = 2;
        s->mjpeg_hsample[1] = 2>>chroma_h_shift;
        s->mjpeg_hsample[2] = 2>>chroma_h_shift;
        if (!(ENABLE_MJPEG_ENCODER || ENABLE_LJPEG_ENCODER)
            || ff_mjpeg_encode_init(s) < 0)
            return -1;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_H261:
        if (!ENABLE_H261_ENCODER) return -1;
        if (ff_h261_get_picture_format(s->width, s->height) < 0) {
            av_log(avctx, AV_LOG_ERROR, "The specified picture size of %dx%d is not valid for the H.261 codec.\nValid sizes are 176x144, 352x288\n", s->width, s->height);
            return -1;
        }
        s->out_format = FMT_H261;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_H263:
        if (h263_get_picture_format(s->width, s->height) == 7) {
            av_log(avctx, AV_LOG_INFO, "The specified picture size of %dx%d is not valid for the H.263 codec.\nValid sizes are 128x96, 176x144, 352x288, 704x576, and 1408x1152. Try H.263+.\n", s->width, s->height);
            return -1;
        }
        s->out_format = FMT_H263;
        s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_H263P:
        s->out_format = FMT_H263;
        s->h263_plus = 1;
        /* Fx */
        s->umvplus = (avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0;
        s->h263_aic= (avctx->flags & CODEC_FLAG_AC_PRED) ? 1:0;
        s->modified_quant= s->h263_aic;
        s->alt_inter_vlc= (avctx->flags & CODEC_FLAG_H263P_AIV) ? 1:0;
        s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
        s->loop_filter= (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1:0;
        s->unrestricted_mv= s->obmc || s->loop_filter || s->umvplus;
        s->h263_slice_structured= (s->flags & CODEC_FLAG_H263P_SLICE_STRUCT) ? 1:0;
        /* /Fx */
        /* These are just to be sure */
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_FLV1:
        s->out_format = FMT_H263;
        s->h263_flv = 2; /* format = 1; 11-bit codes */
        s->unrestricted_mv = 1;
        s->rtp_mode=0; /* don't allow GOB */
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_RV10:
        s->out_format = FMT_H263;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_RV20:
        s->out_format = FMT_H263;
        avctx->delay=0;
        s->low_delay=1;
        s->modified_quant=1;
        s->h263_aic=1;
        s->h263_plus=1;
        s->loop_filter=1;
        s->unrestricted_mv= s->obmc || s->loop_filter || s->umvplus;
        break;
    case CODEC_ID_MPEG4:
        s->out_format = FMT_H263;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->low_delay= s->max_b_frames ? 0 : 1;
        avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
        break;
    case CODEC_ID_MSMPEG4V1:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 1;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_MSMPEG4V2:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 2;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_MSMPEG4V3:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 3;
        s->flipflop_rounding=1;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_WMV1:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 4;
        s->flipflop_rounding=1;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_WMV2:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 5;
        s->flipflop_rounding=1;
        avctx->delay=0;
        s->low_delay=1;
        break;
    default:
        return -1;
    }

    avctx->has_b_frames= !s->low_delay;
    s->encoding = 1;

    /* init */
    if (MPV_common_init(s) < 0)
        return -1;

    if(s->modified_quant)
        s->chroma_qscale_table= ff_h263_chroma_qscale_table;
    s->progressive_frame=
    s->progressive_sequence= !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN));
    s->quant_precision=5;

    ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
    ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);

    if (ENABLE_H261_ENCODER && s->out_format == FMT_H261)
        ff_h261_encode_init(s);
    if (s->out_format == FMT_H263)
        h263_encode_init(s);
    if (ENABLE_MSMPEG4_ENCODER && s->msmpeg4_version)
        ff_msmpeg4_encode_init(s);
    if (s->out_format == FMT_MPEG1)
        ff_mpeg1_encode_init(s);

    /* init q matrix */
    for(i=0;i<64;i++) {
        int j= s->dsp.idct_permutation[i];
        if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
            s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
        }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
            s->intra_matrix[j] =
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        }else
        { /* mpeg1/2 */
            s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        }
        if(s->avctx->intra_matrix)
            s->intra_matrix[j] = s->avctx->intra_matrix[i];
        if(s->avctx->inter_matrix)
            s->inter_matrix[j] = s->avctx->inter_matrix[i];
    }

    /* precompute matrix */
    /* for mjpeg, we do include qscale in the matrix */
    if (s->out_format != FMT_MJPEG) {
        convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
                       s->intra_matrix, s->intra_quant_bias, avctx->qmin, 31, 1);
        convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
                       s->inter_matrix, s->inter_quant_bias, avctx->qmin, 31, 0);
    }

    if(ff_rate_control_init(s) < 0)
        return -1;

    return 0;
}
int MPV_encode_end(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;

    ff_rate_control_uninit(s);

    MPV_common_end(s);
    if ((ENABLE_MJPEG_ENCODER || ENABLE_LJPEG_ENCODER) && s->out_format == FMT_MJPEG)
        ff_mjpeg_encode_close(s);

    av_freep(&avctx->extradata);

    return 0;
}

#endif //CONFIG_ENCODERS
void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
    uint8_t index_run[MAX_RUN+1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if(static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for(last=0;last<2;last++) {
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for(i=start;i<end;i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        if(static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if(static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if(static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
/* draw the edges of width 'w' of an image of size width, height */
//FIXME check that this is ok for mpeg4 interlaced
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    for(i=0;i<w;i++) {
        /* top and bottom */
        memcpy(buf - (i + 1) * wrap, buf, width);
        memcpy(last_line + (i + 1) * wrap, last_line, width);
    }
    /* left and right */
    ptr = buf;
    for(i=0;i<height;i++) {
        memset(ptr - w, ptr[0], w);
        memset(ptr + width, ptr[width-1], w);
        ptr += wrap;
    }
    /* corners */
    for(i=0;i<w;i++) {
        memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
        memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
        memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
        memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
    }
}
int ff_find_unused_picture(MpegEncContext *s, int shared){
    int i;

    if(shared){
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
        }
    }else{
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
        }
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0]==NULL) return i;
        }
    }

    assert(0);
    return -1;
}
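/* The noise reduction offset computed below acts as a per-coefficient dead
 * zone: offset is roughly noise_reduction * dct_count / dct_error_sum, so
 * coefficients whose accumulated error is large relative to how often they
 * were observed get a smaller offset, and stable ones a larger one. */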
static void update_noise_reduction(MpegEncContext *s){
    int intra, i;

    for(intra=0; intra<2; intra++){
        if(s->dct_count[intra] > (1<<16)){
            for(i=0; i<64; i++){
                s->dct_error_sum[intra][i] >>=1;
            }
            s->dct_count[intra] >>= 1;
        }

        for(i=0; i<64; i++){
            s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
        }
    }
}
/**
 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
 */
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    AVFrame *pic;
    s->mb_skipped = 0;

    assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);

    /* mark&release old frames */
    if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
      if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
        avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);

        /* release forgotten pictures */
        /* if(mpeg124/h263) */
        if(!s->encoding){
            for(i=0; i<MAX_PICTURE_COUNT; i++){
                if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
                    av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
                    avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
                }
            }
        }
      }
    }
alloc:
    if(!s->encoding){
        /* release non reference frames */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
            }
        }

        if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
            pic= (AVFrame*)s->current_picture_ptr; //we already have an unused image (maybe it was set before reading the header)
        else{
            i= ff_find_unused_picture(s, 0);
            pic= (AVFrame*)&s->picture[i];
        }

        pic->reference= (s->pict_type != B_TYPE || s->codec_id == CODEC_ID_H264)
                        && !s->dropable ? 3 : 0;

        pic->coded_picture_number= s->coded_picture_number++;

        if( alloc_picture(s, (Picture*)pic, 0) < 0)
            return -1;

        s->current_picture_ptr= (Picture*)pic;
        s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
        s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
    }

    s->current_picture_ptr->pict_type= s->pict_type;
//    if(s->flags && CODEC_FLAG_QSCALE)
//        s->current_picture_ptr->quality= s->new_picture_ptr->quality;
    s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;

    copy_picture(&s->current_picture, s->current_picture_ptr);

    if (s->pict_type != B_TYPE) {
        s->last_picture_ptr= s->next_picture_ptr;
        if(!s->dropable)
            s->next_picture_ptr= s->current_picture_ptr;
    }
/*    av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p  L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
        s->last_picture_ptr    ? s->last_picture_ptr->data[0] : NULL,
        s->next_picture_ptr    ? s->next_picture_ptr->data[0] : NULL,
        s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
        s->pict_type, s->dropable);*/

    if(s->last_picture_ptr) copy_picture(&s->last_picture, s->last_picture_ptr);
    if(s->next_picture_ptr) copy_picture(&s->next_picture, s->next_picture_ptr);

    if(s->pict_type != I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && !s->dropable){
        av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
        assert(s->pict_type != B_TYPE); //these should have been dropped if we don't have a reference
        goto alloc;
    }

    assert(s->pict_type == I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));

    if(s->picture_structure!=PICT_FRAME){
        int i;
        for(i=0; i<4; i++){
            if(s->picture_structure == PICT_BOTTOM_FIELD){
                s->current_picture.data[i] += s->current_picture.linesize[i];
            }
            s->current_picture.linesize[i] *= 2;
            s->last_picture.linesize[i]    *=2;
            s->next_picture.linesize[i]    *=2;
        }
    }

    s->hurry_up= s->avctx->hurry_up;
    s->error_resilience= avctx->error_resilience;

    /* set dequantizer, we can't do it during init as it might change for mpeg4
       and we can't do it in the header decode as init is not called for mpeg4 there yet */
    if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    }else{
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if(s->dct_error_sum){
        assert(s->avctx->noise_reduction && s->encoding);

        update_noise_reduction(s);
    }

#ifdef HAVE_XVMC
    if(s->avctx->xvmc_acceleration)
        return XVMC_field_start(s, avctx);
#endif

    return 0;
}
/* generic function for encode/decode called after a frame has been coded/decoded */
void MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* draw edge for correct motion prediction if outside */
#ifdef HAVE_XVMC
//just to make sure that all data is rendered.
    if(s->avctx->xvmc_acceleration){
        XVMC_field_end(s);
    }else
#endif
    if(s->unrestricted_mv && s->current_picture.reference && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
            draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
            draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
            draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
    }
    emms_c();

    s->last_pict_type    = s->pict_type;
    s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
    if(s->pict_type!=B_TYPE){
        s->last_non_b_pict_type= s->pict_type;
    }
#if 0
    /* copy back current_picture variables */
    for(i=0; i<MAX_PICTURE_COUNT; i++){
        if(s->picture[i].data[0] == s->current_picture.data[0]){
            s->picture[i]= s->current_picture;
            break;
        }
    }
    assert(i<MAX_PICTURE_COUNT);
#endif

    if(s->encoding){
        /* release non-reference frames */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
            }
        }
    }
    // clear copies, to avoid confusion
#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
}
/**
 * draws a line from (ex, ey) -> (sx, sy).
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the line
 */
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int x, y, fr, f;

    sx= av_clip(sx, 0, w-1);
    sy= av_clip(sy, 0, h-1);
    ex= av_clip(ex, 0, w-1);
    ey= av_clip(ey, 0, h-1);

    buf[sy*stride + sx]+= color;

    if(FFABS(ex - sx) > FFABS(ey - sy)){
        if(sx > ex){
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf+= sx + sy*stride;
        ex-= sx;
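        /* slope in 16.16 fixed point; at each step the fractional part fr
           splits the color between the two adjacent rows (or columns in the
           steep branch below), giving a cheaply anti-aliased line */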
        f= ((ey-sy)<<16)/ex;
        for(x= 0; x <= ex; x++){
            y = (x*f)>>16;
            fr= (x*f)&0xFFFF;
            buf[ y   *stride + x]+= (color*(0x10000-fr))>>16;
            buf[(y+1)*stride + x]+= (color*         fr )>>16;
        }
    }else{
        if(sy > ey){
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf+= sx + sy*stride;
        ey-= sy;
        if(ey) f= ((ex-sx)<<16)/ey;
        else   f= 0;
        for(y= 0; y <= ey; y++){
            x = (y*f)>>16;
            fr= (y*f)&0xFFFF;
            buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;
            buf[y*stride + x+1]+= (color*         fr )>>16;
        }
    }
}
/**
 * draws an arrow from (ex, ey) -> (sx, sy).
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the arrow
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int dx,dy;

    sx= av_clip(sx, -100, w+100);
    sy= av_clip(sy, -100, h+100);
    ex= av_clip(ex, -100, w+100);
    ey= av_clip(ey, -100, h+100);

    dx= ex - sx;
    dy= ey - sy;
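    /* arrow head: (dx+dy, -dx+dy) is the shaft direction rotated by 45
       degrees, and (-ry, rx) is that rotated by a further 90 degrees; both
       head strokes are normalized to roughly 3 pixels of length via
       ff_sqrt() of the squared length (in 1/16 pel units) */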
    if(dx*dx + dy*dy > 3*3){
        int rx=  dx + dy;
        int ry= -dx + dy;
        int length= ff_sqrt((rx*rx + ry*ry)<<8);

        //FIXME subpixel accuracy
        rx= ROUNDED_DIV(rx*3<<4, length);
        ry= ROUNDED_DIV(ry*3<<4, length);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
/**
 * prints debugging info for the given picture.
 */
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){

    if(!pict || !pict->mb_type) return;

    if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
        int x,y;

        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
        switch (pict->pict_type) {
        case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
        case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
        case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
        case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
        case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
        case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
        }
        for(y=0; y<s->mb_height; y++){
            for(x=0; x<s->mb_width; x++){
                if(s->avctx->debug&FF_DEBUG_SKIP){
                    int count= s->mbskip_table[x + y*s->mb_stride];
                    if(count>9) count=9;
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if(s->avctx->debug&FF_DEBUG_QP){
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
                }
                if(s->avctx->debug&FF_DEBUG_MB_TYPE){
                    int mb_type= pict->mb_type[x + y*s->mb_stride];
                    //Type & MV direction
                    if(IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if(IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if(IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if(IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if(IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if(IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if(!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if(!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                    else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");
                    }

                    //segmentation
                    if(IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if(IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if(IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");


                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }
//                av_log(s->avctx, AV_LOG_DEBUG, " ");
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
        const int shift= 1 + s->quarter_sample;
        int mb_y;
        uint8_t *ptr;
        int i;
        int h_chroma_shift, v_chroma_shift;
        const int width = s->avctx->width;
        const int height= s->avctx->height;
        const int mv_sample_log2= 4 - pict->motion_subsample_log2;
        const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
        s->low_delay=0; //needed to see the vectors without trashing the buffers

        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
        for(i=0; i<3; i++){
            memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
            pict->data[i]= s->visualization_buffer[i];
        }
        pict->type= FF_BUFFER_TYPE_COPY;
        ptr= pict->data[0];
        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            int mb_x;
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                const int mb_index= mb_x + mb_y*s->mb_stride;
                if((s->avctx->debug_mv) && pict->motion_val){
                  int type;
                  for(type=0; type<3; type++){
                    int direction = 0;
                    switch (type) {
                      case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
                                continue;
                              direction = 0;
                              break;
                      case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
                                continue;
                              direction = 0;
                              break;
                      case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
                                continue;
                              direction = 1;
                              break;
                    }
                    if(!USES_LIST(pict->mb_type[mb_index], direction))
                        continue;

                    if(IS_8X8(pict->mb_type[mb_index])){
                      int i;
                      for(i=0; i<4; i++){
                        int sx= mb_x*16 + 4 + 8*(i&1);
                        int sy= mb_y*16 + 4 + 8*(i>>1);
                        int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
                        int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                        int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                        draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                      }
                    }else if(IS_16X8(pict->mb_type[mb_index])){
                      int i;
                      for(i=0; i<2; i++){
                        int sx=mb_x*16 + 8;
                        int sy=mb_y*16 + 4 + 8*i;
                        int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
                        int mx=(pict->motion_val[direction][xy][0]>>shift);
                        int my=(pict->motion_val[direction][xy][1]>>shift);

                        if(IS_INTERLACED(pict->mb_type[mb_index]))
                            my*=2;

                        draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                      }
                    }else if(IS_8X16(pict->mb_type[mb_index])){
                      int i;
                      for(i=0; i<2; i++){
                        int sx=mb_x*16 + 4 + 8*i;
                        int sy=mb_y*16 + 8;
                        int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
                        int mx=(pict->motion_val[direction][xy][0]>>shift);
                        int my=(pict->motion_val[direction][xy][1]>>shift);

                        if(IS_INTERLACED(pict->mb_type[mb_index]))
                            my*=2;

                        draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                      }
                    }else{
                      int sx= mb_x*16 + 8;
                      int sy= mb_y*16 + 8;
                      int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
                      int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                      int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                      draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                    }
                  }
                }
1966 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1967 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1970 *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= c;
1971 *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= c;
1974 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1975 int mb_type= pict->mb_type[mb_index];
1978 #define COLOR(theta, r)\
1979 u= (int)(128 + r*cos(theta*3.141592/180));\
1980 v= (int)(128 + r*sin(theta*3.141592/180));
1984 if(IS_PCM(mb_type)){
1986 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1988 }else if(IS_INTRA4x4(mb_type)){
1990 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1992 }else if(IS_DIRECT(mb_type)){
1994 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1996 }else if(IS_GMC(mb_type)){
1998 }else if(IS_SKIP(mb_type)){
2000 }else if(!USES_LIST(mb_type, 1)){
2002 }else if(!USES_LIST(mb_type, 0)){
2005 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2009 u*= 0x0101010101010101ULL;
2010 v*= 0x0101010101010101ULL;
2012 *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= u;
2013 *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= v;
2017 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
2018 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
2019 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
2021 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
2023 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
2025 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
2026 int dm= 1 << (mv_sample_log2-2);
2028 int sx= mb_x*16 + 8*(i&1);
2029 int sy= mb_y*16 + 8*(i>>1);
2030 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
2032 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
2033 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
2035 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
2036 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
2037 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
2041 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
2045 s->mbskip_table[mb_index]=0;
#ifdef CONFIG_ENCODERS

static int get_sae(uint8_t *src, int ref, int stride){
    int x,y;
    int acc=0;

    for(y=0; y<16; y++){
        for(x=0; x<16; x++){
            acc+= FFABS(src[x+y*stride] - ref);
        }
    }

    return acc;
}

static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
    int x, y, w, h;
    int acc=0;

    w= s->width &~15;
    h= s->height&~15;

    for(y=0; y<h; y+=16){
        for(x=0; x<w; x+=16){
            int offset= x + y*stride;
            int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride, 16);
            int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
            int sae = get_sae(src + offset, mean, stride);

            acc+= sae + 500 < sad;
        }
    }
    return acc;
}
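/* An editorial note on the heuristic above: a 16x16 block votes for "intra"
 * when its sum of absolute differences against its own mean (sae, a cheap
 * proxy for intra coding cost) is clearly smaller than the SAD against the
 * reference frame (inter coding cost); the +500 bias makes the vote
 * conservative. E.g. a flat block with sae=200 and sad=1000 counts as
 * intra, while sae=700 against the same sad does not. */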
static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
    AVFrame *pic=NULL;
    int64_t pts;
    int i;
    const int encoding_delay= s->max_b_frames;
    int direct=1;

    if(pic_arg){
        pts= pic_arg->pts;
        pic_arg->display_picture_number= s->input_picture_number++;

        if(pts != AV_NOPTS_VALUE){
            if(s->user_specified_pts != AV_NOPTS_VALUE){
                int64_t time= pts;
                int64_t last= s->user_specified_pts;

                if(time <= last){
                    av_log(s->avctx, AV_LOG_ERROR, "Error, Invalid timestamp=%"PRId64", last=%"PRId64"\n", pts, s->user_specified_pts);
                    return -1;
                }
            }
            s->user_specified_pts= pts;
        }else{
            if(s->user_specified_pts != AV_NOPTS_VALUE){
                s->user_specified_pts=
                pts= s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO, "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n", pts);
            }else{
                pts= pic_arg->display_picture_number;
            }
        }
    }

  if(pic_arg){
    if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
    if(pic_arg->linesize[0] != s->linesize) direct=0;
    if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
    if(pic_arg->linesize[2] != s->uvlinesize) direct=0;

//    av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);

    if(direct){
        i= ff_find_unused_picture(s, 1);

        pic= (AVFrame*)&s->picture[i];
        pic->reference= 3;

        for(i=0; i<4; i++){
            pic->data[i]= pic_arg->data[i];
            pic->linesize[i]= pic_arg->linesize[i];
        }
        alloc_picture(s, (Picture*)pic, 1);
    }else{
        i= ff_find_unused_picture(s, 0);

        pic= (AVFrame*)&s->picture[i];
        pic->reference= 3;

        alloc_picture(s, (Picture*)pic, 0);

        if(   pic->data[0] + INPLACE_OFFSET == pic_arg->data[0]
           && pic->data[1] + INPLACE_OFFSET == pic_arg->data[1]
           && pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]){
       // empty
        }else{
            int h_chroma_shift, v_chroma_shift;
            avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);

            for(i=0; i<3; i++){
                int src_stride= pic_arg->linesize[i];
                int dst_stride= i ? s->uvlinesize : s->linesize;
                int h_shift= i ? h_chroma_shift : 0;
                int v_shift= i ? v_chroma_shift : 0;
                int w= s->width >>h_shift;
                int h= s->height>>v_shift;
                uint8_t *src= pic_arg->data[i];
                uint8_t *dst= pic->data[i];

                if(!s->avctx->rc_buffer_size)
                    dst +=INPLACE_OFFSET;

                if(src_stride==dst_stride)
                    memcpy(dst, src, src_stride*h);
                else{
                    while(h--){
                        memcpy(dst, src, w);
                        dst += dst_stride;
                        src += src_stride;
                    }
                }
            }
        }
    }
    copy_picture_attributes(s, pic, pic_arg);
    pic->pts= pts; //we set this here to avoid modifying pic_arg
  }

    /* shift buffer entries */
    for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
        s->input_picture[i-1]= s->input_picture[i];

    s->input_picture[encoding_delay]= (Picture*)pic;

    return 0;
}
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref){
    int x, y, plane;
    int score=0;
    int64_t score64=0;

    for(plane=0; plane<3; plane++){
        const int stride= p->linesize[plane];
        const int bw= plane ? 1 : 2;
        for(y=0; y<s->mb_height*bw; y++){
            for(x=0; x<s->mb_width*bw; x++){
                int off= p->type == FF_BUFFER_TYPE_SHARED ? 0: 16;
                int v= s->dsp.frame_skip_cmp[1](s, p->data[plane] + 8*(x + y*stride)+off, ref->data[plane] + 8*(x + y*stride), stride, 8);

                switch(s->avctx->frame_skip_exp){
                case 0: score= FFMAX(score, v); break;
                case 1: score+= FFABS(v);break;
                case 2: score+= v*v;break;
                case 3: score64+= FFABS(v*v*(int64_t)v);break;
                case 4: score64+= v*v*(int64_t)(v*v);break;
                }
            }
        }
    }

    if(score) score64= score;

    if(score64 < s->avctx->frame_skip_threshold)
        return 1;
    if(score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda)>>8))
        return 1;
    return 0;
}
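/* Editorial note: frame_skip_exp selects the norm used to pool the per-8x8
 * comparison scores v: 0 takes the maximum (peak error), 1 sums |v| (L1),
 * 2 sums v*v (L2/SSE), 3 and 4 sum |v|^3 and v^4, weighting large local
 * differences progressively harder. The frame is considered skippable when
 * the pooled score stays below frame_skip_threshold or below the
 * lambda-scaled frame_skip_factor term. */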
static int estimate_best_b_count(MpegEncContext *s){
    AVCodec *codec= avcodec_find_encoder(s->avctx->codec_id);
    AVCodecContext *c= avcodec_alloc_context();
    AVFrame input[FF_MAX_B_FRAMES+2];
    const int scale= s->avctx->brd_scale;
    int i, j, out_size, p_lambda, b_lambda, lambda2;
    int outbuf_size= s->width * s->height; //FIXME
    uint8_t *outbuf= av_malloc(outbuf_size);
    int64_t best_rd= INT64_MAX;
    int best_b_count= -1;

    assert(scale>=0 && scale <=3);

    p_lambda= s->last_lambda_for[P_TYPE]; //s->next_picture_ptr->quality;
    b_lambda= s->last_lambda_for[B_TYPE]; //p_lambda *FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
    if(!b_lambda) b_lambda= p_lambda; //FIXME we should do this somewhere else
    lambda2= (b_lambda*b_lambda + (1<<FF_LAMBDA_SHIFT)/2 ) >> FF_LAMBDA_SHIFT;

    c->width = s->width >> scale;
    c->height= s->height>> scale;
    c->flags= CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR | CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
    c->flags|= s->avctx->flags & CODEC_FLAG_QPEL;
    c->mb_decision= s->avctx->mb_decision;
    c->me_cmp= s->avctx->me_cmp;
    c->mb_cmp= s->avctx->mb_cmp;
    c->me_sub_cmp= s->avctx->me_sub_cmp;
    c->pix_fmt = PIX_FMT_YUV420P;
    c->time_base= s->avctx->time_base;
    c->max_b_frames= s->max_b_frames;

    if (avcodec_open(c, codec) < 0)
        return -1;

    for(i=0; i<s->max_b_frames+2; i++){
        int ysize= c->width*c->height;
        int csize= (c->width/2)*(c->height/2);
        Picture pre_input, *pre_input_ptr= i ? s->input_picture[i-1] : s->next_picture_ptr;

        avcodec_get_frame_defaults(&input[i]);
        input[i].data[0]= av_malloc(ysize + 2*csize);
        input[i].data[1]= input[i].data[0] + ysize;
        input[i].data[2]= input[i].data[1] + csize;
        input[i].linesize[0]= c->width;
        input[i].linesize[1]=
        input[i].linesize[2]= c->width/2;

        if(pre_input_ptr && (!i || s->input_picture[i-1])) {
            pre_input= *pre_input_ptr;

            if(pre_input.type != FF_BUFFER_TYPE_SHARED && i) {
                pre_input.data[0]+=INPLACE_OFFSET;
                pre_input.data[1]+=INPLACE_OFFSET;
                pre_input.data[2]+=INPLACE_OFFSET;
            }

            s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0], pre_input.data[0], pre_input.linesize[0], c->width, c->height);
            s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1], pre_input.data[1], pre_input.linesize[1], c->width>>1, c->height>>1);
            s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2], pre_input.data[2], pre_input.linesize[2], c->width>>1, c->height>>1);
        }
    }

    for(j=0; j<s->max_b_frames+1; j++){
        int64_t rd=0;

        if(!s->input_picture[j])
            break;

        c->error[0]= c->error[1]= c->error[2]= 0;

        input[0].pict_type= I_TYPE;
        input[0].quality= 1 * FF_QP2LAMBDA;
        out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[0]);
//        rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;

        for(i=0; i<s->max_b_frames+1; i++){
            int is_p= i % (j+1) == j || i==s->max_b_frames;

            input[i+1].pict_type= is_p ? P_TYPE : B_TYPE;
            input[i+1].quality= is_p ? p_lambda : b_lambda;
            out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[i+1]);
            rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
        }

        /* get the delayed frames */
        while(out_size){
            out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
            rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
        }

        rd += c->error[0] + c->error[1] + c->error[2];

        if(rd < best_rd){
            best_rd= rd;
            best_b_count= j;
        }
    }

    av_freep(&outbuf);
    avcodec_close(c);
    av_freep(&c);

    for(i=0; i<s->max_b_frames+2; i++){
        av_freep(&input[i].data[0]);
    }

    return best_b_count;
}
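/* Editorial note: the search above brute-forces j = 0..max_b_frames by
 * re-encoding the buffered input frames at brd_scale-reduced resolution as
 * one I frame followed by a B/P pattern with j B frames per reference, and
 * scores each candidate with a rate-distortion cost: the coded bits scaled
 * by lambda2 (the >> (FF_LAMBDA_SHIFT-3) keeps 3 fractional bits) plus the
 * encoder's accumulated SSE in c->error[]. The j with the lowest cost
 * becomes the number of B frames to emit before the next reference. */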
static void select_input_picture(MpegEncContext *s){
    int i;

    for(i=1; i<MAX_PICTURE_COUNT; i++)
        s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
    s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;

    /* set next picture type & ordering */
    if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
        if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
            s->reordered_input_picture[0]= s->input_picture[0];
            s->reordered_input_picture[0]->pict_type= I_TYPE;
            s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
        }else{
            int b_frames;

            if(s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor){
                if(s->picture_in_gop_number < s->gop_size && skip_check(s, s->input_picture[0], s->next_picture_ptr)){
                //FIXME check that the gop check above is +-1 correct
//av_log(NULL, AV_LOG_DEBUG, "skip %p %"PRId64"\n", s->input_picture[0]->data[0], s->input_picture[0]->pts);

                    if(s->input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
                        for(i=0; i<4; i++)
                            s->input_picture[0]->data[i]= NULL;
                        s->input_picture[0]->type= 0;
                    }else{
                        assert(   s->input_picture[0]->type==FF_BUFFER_TYPE_USER
                               || s->input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);

                        s->avctx->release_buffer(s->avctx, (AVFrame*)s->input_picture[0]);
                    }

                    emms_c();
                    ff_vbv_update(s, 0);

                    goto no_output_pic;
                }
            }

            if(s->flags&CODEC_FLAG_PASS2){
                for(i=0; i<s->max_b_frames+1; i++){
                    int pict_num= s->input_picture[0]->display_picture_number + i;

                    if(pict_num >= s->rc_context.num_entries)
                        break;
                    if(!s->input_picture[i]){
                        s->rc_context.entry[pict_num-1].new_pict_type = P_TYPE;
                        break;
                    }

                    s->input_picture[i]->pict_type=
                        s->rc_context.entry[pict_num].new_pict_type;
                }
            }

            if(s->avctx->b_frame_strategy==0){
                b_frames= s->max_b_frames;
                while(b_frames && !s->input_picture[b_frames]) b_frames--;
            }else if(s->avctx->b_frame_strategy==1){
                for(i=1; i<s->max_b_frames+1; i++){
                    if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
                        s->input_picture[i]->b_frame_score=
                            get_intra_count(s, s->input_picture[i  ]->data[0],
                                               s->input_picture[i-1]->data[0], s->linesize) + 1;
                    }
                }
                for(i=0; i<s->max_b_frames+1; i++){
                    if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/s->avctx->b_sensitivity) break;
                }

                b_frames= FFMAX(0, i-1);

                /* reset scores */
                for(i=0; i<b_frames+1; i++){
                    s->input_picture[i]->b_frame_score=0;
                }
            }else if(s->avctx->b_frame_strategy==2){
                b_frames= estimate_best_b_count(s);
            }else{
                av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
                b_frames=0;
            }

            emms_c();
//static int b_count=0;
//b_count+= b_frames;
//av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);

            for(i= b_frames - 1; i>=0; i--){
                int type= s->input_picture[i]->pict_type;
                if(type && type != B_TYPE)
                    b_frames= i;
            }
            if(s->input_picture[b_frames]->pict_type == B_TYPE && b_frames == s->max_b_frames){
                av_log(s->avctx, AV_LOG_ERROR, "warning, too many b frames in a row\n");
            }

            if(s->picture_in_gop_number + b_frames >= s->gop_size){
                if((s->flags2 & CODEC_FLAG2_STRICT_GOP) && s->gop_size > s->picture_in_gop_number){
                    b_frames= s->gop_size - s->picture_in_gop_number - 1;
                }else{
                    if(s->flags & CODEC_FLAG_CLOSED_GOP)
                        b_frames=0;
                    s->input_picture[b_frames]->pict_type= I_TYPE;
                }
            }

            if(   (s->flags & CODEC_FLAG_CLOSED_GOP)
               && b_frames
               && s->input_picture[b_frames]->pict_type== I_TYPE)
                b_frames--;

            s->reordered_input_picture[0]= s->input_picture[b_frames];
            if(s->reordered_input_picture[0]->pict_type != I_TYPE)
                s->reordered_input_picture[0]->pict_type= P_TYPE;
            s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
            for(i=0; i<b_frames; i++){
                s->reordered_input_picture[i+1]= s->input_picture[i];
                s->reordered_input_picture[i+1]->pict_type= B_TYPE;
                s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++;
            }
        }
    }
no_output_pic:
    if(s->reordered_input_picture[0]){
        s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;

        copy_picture(&s->new_picture, s->reordered_input_picture[0]);

        if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED || s->avctx->rc_buffer_size){
            // input is a shared pix, so we can't modify it -> alloc a new one & ensure that the shared one is reusable

            int i= ff_find_unused_picture(s, 0);
            Picture *pic= &s->picture[i];

            pic->reference = s->reordered_input_picture[0]->reference;
            alloc_picture(s, pic, 0);

            /* mark us unused / free shared pic */
            if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_INTERNAL)
                s->avctx->release_buffer(s->avctx, (AVFrame*)s->reordered_input_picture[0]);
            for(i=0; i<4; i++)
                s->reordered_input_picture[0]->data[i]= NULL;
            s->reordered_input_picture[0]->type= 0;

            copy_picture_attributes(s, (AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]);

            s->current_picture_ptr= pic;
        }else{
            // input is not a shared pix -> reuse buffer for current_pix

            assert(   s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER
                   || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);

            s->current_picture_ptr= s->reordered_input_picture[0];
            for(i=0; i<4; i++){
                s->new_picture.data[i]+= INPLACE_OFFSET;
            }
        }
        copy_picture(&s->current_picture, s->current_picture_ptr);

        s->picture_number= s->new_picture.display_picture_number;
//printf("dpn:%d\n", s->picture_number);
    }else{
        memset(&s->new_picture, 0, sizeof(Picture));
    }
}
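/* Editorial note, a worked reordering example: with max_b_frames=2,
 * input_picture[] holding display frames 1 2 3 and frame 0 already coded as
 * the previous reference, b_frames=2 selects frame 3 as the next reference
 * (promoted to P and coded first), after which frames 1 and 2 follow as B
 * frames; coded_picture_number therefore advances in coding order while
 * display_picture_number keeps the original order. */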
int MPV_encode_picture(AVCodecContext *avctx,
                       unsigned char *buf, int buf_size, void *data)
{
    MpegEncContext *s = avctx->priv_data;
    AVFrame *pic_arg = data;
    int i, stuffing_count;

    for(i=0; i<avctx->thread_count; i++){
        int start_y= s->thread_context[i]->start_mb_y;
        int   end_y= s->thread_context[i]->  end_mb_y;
        int h= s->mb_height;
        uint8_t *start= buf + (size_t)(((int64_t) buf_size)*start_y/h);
        uint8_t *end  = buf + (size_t)(((int64_t) buf_size)*  end_y/h);

        init_put_bits(&s->thread_context[i]->pb, start, end - start);
    }

    s->picture_in_gop_number++;

    if(load_input_picture(s, pic_arg) < 0)
        return -1;

    select_input_picture(s);

    /* output? */
    if(s->new_picture.data[0]){
        s->pict_type= s->new_picture.pict_type;
//printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
        MPV_frame_start(s, avctx);
vbv_retry:
        if (encode_picture(s, s->picture_number) < 0)
            return -1;

        avctx->real_pict_num  = s->picture_number;
        avctx->header_bits = s->header_bits;
        avctx->mv_bits     = s->mv_bits;
        avctx->misc_bits   = s->misc_bits;
        avctx->i_tex_bits  = s->i_tex_bits;
        avctx->p_tex_bits  = s->p_tex_bits;
        avctx->i_count     = s->i_count;
        avctx->p_count     = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
        avctx->skip_count  = s->skip_count;

        MPV_frame_end(s);

        if (ENABLE_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
            ff_mjpeg_encode_picture_trailer(s);

        if(avctx->rc_buffer_size){
            RateControlContext *rcc= &s->rc_context;
            int max_size= rcc->buffer_index/3;

            if(put_bits_count(&s->pb) > max_size && s->lambda < s->avctx->lmax){
                s->next_lambda= FFMAX(s->lambda+1, s->lambda*(s->qscale+1) / s->qscale);
                if(s->adaptive_quant){
                    int i;
                    for(i=0; i<s->mb_height*s->mb_stride; i++)
                        s->lambda_table[i]= FFMAX(s->lambda_table[i]+1, s->lambda_table[i]*(s->qscale+1) / s->qscale);
                }
                s->mb_skipped = 0;        //done in MPV_frame_start()
                if(s->pict_type==P_TYPE){ //done in encode_picture() so we must undo it
                    if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
                        s->no_rounding ^= 1;
                }
                if(s->pict_type!=B_TYPE){
                    s->time_base= s->last_time_base;
                    s->last_non_b_time= s->time - s->pp_time;
                }
//                av_log(NULL, AV_LOG_ERROR, "R:%d ", s->next_lambda);
                for(i=0; i<avctx->thread_count; i++){
                    PutBitContext *pb= &s->thread_context[i]->pb;
                    init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
                }
                goto vbv_retry;
            }

            assert(s->avctx->rc_max_rate);
        }

        if(s->flags&CODEC_FLAG_PASS1)
            ff_write_pass1_stats(s);

        for(i=0; i<4; i++){
            s->current_picture_ptr->error[i]= s->current_picture.error[i];
            avctx->error[i] += s->current_picture_ptr->error[i];
        }

        if(s->flags&CODEC_FLAG_PASS1)
            assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits + avctx->i_tex_bits + avctx->p_tex_bits == put_bits_count(&s->pb));
        flush_put_bits(&s->pb);
        s->frame_bits  = put_bits_count(&s->pb);

        stuffing_count= ff_vbv_update(s, s->frame_bits);
        if(stuffing_count){
            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < stuffing_count + 50){
                av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
                return -1;
            }

            switch(s->codec_id){
            case CODEC_ID_MPEG1VIDEO:
            case CODEC_ID_MPEG2VIDEO:
                while(stuffing_count--){
                    put_bits(&s->pb, 8, 0);
                }
            break;
            case CODEC_ID_MPEG4:
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while(stuffing_count--){
                    put_bits(&s->pb, 8, 0xFF);
                }
            break;
            default:
                av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
            }
            flush_put_bits(&s->pb);
            s->frame_bits  = put_bits_count(&s->pb);
        }

        /* update mpeg1/2 vbv_delay for CBR */
        if(s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate && s->out_format == FMT_MPEG1
           && 90000LL * (avctx->rc_buffer_size-1) <= s->avctx->rc_max_rate*0xFFFFLL){
            int vbv_delay;

            assert(s->repeat_first_field==0);

            vbv_delay= lrintf(90000 * s->rc_context.buffer_index / s->avctx->rc_max_rate);
            assert(vbv_delay < 0xFFFF);

            s->vbv_delay_ptr[0] &= 0xF8;
            s->vbv_delay_ptr[0] |= vbv_delay>>13;
            s->vbv_delay_ptr[1]  = vbv_delay>>5;
            s->vbv_delay_ptr[2] &= 0x07;
            s->vbv_delay_ptr[2] |= vbv_delay<<3;
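            /* Editorial note on the packing above: vbv_delay is a 16 bit
             * field that straddles three bytes of the already written
             * picture header: bits 15..13 go into the low 3 bits of byte 0,
             * bits 12..5 fill byte 1, and bits 4..0 land in the high 5 bits
             * of byte 2. E.g. vbv_delay=0xABCD writes 0x5 (0xABCD>>13) into
             * byte 0, 0x5E ((0xABCD>>5)&0xFF) into byte 1, and 0x68
             * (0xD<<3) into the top of byte 2. */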
        }
        s->total_bits += s->frame_bits;
        avctx->frame_bits  = s->frame_bits;
    }else{
        assert((pbBufPtr(&s->pb) == s->pb.buf));
        s->frame_bits=0;
    }
    assert((s->frame_bits&7)==0);

    return s->frame_bits/8;
}

#endif //CONFIG_ENCODERS
static inline void gmc1_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               uint8_t **ref_picture)
{
    uint8_t *ptr;
    int offset, src_x, src_y, linesize, uvlinesize;
    int motion_x, motion_y;
    int emu=0;

    motion_x= s->sprite_offset[0][0];
    motion_y= s->sprite_offset[0][1];
    src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
    src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
    motion_x<<=(3-s->sprite_warping_accuracy);
    motion_y<<=(3-s->sprite_warping_accuracy);
    src_x = av_clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x =0;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y =0;

    linesize = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + (src_y * linesize) + src_x;

    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x >= s->h_edge_pos - 17
           || (unsigned)src_y >= s->v_edge_pos - 17){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
            ptr= s->edge_emu_buffer;
        }
    }

    if((motion_x|motion_y)&7){
        s->dsp.gmc1(dest_y  , ptr  , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
        s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
    }else{
        int dxy;

        dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
        if (s->no_rounding){
            s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        }else{
            s->dsp.put_pixels_tab       [0][dxy](dest_y, ptr, linesize, 16);
        }
    }

    if(s->flags&CODEC_FLAG_GRAY) return;

    motion_x= s->sprite_offset[1][0];
    motion_y= s->sprite_offset[1][1];
    src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
    src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
    motion_x<<=(3-s->sprite_warping_accuracy);
    motion_y<<=(3-s->sprite_warping_accuracy);
    src_x = av_clip(src_x, -8, s->width>>1);
    if (src_x == s->width>>1)
        motion_x =0;
    src_y = av_clip(src_y, -8, s->height>>1);
    if (src_y == s->height>>1)
        motion_y =0;

    offset = (src_y * uvlinesize) + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x >= (s->h_edge_pos>>1) - 9
           || (unsigned)src_y >= (s->v_edge_pos>>1) - 9){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr= s->edge_emu_buffer;
            emu=1;
        }
    }
    s->dsp.gmc1(dest_cb, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    s->dsp.gmc1(dest_cr, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
}
static inline void gmc_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               uint8_t **ref_picture)
{
    uint8_t *ptr;
    int linesize, uvlinesize;
    const int a= s->sprite_warping_accuracy;
    int ox, oy;

    linesize = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0];

    ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
    oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;

    s->dsp.gmc(dest_y, ptr, linesize, 16,
           ox,
           oy,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos, s->v_edge_pos);
    s->dsp.gmc(dest_y+8, ptr, linesize, 16,
           ox + s->sprite_delta[0][0]*8,
           oy + s->sprite_delta[1][0]*8,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos, s->v_edge_pos);

    if(s->flags&CODEC_FLAG_GRAY) return;

    ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
    oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;

    ptr = ref_picture[1];
    s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
           ox,
           oy,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos>>1, s->v_edge_pos>>1);

    ptr = ref_picture[2];
    s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
           ox,
           oy,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos>>1, s->v_edge_pos>>1);
}
/**
 * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
 * @param buf destination buffer
 * @param src source buffer
 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
 * @param block_w width of block
 * @param block_h height of block
 * @param src_x x coordinate of the top left sample of the block in the source buffer
 * @param src_y y coordinate of the top left sample of the block in the source buffer
 * @param w width of the source buffer
 * @param h height of the source buffer
 */
void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
                                    int src_x, int src_y, int w, int h){
    int x, y;
    int start_y, start_x, end_y, end_x;

    if(src_y>= h){
        src+= (h-1-src_y)*linesize;
        src_y=h-1;
    }else if(src_y<=-block_h){
        src+= (1-block_h-src_y)*linesize;
        src_y=1-block_h;
    }
    if(src_x>= w){
        src+= (w-1-src_x);
        src_x=w-1;
    }else if(src_x<=-block_w){
        src+= (1-block_w-src_x);
        src_x=1-block_w;
    }

    start_y= FFMAX(0, -src_y);
    start_x= FFMAX(0, -src_x);
    end_y= FFMIN(block_h, h-src_y);
    end_x= FFMIN(block_w, w-src_x);

    // copy existing part
    for(y=start_y; y<end_y; y++){
        for(x=start_x; x<end_x; x++){
            buf[x + y*linesize]= src[x + y*linesize];
        }
    }

    //top
    for(y=0; y<start_y; y++){
        for(x=start_x; x<end_x; x++){
            buf[x + y*linesize]= buf[x + start_y*linesize];
        }
    }

    //bottom
    for(y=end_y; y<block_h; y++){
        for(x=start_x; x<end_x; x++){
            buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
        }
    }

    for(y=0; y<block_h; y++){
       //left
       for(x=0; x<start_x; x++){
           buf[x + y*linesize]= buf[start_x + y*linesize];
       }

       //right
       for(x=end_x; x<block_w; x++){
           buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
       }
    }
}
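/* A minimal usage sketch for the helper above, kept out of the build with
 * #if 0: it mirrors how the MC routines below read a 17x17 halfpel luma
 * area that may overhang the frame border. The function name
 * edge_emu_example and its exact parameter set are illustrative only, not
 * part of this file's API. */
#if 0
static uint8_t *edge_emu_example(MpegEncContext *s, uint8_t *ptr, int src_x, int src_y){
    ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize,
                        17, 17,       /* block_w, block_h: 16x16 block + 1 for halfpel taps */
                        src_x, src_y, /* top left sample of the block in the reference */
                        s->h_edge_pos, s->v_edge_pos); /* valid source dimensions */
    return s->edge_emu_buffer; /* the caller then reads from the padded copy */
}
#endif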
static inline int hpel_motion(MpegEncContext *s,
                                  uint8_t *dest, uint8_t *src,
                                  int field_based, int field_select,
                                  int src_x, int src_y,
                                  int width, int height, int stride,
                                  int h_edge_pos, int v_edge_pos,
                                  int w, int h, op_pixels_func *pix_op,
                                  int motion_x, int motion_y)
{
    int dxy;
    int emu=0;

    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x += motion_x >> 1;
    src_y += motion_y >> 1;

    /* WARNING: do not forget half pels */
    src_x = av_clip(src_x, -16, width); //FIXME unneeded for emu?
    if (src_x == width)
        dxy &= ~1;
    src_y = av_clip(src_y, -16, height);
    if (src_y == height)
        dxy &= ~2;
    src += src_y * stride + src_x;

    if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){
        if(   (unsigned)src_x > h_edge_pos - (motion_x&1) - w
           || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){
            ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
                             src_x, src_y<<field_based, h_edge_pos, s->v_edge_pos);
            src= s->edge_emu_buffer;
            emu=1;
        }
    }
    if(field_select)
        src += s->linesize;
    pix_op[dxy](dest, src, stride, h);
    return emu;
}
static inline int hpel_motion_lowres(MpegEncContext *s,
                                  uint8_t *dest, uint8_t *src,
                                  int field_based, int field_select,
                                  int src_x, int src_y,
                                  int width, int height, int stride,
                                  int h_edge_pos, int v_edge_pos,
                                  int w, int h, h264_chroma_mc_func *pix_op,
                                  int motion_x, int motion_y)
{
    const int lowres= s->avctx->lowres;
    const int s_mask= (2<<lowres)-1;
    int emu=0;
    int sx, sy;

    if(s->quarter_sample){
        motion_x/=2;
        motion_y/=2;
    }

    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x += motion_x >> (lowres+1);
    src_y += motion_y >> (lowres+1);

    src += src_y * stride + src_x;

    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - w
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
        ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
                            src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
        src= s->edge_emu_buffer;
        emu=1;
    }

    if(field_select)
        src += s->linesize;

    pix_op[lowres](dest, src, stride, h, sx, sy);

    return emu;
}
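/* Editorial note: the bits of the motion vector that the >> (lowres+1)
 * shift would discard are kept in sx/sy via s_mask= (2<<lowres)-1 and handed
 * to the h264-style chroma MC function as its subpel weights, so lowres
 * decoding keeps fractional-pel accuracy instead of snapping to full pels. */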
/* apply one mpeg motion vector to the three components */
static av_always_inline void mpeg_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos, uvlinesize, linesize;

#if 0
if(s->quarter_sample)
{
    motion_x>>=1;
    motion_y>>=1;
}
#endif

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->current_picture.linesize[0] << field_based;
    uvlinesize = s->current_picture.linesize[1] << field_based;

    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x* 16               + (motion_x >> 1);
    src_y =(s->mb_y<<(4-field_based)) + (motion_y >> 1);

    if (s->out_format == FMT_H263) {
        if((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based){
            mx = (motion_x>>1)|(motion_x&1);
            my = motion_y >>1;
            uvdxy = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x* 8               + (mx >> 1);
            uvsrc_y = (s->mb_y<<(3-field_based)) + (my >> 1);
        }else{
            uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
            uvsrc_x = src_x>>1;
            uvsrc_y = src_y>>1;
        }
    }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvdxy = 0;
        uvsrc_x = s->mb_x*8 + mx;
        uvsrc_y = s->mb_y*8 + my;
    } else {
        if(s->chroma_y_shift){
            mx = motion_x / 2;
            my = motion_y / 2;
            uvdxy = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x* 8               + (mx >> 1);
            uvsrc_y = (s->mb_y<<(3-field_based)) + (my >> 1);
        } else {
            if(s->chroma_x_shift){
            //Chroma422
                mx = motion_x / 2;
                uvdxy = ((motion_y & 1) << 1) | (mx & 1);
                uvsrc_x = s->mb_x* 8           + (mx >> 1);
                uvsrc_y = src_y;
            } else {
            //Chroma444
                uvdxy = dxy;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if(   (unsigned)src_x > s->h_edge_pos - (motion_x&1) - 16
       || (unsigned)src_y >    v_edge_pos - (motion_y&1) - h){
            if(s->codec_id == CODEC_ID_MPEG2VIDEO ||
               s->codec_id == CODEC_ID_MPEG1VIDEO){
                av_log(s->avctx,AV_LOG_DEBUG,"MPEG motion vector out of boundary\n");
                return ;
            }
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                             src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
            ptr_y = s->edge_emu_buffer;
            if(!(s->flags&CODEC_FLAG_GRAY)){
                uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
                ff_emulated_edge_mc(uvbuf  , ptr_cb, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
                ff_emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
                ptr_cb= uvbuf;
                ptr_cr= uvbuf+16;
            }
    }

    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    pix_op[0][dxy](dest_y, ptr_y, linesize, h);

    if(!(s->flags&CODEC_FLAG_GRAY)){
        pix_op[s->chroma_x_shift][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
        pix_op[s->chroma_x_shift][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
    }
    if((ENABLE_H261_ENCODER || ENABLE_H261_DECODER) && s->out_format == FMT_H261){
        ff_h261_loop_filter(s);
    }
}
/* apply one mpeg motion vector to the three components */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres;
    const int s_mask= (2<<lowres)-1;
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.linesize[0] << field_based;
    uvlinesize = s->current_picture.linesize[1] << field_based;

    if(s->quarter_sample){ //FIXME obviously not perfect but qpel won't work in lowres anyway
        motion_x/=2;
        motion_y/=2;
    }

    if(field_based){
        motion_y += (bottom_field - field_select)*((1<<lowres)-1);
    }

    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x = s->mb_x*2*block_s               + (motion_x >> (lowres+1));
    src_y =(s->mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));

    if (s->out_format == FMT_H263) {
        uvsx = ((motion_x>>1) & s_mask) | (sx&1);
        uvsy = ((motion_y>>1) & s_mask) | (sy&1);
        uvsrc_x = src_x>>1;
        uvsrc_y = src_y>>1;
    }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2*mx) & s_mask;
        uvsy = (2*my) & s_mask;
        uvsrc_x = s->mb_x*block_s + (mx >> lowres);
        uvsrc_y = s->mb_y*block_s + (my >> lowres);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
        uvsx = mx & s_mask;
        uvsy = my & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
        uvsrc_y =(s->mb_y*block_s>>field_based) + (my >> (lowres+1));
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - 2*block_s
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                             src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
            ptr_y = s->edge_emu_buffer;
            if(!(s->flags&CODEC_FLAG_GRAY)){
                uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
                ff_emulated_edge_mc(uvbuf  , ptr_cb, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                ff_emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                ptr_cb= uvbuf;
                ptr_cr= uvbuf+16;
            }
    }

    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    sx <<= 2 - lowres;
    sy <<= 2 - lowres;
    pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);

    if(!(s->flags&CODEC_FLAG_GRAY)){
        uvsx <<= 2 - lowres;
        uvsy <<= 2 - lowres;
        pix_op[lowres](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        pix_op[lowres](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
    }
    //FIXME h261 lowres loop filter
}
//FIXME move to dsputil, avg variant, 16x16 version
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride){
    int x;
    uint8_t * const top   = src[1];
    uint8_t * const left  = src[2];
    uint8_t * const mid   = src[0];
    uint8_t * const right = src[3];
    uint8_t * const bottom= src[4];
#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x         , t, l, m, r, b);\
    OBMC_FILTER(x+1       , t, l, m, r, b);\
    OBMC_FILTER(x  +stride, t, l, m, r, b);\
    OBMC_FILTER(x+1+stride, t, l, m, r, b);

    x=0;
    OBMC_FILTER (x  , 2, 2, 4, 0, 0);
    OBMC_FILTER (x+1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x+2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x+4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x+6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x+7, 2, 0, 4, 2, 0);
    x+= stride;
    OBMC_FILTER (x  , 1, 2, 5, 0, 0);
    OBMC_FILTER (x+1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x+6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x+7, 1, 0, 5, 2, 0);
    x+= stride;
    OBMC_FILTER4(x  , 1, 2, 5, 0, 0);
    OBMC_FILTER4(x+2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x+4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x+6, 1, 0, 5, 2, 0);
    x+= 2*stride;
    OBMC_FILTER4(x  , 0, 2, 5, 0, 1);
    OBMC_FILTER4(x+2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x+4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x+6, 0, 0, 5, 2, 1);
    x+= 2*stride;
    OBMC_FILTER (x  , 0, 2, 5, 0, 1);
    OBMC_FILTER (x+1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x+2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x+4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x+6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x+7, 0, 0, 5, 2, 1);
    x+= stride;
    OBMC_FILTER (x  , 0, 2, 4, 0, 2);
    OBMC_FILTER (x+1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x+6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x+7, 0, 0, 4, 2, 2);
}
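/* Editorial note: each OBMC_FILTER output pixel is a weighted blend of the
 * five predictions (top, left, mid, right, bottom) whose weights always sum
 * to 8, so (sum + 4)>>3 is an unbiased rounding divide; e.g. the corner tap
 * (2,2,4,0,0) mixes 2/8 top + 2/8 left + 4/8 mid, while interior positions
 * lean more heavily on the mid prediction (weights up to 6/8). */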
/* obmc for 1 8x8 luma block */
static inline void obmc_motion(MpegEncContext *s,
                               uint8_t *dest, uint8_t *src,
                               int src_x, int src_y,
                               op_pixels_func *pix_op,
                               int16_t mv[5][2]/* mid top left right bottom*/)
{
    int i;
    uint8_t *ptr[5];

    assert(s->quarter_sample==0);

    for(i=0; i<5; i++){
        if(i && mv[i][0]==mv[MID][0] && mv[i][1]==mv[MID][1]){
            ptr[i]= ptr[MID];
        }else{
            ptr[i]= s->obmc_scratchpad + 8*(i&1) + s->linesize*8*(i>>1);
            hpel_motion(s, ptr[i], src, 0, 0,
                        src_x, src_y,
                        s->width, s->height, s->linesize,
                        s->h_edge_pos, s->v_edge_pos,
                        8, 8, pix_op,
                        mv[i][0], mv[i][1]);
        }
    }

    put_obmc(dest, ptr, s->linesize);
}
static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos, linesize, uvlinesize;

    dxy = ((motion_y & 3) << 2) | (motion_x & 3);
    src_x = s->mb_x *  16                 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize = s->linesize << field_based;
    uvlinesize = s->uvlinesize << field_based;

    if(field_based){
        mx= motion_x/2;
        my= motion_y>>1;
    }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
        static const int rtab[8]= {0,0,1,1,0,0,0,1};
        mx= (motion_x>>1) + rtab[motion_x&7];
        my= (motion_y>>1) + rtab[motion_y&7];
    }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
        mx= (motion_x>>1)|(motion_x&1);
        my= (motion_y>>1)|(motion_y&1);
    }else{
        mx= motion_x/2;
        my= motion_y/2;
    }
    mx= (mx>>1)|(mx&1);
    my= (my>>1)|(my&1);

    uvdxy= (mx&1) | ((my&1)<<1);
    mx>>=1;
    my>>=1;

    uvsrc_x = s->mb_x *  8                 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if(   (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 16
       || (unsigned)src_y >    v_edge_pos - (motion_y&3) - h  ){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                         src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
        ptr_y= s->edge_emu_buffer;
        if(!(s->flags&CODEC_FLAG_GRAY)){
            uint8_t *uvbuf= s->edge_emu_buffer + 18*s->linesize;
            ff_emulated_edge_mc(uvbuf, ptr_cb, s->uvlinesize, 9, 9 + field_based,
                             uvsrc_x, uvsrc_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ff_emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9, 9 + field_based,
                             uvsrc_x, uvsrc_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr_cb= uvbuf;
            ptr_cr= uvbuf + 16;
        }
    }

    if(!field_based)
        qpix_op[0][dxy](dest_y, ptr_y, linesize);
    else{
        if(bottom_field){
            dest_y += s->linesize;
            dest_cb+= s->uvlinesize;
            dest_cr+= s->uvlinesize;
        }

        if(field_select){
            ptr_y  += s->linesize;
            ptr_cb += s->uvlinesize;
            ptr_cr += s->uvlinesize;
        }
        //damn interlaced mode
        //FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y  , ptr_y  , linesize);
        qpix_op[1][dxy](dest_y+8, ptr_y+8, linesize);
    }
    if(!(s->flags&CODEC_FLAG_GRAY)){
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
    }
}
inline int ff_h263_round_chroma(int x){
    if (x >= 0)
        return  (h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
    else {
        x = -x;
        return -(h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
    }
}
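/* Editorial note, a worked example of the rounding table: the argument is
 * the sum of the four luma MV components, so x=5 (halfpel units) gives
 * h263_chroma_roundtab[5] + ((5>>3)&~1) = 1 + 0 = 1, and x=30 gives
 * roundtab[14] + ((30>>3)&~1) = 2 + 2 = 4; negative sums are mirrored
 * through the else branch so rounding is symmetric around zero. */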
/**
 * h263 chroma 4mv motion compensation.
 */
static inline void chroma_4mv_motion(MpegEncContext *s,
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t **ref_picture,
                                     op_pixels_func *pix_op,
                                     int mx, int my){
    int dxy, emu=0, src_x, src_y, offset;
    uint8_t *ptr;

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx= ff_h263_round_chroma(mx);
    my= ff_h263_round_chroma(my);

    dxy = ((my & 1) << 1) | (mx & 1);
    mx >>= 1;
    my >>= 1;

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = av_clip(src_x, -8, s->width/2);
    if (src_x == s->width/2)
        dxy &= ~1;
    src_y = av_clip(src_y, -8, s->height/2);
    if (src_y == s->height/2)
        dxy &= ~2;

    offset = (src_y * (s->uvlinesize)) + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x > (s->h_edge_pos>>1) - (dxy &1) - 8
           || (unsigned)src_y > (s->v_edge_pos>>1) - (dxy>>1) - 8){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr= s->edge_emu_buffer;
            emu=1;
        }
    }
    pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);

    ptr = ref_picture[2] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
}
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op,
                                     int mx, int my){
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres;
    const int s_mask= (2<<lowres)-1;
    const int h_edge_pos = s->h_edge_pos >> (lowres+1);
    const int v_edge_pos = s->v_edge_pos >> (lowres+1);
    int emu=0, src_x, src_y, offset, sx, sy;
    uint8_t *ptr;

    if(s->quarter_sample){
        mx/=2;
        my/=2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx= ff_h263_round_chroma(mx);
    my= ff_h263_round_chroma(my);

    sx= mx & s_mask;
    sy= my & s_mask;
    src_x = s->mb_x*block_s + (mx >> (lowres+1));
    src_y = s->mb_y*block_s + (my >> (lowres+1));

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x > h_edge_pos - (!!sx) - block_s
           || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
            ptr= s->edge_emu_buffer;
            emu=1;
        }
    }
    sx <<= 2 - lowres;
    sy <<= 2 - lowres;
    pix_op[lowres](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
        ptr= s->edge_emu_buffer;
    }
    pix_op[lowres](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir){
    /* fetch the pixels for the estimated mv 4 macroblocks ahead,
     * optimized for 64 byte cache lines */
    const int shift = s->quarter_sample ? 2 : 1;
    const int mx= (s->mv[dir][0][0]>>shift) + 16*s->mb_x + 8;
    const int my= (s->mv[dir][0][1]>>shift) + 16*s->mb_y;
    int off= mx + (my + (s->mb_x&3)*4)*s->linesize + 64;
    s->dsp.prefetch(pix[0]+off, s->linesize, 4);
    off= (mx>>1) + ((my>>1) + (s->mb_x&7))*s->uvlinesize + 64;
    s->dsp.prefetch(pix[1]+off, pix[2]-pix[1], 2);
}
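/* An editorial reading of the math above: the +64 offset pushes the target a
 * cache line beyond the predicted position, and the (s->mb_x&3)*4 and
 * (s->mb_x&7) terms stagger successive macroblocks across different lines,
 * so the data for the motion vector a few macroblocks ahead is likely
 * resident in cache by the time the real compensation runs. */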
/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * @param qpix_op qpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static inline void MPV_motion(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              int dir, uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
{
    int dxy, mx, my, src_x, src_y, motion_x, motion_y;
    int mb_x, mb_y, i;
    uint8_t *ptr, *dest;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    prefetch_motion(s, ref_picture, dir);

    if(s->obmc && s->pict_type != B_TYPE){
        int16_t mv_cache[4][4][2];
        const int xy= s->mb_x + s->mb_y*s->mb_stride;
        const int mot_stride= s->b8_stride;
        const int mot_xy= mb_x*2 + mb_y*2*mot_stride;

        assert(!s->mb_skipped);

        memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy           ], sizeof(int16_t)*4);
        memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
        memcpy(mv_cache[3][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);

        if(mb_y==0 || IS_INTRA(s->current_picture.mb_type[xy-s->mb_stride])){
            memcpy(mv_cache[0][1], mv_cache[1][1], sizeof(int16_t)*4);
        }else{
            memcpy(mv_cache[0][1], s->current_picture.motion_val[0][mot_xy-mot_stride], sizeof(int16_t)*4);
        }

        if(mb_x==0 || IS_INTRA(s->current_picture.mb_type[xy-1])){
            *(int32_t*)mv_cache[1][0]= *(int32_t*)mv_cache[1][1];
            *(int32_t*)mv_cache[2][0]= *(int32_t*)mv_cache[2][1];
        }else{
            *(int32_t*)mv_cache[1][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1];
            *(int32_t*)mv_cache[2][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1+mot_stride];
        }

        if(mb_x+1>=s->mb_width || IS_INTRA(s->current_picture.mb_type[xy+1])){
            *(int32_t*)mv_cache[1][3]= *(int32_t*)mv_cache[1][2];
            *(int32_t*)mv_cache[2][3]= *(int32_t*)mv_cache[2][2];
        }else{
            *(int32_t*)mv_cache[1][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2];
            *(int32_t*)mv_cache[2][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2+mot_stride];
        }

        mx = 0;
        my = 0;
        for(i=0;i<4;i++){
            const int x= (i&1)+1;