2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard.
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
23 #include "mpegvideo.h"
26 #include "fastmemcpy.h"
29 static void encode_picture(MpegEncContext *s, int picture_number);
30 static void dct_unquantize_mpeg1_c(MpegEncContext *s,
31 DCTELEM *block, int n, int qscale);
32 static void dct_unquantize_mpeg2_c(MpegEncContext *s,
33 DCTELEM *block, int n, int qscale);
34 static void dct_unquantize_h263_c(MpegEncContext *s,
35 DCTELEM *block, int n, int qscale);
36 static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w);
37 static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
39 void (*draw_edges)(UINT8 *buf, int wrap, int width, int height, int w)= draw_edges_c;
40 static void emulated_edge_mc(MpegEncContext *s, UINT8 *src, int linesize, int block_w, int block_h,
41 int src_x, int src_y, int w, int h);
45 /* enable all paranoid tests for rounding, overflows, etc... */
51 /* for jpeg fast DCT */
54 static const unsigned short aanscales[64] = {
55 /* precomputed values scaled up by 14 bits */
56 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
57 22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270,
58 21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906,
59 19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315,
60 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
61 12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552,
62 8867, 12299, 11585, 10426, 8867, 6967, 4799, 2446,
63 4520, 6270, 5906, 5315, 4520, 3552, 2446, 1247
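/* Assuming the usual AAN (Arai/Agui/Nakajima) fast-DCT derivation, entry (u,v) is
   roughly 2^14 * a(u) * a(v), where a(0) = 1 and a(k) = cos(k*PI/16) * sqrt(2);
   e.g. a(1) ~= 1.38704 and 16384 * 1.38704 ~= 22725, matching the first row above. */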
66 static UINT8 h263_chroma_roundtab[16] = {
67 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
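/* Maps the low 4 bits of a summed-up luma motion vector to the half-pel part of the
   derived chroma vector; see the 8x8 (4MV) case in MPV_motion() below. E.g. a sum of
   5 gives h263_chroma_roundtab[5] == 1, i.e. the half-pel chroma position. */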
70 static UINT16 default_mv_penalty[MAX_FCODE+1][MAX_MV*2+1];
71 static UINT8 default_fcode_tab[MAX_MV*2+1];
73 extern UINT8 zigzag_end[64];
75 /* default motion estimation */
76 int motion_estimation_method = ME_EPZS;
78 static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[64], uint16_t (*qmat16_bias)[64],
79 const UINT16 *quant_matrix, int bias)
83 for(qscale=1; qscale<32; qscale++){
85 if (s->fdct == ff_jpeg_fdct_islow) {
87 const int j= block_permute_op(i);
88 /* 16 <= qscale * quant_matrix[i] <= 7905 */
89 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
90 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
91 /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
93 qmat[qscale][j] = (int)((UINT64_C(1) << QMAT_SHIFT) /
94 (qscale * quant_matrix[j]));
96 } else if (s->fdct == fdct_ifast) {
98 const int j= block_permute_op(i);
99 /* 16 <= qscale * quant_matrix[i] <= 7905 */
100 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
101 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
102 /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
104 qmat[qscale][j] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
105 (aanscales[i] * qscale * quant_matrix[j]));
109 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
110 So 16 <= qscale * quant_matrix[i] <= 7905
111 so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
112 so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67
114 qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
115 qmat16[qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[block_permute_op(i)]);
117 if(qmat16[qscale][i]==0 || qmat16[qscale][i]==128*256) qmat16[qscale][i]=128*256-1;
118 qmat16_bias[qscale][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][i]);
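/* A hedged sketch (not the exact dct_quantize_c() path) of how these factors turn the
   per-coefficient division into a multiply and a shift: with qscale == 2 and
   quant_matrix[j] == 16, qmat[qscale][j] == (1 << QMAT_SHIFT) / 32, so

       level ~= (coeff * qmat[qscale][j]) >> QMAT_SHIFT;   // ~= coeff / (qscale * quant_matrix[j])

   and similarly for the 16-bit variants with QMAT_SHIFT_MMX. */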
123 // move into common.c perhaps
124 #define CHECKED_ALLOCZ(p, size)\
126 p= av_mallocz(size);\
133 /* init common structure for both encoder and decoder */
134 int MPV_common_init(MpegEncContext *s)
139 s->dct_unquantize_h263 = dct_unquantize_h263_c;
140 s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_c;
141 s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_c;
142 s->dct_quantize= dct_quantize_c;
144 if(s->avctx->dct_algo==FF_DCT_FASTINT)
145 s->fdct = fdct_ifast;
147 s->fdct = ff_jpeg_fdct_islow;
150 MPV_common_init_mmx(s);
153 MPV_common_init_axp(s);
156 MPV_common_init_mlib(s);
159 s->mb_width = (s->width + 15) / 16;
160 s->mb_height = (s->height + 15) / 16;
162 /* set default edge pos, will be overridden in decode_header if needed */
163 s->h_edge_pos= s->mb_width*16;
164 s->v_edge_pos= s->mb_height*16;
166 s->mb_num = s->mb_width * s->mb_height;
167 if(!(s->flags&CODEC_FLAG_DR1)){
168 s->linesize = s->mb_width * 16 + 2 * EDGE_WIDTH;
169 s->uvlinesize = s->mb_width * 8 + EDGE_WIDTH;
172 int w, h, shift, pict_start;
175 h = s->mb_height * 16 + 2 * EDGE_WIDTH;
176 shift = (i == 0) ? 0 : 1;
177 c_size = (s->linesize>>shift) * (h >> shift);
178 pict_start = (s->linesize>>shift) * (EDGE_WIDTH >> shift) + (EDGE_WIDTH >> shift);
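/* Layout note: each plane is padded by EDGE_WIDTH pixels on every side (halved for
   chroma); *_picture_base keeps the pointer to the raw allocation for freeing, while
   *_picture points pict_start bytes in, at the first visible pixel. Chroma planes are
   prefilled with 128 (grey) below. */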
180 CHECKED_ALLOCZ(pict, c_size)
181 s->last_picture_base[i] = pict;
182 s->last_picture[i] = pict + pict_start;
183 if(i>0) memset(s->last_picture_base[i], 128, c_size);
185 CHECKED_ALLOCZ(pict, c_size)
186 s->next_picture_base[i] = pict;
187 s->next_picture[i] = pict + pict_start;
188 if(i>0) memset(s->next_picture_base[i], 128, c_size);
190 if (s->has_b_frames || s->codec_id==CODEC_ID_MPEG4) {
191 /* Note: the MPEG4 check is here because of buggy encoders which don't set the low_delay flag but
192 do low-delay encoding, so we can't always distinguish B-frame-containing streams from low_delay streams */
193 CHECKED_ALLOCZ(pict, c_size)
194 s->aux_picture_base[i] = pict;
195 s->aux_picture[i] = pict + pict_start;
196 if(i>0) memset(s->aux_picture_base[i], 128, c_size);
199 s->ip_buffer_count= 2;
202 CHECKED_ALLOCZ(s->edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
206 int mv_table_size= (s->mb_width+2)*(s->mb_height+2);
208 CHECKED_ALLOCZ(s->mb_var , s->mb_num * sizeof(INT16))
209 CHECKED_ALLOCZ(s->mc_mb_var, s->mb_num * sizeof(INT16))
210 CHECKED_ALLOCZ(s->mb_mean , s->mb_num * sizeof(INT8))
212 /* Allocate MV tables */
213 CHECKED_ALLOCZ(s->p_mv_table , mv_table_size * 2 * sizeof(INT16))
214 CHECKED_ALLOCZ(s->b_forw_mv_table , mv_table_size * 2 * sizeof(INT16))
215 CHECKED_ALLOCZ(s->b_back_mv_table , mv_table_size * 2 * sizeof(INT16))
216 CHECKED_ALLOCZ(s->b_bidir_forw_mv_table , mv_table_size * 2 * sizeof(INT16))
217 CHECKED_ALLOCZ(s->b_bidir_back_mv_table , mv_table_size * 2 * sizeof(INT16))
218 CHECKED_ALLOCZ(s->b_direct_forw_mv_table, mv_table_size * 2 * sizeof(INT16))
219 CHECKED_ALLOCZ(s->b_direct_back_mv_table, mv_table_size * 2 * sizeof(INT16))
220 CHECKED_ALLOCZ(s->b_direct_mv_table , mv_table_size * 2 * sizeof(INT16))
222 CHECKED_ALLOCZ(s->me_scratchpad, s->linesize*16*3*sizeof(uint8_t))
224 CHECKED_ALLOCZ(s->me_map , ME_MAP_SIZE*sizeof(uint32_t))
225 CHECKED_ALLOCZ(s->me_score_map, ME_MAP_SIZE*sizeof(uint16_t))
228 for(j=0; j<REORDER_BUFFER_SIZE; j++){
234 h = s->mb_height * 16;
235 shift = (i == 0) ? 0 : 1;
236 c_size = (w >> shift) * (h >> shift);
238 CHECKED_ALLOCZ(pict, c_size);
239 s->picture_buffer[j][i] = pict;
244 if(s->codec_id==CODEC_ID_MPEG4){
245 CHECKED_ALLOCZ(s->tex_pb_buffer, PB_BUFFER_SIZE);
246 CHECKED_ALLOCZ( s->pb2_buffer, PB_BUFFER_SIZE);
249 if(s->msmpeg4_version){
250 CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
252 CHECKED_ALLOCZ(s->avctx->stats_out, 256);
255 if (s->out_format == FMT_H263 || s->encoding) {
257 /* Allocate MB type table */
258 CHECKED_ALLOCZ(s->mb_type , s->mb_num * sizeof(UINT8))
261 size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
262 CHECKED_ALLOCZ(s->motion_val, size * 2 * sizeof(INT16));
265 if(s->codec_id==CODEC_ID_MPEG4){
266 /* 4mv and interlaced direct mode decoding tables */
267 CHECKED_ALLOCZ(s->co_located_type_table, s->mb_num * sizeof(UINT8))
268 CHECKED_ALLOCZ(s->field_mv_table, s->mb_num*2*2 * sizeof(INT16))
269 CHECKED_ALLOCZ(s->field_select_table, s->mb_num*2* sizeof(INT8))
272 if (s->h263_pred || s->h263_plus) {
273 int y_size, c_size, i, size;
277 y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
278 c_size = (s->mb_width + 2) * (s->mb_height + 2);
279 size = y_size + 2 * c_size;
280 CHECKED_ALLOCZ(s->dc_val[0], size * sizeof(INT16));
281 s->dc_val[1] = s->dc_val[0] + y_size;
282 s->dc_val[2] = s->dc_val[1] + c_size;
284 s->dc_val[0][i] = 1024;
287 CHECKED_ALLOCZ(s->ac_val[0], size * sizeof(INT16) * 16);
288 s->ac_val[1] = s->ac_val[0] + y_size;
289 s->ac_val[2] = s->ac_val[1] + c_size;
292 CHECKED_ALLOCZ(s->coded_block, y_size);
294 /* divx501 bitstream reorder buffer */
295 CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
297 /* cbp, ac_pred, pred_dir */
298 CHECKED_ALLOCZ(s->cbp_table , s->mb_num * sizeof(UINT8))
299 CHECKED_ALLOCZ(s->pred_dir_table, s->mb_num * sizeof(UINT8))
301 CHECKED_ALLOCZ(s->qscale_table , s->mb_num * sizeof(UINT8))
303 /* which mb is an intra block */
304 CHECKED_ALLOCZ(s->mbintra_table, s->mb_num);
305 memset(s->mbintra_table, 1, s->mb_num);
307 /* default structure is frame */
308 s->picture_structure = PICT_FRAME;
310 /* init macroblock skip table */
311 CHECKED_ALLOCZ(s->mbskip_table, s->mb_num);
313 s->block= s->blocks[0];
315 s->context_initialized = 1;
325 /* init common structure for both encoder and decoder */
326 void MPV_common_end(MpegEncContext *s)
330 av_freep(&s->mb_type);
331 av_freep(&s->mb_var);
332 av_freep(&s->mc_mb_var);
333 av_freep(&s->mb_mean);
334 av_freep(&s->p_mv_table);
335 av_freep(&s->b_forw_mv_table);
336 av_freep(&s->b_back_mv_table);
337 av_freep(&s->b_bidir_forw_mv_table);
338 av_freep(&s->b_bidir_back_mv_table);
339 av_freep(&s->b_direct_forw_mv_table);
340 av_freep(&s->b_direct_back_mv_table);
341 av_freep(&s->b_direct_mv_table);
342 av_freep(&s->motion_val);
343 av_freep(&s->dc_val[0]);
344 av_freep(&s->ac_val[0]);
345 av_freep(&s->coded_block);
346 av_freep(&s->mbintra_table);
347 av_freep(&s->cbp_table);
348 av_freep(&s->pred_dir_table);
349 av_freep(&s->qscale_table);
350 av_freep(&s->me_scratchpad);
351 av_freep(&s->me_map);
352 av_freep(&s->me_score_map);
354 av_freep(&s->mbskip_table);
355 av_freep(&s->bitstream_buffer);
356 av_freep(&s->tex_pb_buffer);
357 av_freep(&s->pb2_buffer);
358 av_freep(&s->edge_emu_buffer);
359 av_freep(&s->co_located_type_table);
360 av_freep(&s->field_mv_table);
361 av_freep(&s->field_select_table);
362 av_freep(&s->avctx->stats_out);
363 av_freep(&s->ac_stats);
367 if(!(s->flags&CODEC_FLAG_DR1)){
368 av_freep(&s->last_picture_base[i]);
369 av_freep(&s->next_picture_base[i]);
370 av_freep(&s->aux_picture_base[i]);
372 s->last_picture_base[i]=
373 s->next_picture_base[i]=
374 s->aux_picture_base [i] = NULL;
377 s->aux_picture [i] = NULL;
379 for(j=0; j<REORDER_BUFFER_SIZE; j++){
380 av_freep(&s->picture_buffer[j][i]);
383 s->context_initialized = 0;
386 /* init video encoder */
387 int MPV_encode_init(AVCodecContext *avctx)
389 MpegEncContext *s = avctx->priv_data;
392 avctx->pix_fmt = PIX_FMT_YUV420P;
394 s->bit_rate = avctx->bit_rate;
395 s->bit_rate_tolerance = avctx->bit_rate_tolerance;
396 s->frame_rate = avctx->frame_rate;
397 s->width = avctx->width;
398 s->height = avctx->height;
399 if(avctx->gop_size > 600){
400 fprintf(stderr, "Warning: keyframe interval too large, reducing it...\n");
403 s->gop_size = avctx->gop_size;
404 s->rtp_mode = avctx->rtp_mode;
405 s->rtp_payload_size = avctx->rtp_payload_size;
406 if (avctx->rtp_callback)
407 s->rtp_callback = avctx->rtp_callback;
408 s->qmin= avctx->qmin;
409 s->qmax= avctx->qmax;
410 s->max_qdiff= avctx->max_qdiff;
411 s->qcompress= avctx->qcompress;
412 s->qblur= avctx->qblur;
414 s->aspect_ratio_info= avctx->aspect_ratio_info;
415 if (avctx->aspect_ratio_info == FF_ASPECT_EXTENDED)
417 s->aspected_width = avctx->aspected_width;
418 s->aspected_height = avctx->aspected_height;
420 s->flags= avctx->flags;
421 s->max_b_frames= avctx->max_b_frames;
422 s->b_frame_strategy= avctx->b_frame_strategy;
423 s->codec_id= avctx->codec->id;
424 s->luma_elim_threshold = avctx->luma_elim_threshold;
425 s->chroma_elim_threshold= avctx->chroma_elim_threshold;
426 s->strict_std_compliance= avctx->strict_std_compliance;
427 s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
428 s->mpeg_quant= avctx->mpeg_quant;
430 if (s->gop_size <= 1) {
438 if (avctx->me_method == 0)
439 /* For compatibility */
440 s->me_method = motion_estimation_method;
442 s->me_method = avctx->me_method;
445 s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE);
447 s->adaptive_quant= ( s->avctx->lumi_masking
448 || s->avctx->dark_masking
449 || s->avctx->temporal_cplx_masking
450 || s->avctx->spatial_cplx_masking
451 || s->avctx->p_masking)
454 switch(avctx->codec->id) {
455 case CODEC_ID_MPEG1VIDEO:
456 s->out_format = FMT_MPEG1;
457 avctx->delay=0; //FIXME not sure, should check the spec
460 s->out_format = FMT_MJPEG;
461 s->intra_only = 1; /* force intra only for jpeg */
462 s->mjpeg_write_tables = 1; /* write all tables */
463 s->mjpeg_data_only_frames = 0; /* write all the needed headers */
464 s->mjpeg_vsample[0] = 2; /* set up default sampling factors */
465 s->mjpeg_vsample[1] = 1; /* the only currently supported values */
466 s->mjpeg_vsample[2] = 1;
467 s->mjpeg_hsample[0] = 2;
468 s->mjpeg_hsample[1] = 1;
469 s->mjpeg_hsample[2] = 1;
470 if (mjpeg_init(s) < 0)
475 if (h263_get_picture_format(s->width, s->height) == 7) {
476 printf("Input picture size isn't suitable for h263 codec! try h263+\n");
479 s->out_format = FMT_H263;
483 s->out_format = FMT_H263;
485 s->rtp_payload_size = 1200;
487 s->unrestricted_mv = 1;
490 /* These are just to be sure */
496 s->out_format = FMT_H263;
501 s->out_format = FMT_H263;
503 s->unrestricted_mv = 1;
504 s->has_b_frames= s->max_b_frames ? 1 : 0;
506 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
508 case CODEC_ID_MSMPEG4V1:
509 s->out_format = FMT_H263;
512 s->unrestricted_mv = 1;
513 s->msmpeg4_version= 1;
516 case CODEC_ID_MSMPEG4V2:
517 s->out_format = FMT_H263;
520 s->unrestricted_mv = 1;
521 s->msmpeg4_version= 2;
524 case CODEC_ID_MSMPEG4V3:
525 s->out_format = FMT_H263;
528 s->unrestricted_mv = 1;
529 s->msmpeg4_version= 3;
533 s->out_format = FMT_H263;
536 s->unrestricted_mv = 1;
537 s->msmpeg4_version= 4;
541 s->out_format = FMT_H263;
544 s->unrestricted_mv = 1;
545 s->msmpeg4_version= 5;
552 { /* set up some sane defaults, some codecs might override them later */
557 memset(default_mv_penalty, 0, sizeof(UINT16)*(MAX_FCODE+1)*(2*MAX_MV+1));
558 memset(default_fcode_tab , 0, sizeof(UINT8)*(2*MAX_MV+1));
560 for(i=-16; i<16; i++){
561 default_fcode_tab[i + MAX_MV]= 1;
565 s->mv_penalty= default_mv_penalty;
566 s->fcode_tab= default_fcode_tab;
568 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
570 if (s->out_format == FMT_H263)
572 else if (s->out_format == FMT_MPEG1)
573 ff_mpeg1_encode_init(s);
574 if(s->msmpeg4_version)
575 ff_msmpeg4_encode_init(s);
577 /* don't use the mv_penalty table for the cruder motion estimation methods, as it would only confuse them */
578 if (s->me_method < ME_EPZS) s->mv_penalty = default_mv_penalty;
583 if (MPV_common_init(s) < 0)
586 /* init default q matrix */
588 if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
589 s->intra_matrix[i] = ff_mpeg4_default_intra_matrix[i];
590 s->inter_matrix[i] = ff_mpeg4_default_non_intra_matrix[i];
591 }else if(s->out_format == FMT_H263){
593 s->inter_matrix[i] = ff_mpeg1_default_non_intra_matrix[i];
595 s->intra_matrix[i] = ff_mpeg1_default_intra_matrix[i];
596 s->inter_matrix[i] = ff_mpeg1_default_non_intra_matrix[i];
600 /* precompute matrix */
601 /* for mjpeg, qscale is folded into the matrix itself (per frame, in encode_picture), so this is skipped */
602 if (s->out_format != FMT_MJPEG) {
603 convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, s->q_intra_matrix16_bias,
604 s->intra_matrix, s->intra_quant_bias);
605 convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16, s->q_inter_matrix16_bias,
606 s->inter_matrix, s->inter_quant_bias);
609 if(ff_rate_control_init(s) < 0)
612 s->picture_number = 0;
613 s->picture_in_gop_number = 0;
614 s->fake_picture_number = 0;
615 /* motion detector init */
622 int MPV_encode_end(AVCodecContext *avctx)
624 MpegEncContext *s = avctx->priv_data;
630 ff_rate_control_uninit(s);
633 if (s->out_format == FMT_MJPEG)
639 /* draw edges of width 'w', by replicating the border pixels of the width x height image */
640 //FIXME check that this is ok for mpeg4 interlaced
641 static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w)
643 UINT8 *ptr, *last_line;
646 last_line = buf + (height - 1) * wrap;
649 memcpy(buf - (i + 1) * wrap, buf, width);
650 memcpy(last_line + (i + 1) * wrap, last_line, width);
654 for(i=0;i<height;i++) {
655 memset(ptr - w, ptr[0], w);
656 memset(ptr + width, ptr[width-1], w);
661 memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
662 memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
663 memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
664 memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
668 /* generic function for encode/decode called before a frame is coded/decoded */
669 void MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
676 avctx->mbskip_table= s->mbskip_table;
678 if(avctx->flags&CODEC_FLAG_DR1){
679 avctx->get_buffer_callback(avctx, s->width, s->height, s->pict_type);
681 s->linesize = avctx->dr_stride;
682 s->uvlinesize= avctx->dr_uvstride;
683 s->ip_buffer_count= avctx->dr_ip_buffer_count;
685 avctx->dr_ip_buffer_count= s->ip_buffer_count;
687 if (s->pict_type == B_TYPE) {
689 if(avctx->flags&CODEC_FLAG_DR1)
690 s->aux_picture[i]= avctx->dr_buffer[i];
692 //FIXME the following should never be needed, the decoder should drop b frames if no reference is available
693 if(s->next_picture[i]==NULL)
694 s->next_picture[i]= s->aux_picture[i];
695 if(s->last_picture[i]==NULL)
696 s->last_picture[i]= s->next_picture[i];
698 s->current_picture[i] = s->aux_picture[i];
702 /* swap next and last */
703 if(avctx->flags&CODEC_FLAG_DR1)
704 tmp= avctx->dr_buffer[i];
706 tmp = s->last_picture[i];
708 s->last_picture[i] = s->next_picture[i];
709 s->next_picture[i] = tmp;
710 s->current_picture[i] = tmp;
712 if(s->last_picture[i]==NULL)
713 s->last_picture[i]= s->next_picture[i];
715 s->last_dr_opaque= s->next_dr_opaque;
716 s->next_dr_opaque= avctx->dr_opaque_frame;
718 if(s->has_b_frames && s->last_dr_opaque && s->codec_id!=CODEC_ID_SVQ1)
719 avctx->dr_opaque_frame= s->last_dr_opaque;
721 avctx->dr_opaque_frame= s->next_dr_opaque;
724 /* set the dequantizer; we can't do it during init as it might change for mpeg4,
725 and we can't do it in the header decode as init isn't called for mpeg4 there yet */
726 if(s->out_format == FMT_H263){
728 s->dct_unquantize = s->dct_unquantize_mpeg2;
730 s->dct_unquantize = s->dct_unquantize_h263;
732 s->dct_unquantize = s->dct_unquantize_mpeg1;
735 /* generic function for encode/decode called after a frame has been coded/decoded */
736 void MPV_frame_end(MpegEncContext *s)
738 s->avctx->key_frame = (s->pict_type == I_TYPE);
739 s->avctx->pict_type = s->pict_type;
741 /* draw edge for correct motion prediction if outside */
742 if (s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
743 draw_edges(s->current_picture[0], s->linesize , s->h_edge_pos , s->v_edge_pos , EDGE_WIDTH );
744 draw_edges(s->current_picture[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
745 draw_edges(s->current_picture[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
749 s->last_pict_type = s->pict_type;
750 if(s->pict_type!=B_TYPE){
751 s->last_non_b_pict_type= s->pict_type;
752 s->num_available_buffers++;
753 if(s->num_available_buffers>2) s->num_available_buffers= 2;
757 /* reorder input for encoding */
758 void reorder_input(MpegEncContext *s, AVPicture *pict)
762 if(s->max_b_frames > FF_MAX_B_FRAMES) s->max_b_frames= FF_MAX_B_FRAMES;
764 // delay= s->max_b_frames+1; (or 0 if there are no B-frames, due to decoder differences)
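/* Illustrative example (not from a real stream): with max_b_frames == 2, the display
   order I0 B1 B2 P3 B4 B5 P6 ... is written in coded order I0 P3 B1 B2 P6 B4 B5 ...,
   i.e. each B picture is emitted right after the future reference it depends on. */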
766 for(j=0; j<REORDER_BUFFER_SIZE-1; j++){
767 s->coded_order[j]= s->coded_order[j+1];
769 s->coded_order[j].picture[0]= s->coded_order[j].picture[1]= s->coded_order[j].picture[2]= NULL; //catch uninitialized buffers
770 s->coded_order[j].pict_type=0;
772 switch(s->input_pict_type){
777 index= s->max_b_frames - s->b_frames_since_non_b;
778 s->b_frames_since_non_b=0;
781 index= s->max_b_frames + 1;
782 s->b_frames_since_non_b++;
785 //printf("index:%d type:%d strides: %d %d\n", index, s->input_pict_type, pict->linesize[0], s->linesize);
786 if( (index==0 || (s->flags&CODEC_FLAG_INPUT_PRESERVED))
787 && pict->linesize[0] == s->linesize
788 && pict->linesize[1] == s->uvlinesize
789 && pict->linesize[2] == s->uvlinesize){
792 s->coded_order[index].picture[i]= pict->data[i];
797 uint8_t *src = pict->data[i];
799 int src_wrap = pict->linesize[i];
800 int dest_wrap = s->linesize;
804 if(index==0) dest= s->last_picture[i]+16; //this is really current_picture, but the switch happens after reordering
805 else dest= s->picture_buffer[s->picture_buffer_index][i];
813 s->coded_order[index].picture[i]= dest;
815 memcpy(dest, src, w);
821 s->picture_buffer_index++;
822 if(s->picture_buffer_index >= REORDER_BUFFER_SIZE) s->picture_buffer_index=0;
825 s->coded_order[index].pict_type = s->input_pict_type;
826 s->coded_order[index].qscale = s->input_qscale;
827 s->coded_order[index].force_type= s->force_input_type;
828 s->coded_order[index].picture_in_gop_number= s->input_picture_in_gop_number;
829 s->coded_order[index].picture_number= s->input_picture_number;
832 s->new_picture[i]= s->coded_order[0].picture[i];
836 int MPV_encode_picture(AVCodecContext *avctx,
837 unsigned char *buf, int buf_size, void *data)
839 MpegEncContext *s = avctx->priv_data;
840 AVPicture *pict = data;
842 s->input_qscale = avctx->quality;
844 init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
846 if(avctx->flags&CODEC_FLAG_TYPE){
848 s->force_input_type= avctx->key_frame ? I_TYPE : P_TYPE;
849 }else if(s->flags&CODEC_FLAG_PASS2){
851 s->force_input_type= s->rc_context.entry[s->input_picture_number].new_pict_type;
853 s->force_input_type=0;
854 if (!s->intra_only) {
855 /* first picture of GOP is intra */
856 if (s->input_picture_in_gop_number % s->gop_size==0){
857 s->input_pict_type = I_TYPE;
858 }else if(s->max_b_frames==0){
859 s->input_pict_type = P_TYPE;
861 if(s->b_frames_since_non_b < s->max_b_frames) //FIXME more IQ
862 s->input_pict_type = B_TYPE;
864 s->input_pict_type = P_TYPE;
867 s->input_pict_type = I_TYPE;
871 if(s->input_pict_type==I_TYPE)
872 s->input_picture_in_gop_number=0;
874 reorder_input(s, pict);
877 if(s->coded_order[0].picture[0]){
879 s->pict_type= s->coded_order[0].pict_type;
880 if (s->fixed_qscale) /* the ratecontrol needs the last qscale so we don't touch it for CBR */
881 s->qscale= s->coded_order[0].qscale;
882 s->force_type= s->coded_order[0].force_type;
883 s->picture_in_gop_number= s->coded_order[0].picture_in_gop_number;
884 s->picture_number= s->coded_order[0].picture_number;
886 MPV_frame_start(s, avctx);
888 encode_picture(s, s->picture_number);
890 avctx->real_pict_num = s->picture_number;
891 avctx->header_bits = s->header_bits;
892 avctx->mv_bits = s->mv_bits;
893 avctx->misc_bits = s->misc_bits;
894 avctx->i_tex_bits = s->i_tex_bits;
895 avctx->p_tex_bits = s->p_tex_bits;
896 avctx->i_count = s->i_count;
897 avctx->p_count = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
898 avctx->skip_count = s->skip_count;
902 if (s->out_format == FMT_MJPEG)
903 mjpeg_picture_trailer(s);
906 avctx->quality = s->qscale;
908 if(s->flags&CODEC_FLAG_PASS1)
909 ff_write_pass1_stats(s);
913 s->input_picture_number++;
914 s->input_picture_in_gop_number++;
916 flush_put_bits(&s->pb);
917 s->frame_bits = (pbBufPtr(&s->pb) - s->pb.buf) * 8;
919 s->total_bits += s->frame_bits;
920 avctx->frame_bits = s->frame_bits;
921 //printf("fcode: %d, type: %d, head: %d, mv: %d, misc: %d, frame: %d, itex: %d, ptex: %d\n",
922 //s->f_code, avctx->key_frame, s->header_bits, s->mv_bits, s->misc_bits, s->frame_bits, s->i_tex_bits, s->p_tex_bits);
923 #if 0 //dump some stats to stats.txt for testing/debugging
924 if(s->max_b_frames==0)
927 if(!f) f= fopen("stats.txt", "wb");
928 get_psnr(pict->data, s->current_picture,
929 pict->linesize, s->linesize, avctx);
930 fprintf(f, "%7d, %7d, %2.4f\n", pbBufPtr(&s->pb) - s->pb.buf, s->qscale, avctx->psnr_y);
934 if (avctx->get_psnr) {
935 /* At this point pict->data should have the original frame */
936 /* and s->current_picture should have the coded/decoded frame */
937 get_psnr(pict->data, s->current_picture,
938 pict->linesize, s->linesize, avctx);
939 // printf("%f\n", avctx->psnr_y);
941 return pbBufPtr(&s->pb) - s->pb.buf;
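/* Global motion compensation for the single-warp-point case (gmc1): the whole
   macroblock is predicted from one sprite offset, scaled by s->sprite_warping_accuracy;
   configurations with more than one warp point are not handled here. */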
944 static inline void gmc1_motion(MpegEncContext *s,
945 UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
947 UINT8 **ref_picture, int src_offset,
951 int offset, src_x, src_y, linesize, uvlinesize;
952 int motion_x, motion_y;
955 if(s->real_sprite_warping_points>1) printf("more than 1 warp point isn't supported\n");
956 motion_x= s->sprite_offset[0][0];
957 motion_y= s->sprite_offset[0][1];
958 src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
959 src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
960 motion_x<<=(3-s->sprite_warping_accuracy);
961 motion_y<<=(3-s->sprite_warping_accuracy);
962 src_x = clip(src_x, -16, s->width);
963 if (src_x == s->width)
965 src_y = clip(src_y, -16, s->height);
966 if (src_y == s->height)
969 linesize = s->linesize;
970 uvlinesize = s->uvlinesize;
971 ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
974 if(s->flags&CODEC_FLAG_EMU_EDGE){
975 if(src_x<0 || src_y<0 || src_x + (motion_x&15) + 16 > s->h_edge_pos
976 || src_y + (motion_y&15) + h > s->v_edge_pos){
977 emulated_edge_mc(s, ptr, linesize, 17, h+1, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
978 ptr= s->edge_emu_buffer;
982 gmc1(dest_y , ptr , linesize, h, motion_x&15, motion_y&15, s->no_rounding);
983 gmc1(dest_y+8, ptr+8, linesize, h, motion_x&15, motion_y&15, s->no_rounding);
985 motion_x= s->sprite_offset[1][0];
986 motion_y= s->sprite_offset[1][1];
987 src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
988 src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
989 motion_x<<=(3-s->sprite_warping_accuracy);
990 motion_y<<=(3-s->sprite_warping_accuracy);
991 src_x = clip(src_x, -8, s->width>>1);
992 if (src_x == s->width>>1)
994 src_y = clip(src_y, -8, s->height>>1);
995 if (src_y == s->height>>1)
998 offset = (src_y * uvlinesize) + src_x + (src_offset>>1);
999 ptr = ref_picture[1] + offset;
1001 emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1002 ptr= s->edge_emu_buffer;
1004 gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, h>>1, motion_x&15, motion_y&15, s->no_rounding);
1006 ptr = ref_picture[2] + offset;
1008 emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1009 ptr= s->edge_emu_buffer;
1011 gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, h>>1, motion_x&15, motion_y&15, s->no_rounding);
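/* Copies the block_w x block_h area around (src_x, src_y) into s->edge_emu_buffer,
   replicating border pixels for the parts that fall outside the w x h valid area, so
   that motion compensation can read from the buffer as if the edges had been drawn. */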
1016 static void emulated_edge_mc(MpegEncContext *s, UINT8 *src, int linesize, int block_w, int block_h,
1017 int src_x, int src_y, int w, int h){
1019 int start_y, start_x, end_y, end_x;
1020 UINT8 *buf= s->edge_emu_buffer;
1023 src+= (h-1-src_y)*linesize;
1025 }else if(src_y<=-block_h){
1026 src+= (1-block_h-src_y)*linesize;
1032 }else if(src_x<=-block_w){
1033 src+= (1-block_w-src_x);
1037 start_y= MAX(0, -src_y);
1038 start_x= MAX(0, -src_x);
1039 end_y= MIN(block_h, h-src_y);
1040 end_x= MIN(block_w, w-src_x);
1042 // copy existing part
1043 for(y=start_y; y<end_y; y++){
1044 for(x=start_x; x<end_x; x++){
1045 buf[x + y*linesize]= src[x + y*linesize];
1050 for(y=0; y<start_y; y++){
1051 for(x=start_x; x<end_x; x++){
1052 buf[x + y*linesize]= buf[x + start_y*linesize];
1057 for(y=end_y; y<block_h; y++){
1058 for(x=start_x; x<end_x; x++){
1059 buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
1063 for(y=0; y<block_h; y++){
1065 for(x=0; x<start_x; x++){
1066 buf[x + y*linesize]= buf[start_x + y*linesize];
1070 for(x=end_x; x<block_w; x++){
1071 buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
1077 /* apply one mpeg motion vector to the three components */
1078 static inline void mpeg_motion(MpegEncContext *s,
1079 UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1081 UINT8 **ref_picture, int src_offset,
1082 int field_based, op_pixels_func (*pix_op)[4],
1083 int motion_x, int motion_y, int h)
1086 int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
1089 if(s->quarter_sample)
1095 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
1096 src_x = s->mb_x * 16 + (motion_x >> 1);
1097 src_y = s->mb_y * (16 >> field_based) + (motion_y >> 1);
1099 /* WARNING: do not forget half pels */
1100 height = s->height >> field_based;
1101 v_edge_pos = s->v_edge_pos >> field_based;
1102 src_x = clip(src_x, -16, s->width);
1103 if (src_x == s->width)
1105 src_y = clip(src_y, -16, height);
1106 if (src_y == height)
1108 linesize = s->linesize << field_based;
1109 uvlinesize = s->uvlinesize << field_based;
1110 ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset;
1111 dest_y += dest_offset;
1113 if(s->flags&CODEC_FLAG_EMU_EDGE){
1114 if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 16 > s->h_edge_pos
1115 || src_y + (motion_y&1) + h > v_edge_pos){
1116 emulated_edge_mc(s, ptr, linesize, 17, h+1, src_x, src_y, s->h_edge_pos, v_edge_pos);
1117 ptr= s->edge_emu_buffer;
1121 pix_op[0][dxy](dest_y, ptr, linesize, h);
1123 if(s->flags&CODEC_FLAG_GRAY) return;
1125 if (s->out_format == FMT_H263) {
1127 if ((motion_x & 3) != 0)
1129 if ((motion_y & 3) != 0)
1136 dxy = ((my & 1) << 1) | (mx & 1);
1141 src_x = s->mb_x * 8 + mx;
1142 src_y = s->mb_y * (8 >> field_based) + my;
1143 src_x = clip(src_x, -8, s->width >> 1);
1144 if (src_x == (s->width >> 1))
1146 src_y = clip(src_y, -8, height >> 1);
1147 if (src_y == (height >> 1))
1149 offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
1150 ptr = ref_picture[1] + offset;
1152 emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, v_edge_pos>>1);
1153 ptr= s->edge_emu_buffer;
1155 pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1157 ptr = ref_picture[2] + offset;
1159 emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, v_edge_pos>>1);
1160 ptr= s->edge_emu_buffer;
1162 pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1165 static inline void qpel_motion(MpegEncContext *s,
1166 UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1168 UINT8 **ref_picture, int src_offset,
1169 int field_based, op_pixels_func (*pix_op)[4],
1170 qpel_mc_func (*qpix_op)[16],
1171 int motion_x, int motion_y, int h)
1174 int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
1177 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
1178 src_x = s->mb_x * 16 + (motion_x >> 2);
1179 src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
1181 height = s->height >> field_based;
1182 v_edge_pos = s->v_edge_pos >> field_based;
1183 src_x = clip(src_x, -16, s->width);
1184 if (src_x == s->width)
1186 src_y = clip(src_y, -16, height);
1187 if (src_y == height)
1189 linesize = s->linesize << field_based;
1190 uvlinesize = s->uvlinesize << field_based;
1191 ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1192 dest_y += dest_offset;
1193 //printf("%d %d %d\n", src_x, src_y, dxy);
1195 if(s->flags&CODEC_FLAG_EMU_EDGE){
1196 if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 16 > s->h_edge_pos
1197 || src_y + (motion_y&3) + h > v_edge_pos){
1198 emulated_edge_mc(s, ptr, linesize, 17, h+1, src_x, src_y, s->h_edge_pos, v_edge_pos);
1199 ptr= s->edge_emu_buffer;
1204 qpix_op[0][dxy](dest_y, ptr, linesize);
1206 //damn interlaced mode
1207 //FIXME boundary mirroring is not exactly correct here
1208 qpix_op[1][dxy](dest_y , ptr , linesize);
1209 qpix_op[1][dxy](dest_y+8, ptr+8, linesize);
1212 if(s->flags&CODEC_FLAG_GRAY) return;
1217 }else if(s->divx_version){
1218 mx= (motion_x>>1)|(motion_x&1);
1219 my= (motion_y>>1)|(motion_y&1);
1226 dxy= (mx&1) | ((my&1)<<1);
1230 src_x = s->mb_x * 8 + mx;
1231 src_y = s->mb_y * (8 >> field_based) + my;
1232 src_x = clip(src_x, -8, s->width >> 1);
1233 if (src_x == (s->width >> 1))
1235 src_y = clip(src_y, -8, height >> 1);
1236 if (src_y == (height >> 1))
1239 offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
1240 ptr = ref_picture[1] + offset;
1242 emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, v_edge_pos>>1);
1243 ptr= s->edge_emu_buffer;
1245 pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1247 ptr = ref_picture[2] + offset;
1249 emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, v_edge_pos>>1);
1250 ptr= s->edge_emu_buffer;
1252 pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1256 static inline void MPV_motion(MpegEncContext *s,
1257 UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1258 int dir, UINT8 **ref_picture,
1259 op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
1261 int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y;
1269 switch(s->mv_type) {
1272 gmc1_motion(s, dest_y, dest_cb, dest_cr, 0,
1275 }else if(s->quarter_sample){
1276 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
1279 s->mv[dir][0][0], s->mv[dir][0][1], 16);
1281 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
1284 s->mv[dir][0][0], s->mv[dir][0][1], 16);
1290 if(s->quarter_sample){
1292 motion_x = s->mv[dir][i][0];
1293 motion_y = s->mv[dir][i][1];
1295 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
1296 src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
1297 src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;
1299 /* WARNING: do not forget half pels */
1300 src_x = clip(src_x, -16, s->width);
1301 if (src_x == s->width)
1303 src_y = clip(src_y, -16, s->height);
1304 if (src_y == s->height)
1307 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
1308 if(s->flags&CODEC_FLAG_EMU_EDGE){
1309 if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 8 > s->h_edge_pos
1310 || src_y + (motion_y&3) + 8 > s->v_edge_pos){
1311 emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1312 ptr= s->edge_emu_buffer;
1315 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
1316 qpix_op[1][dxy](dest, ptr, s->linesize);
1318 mx += s->mv[dir][i][0]/2;
1319 my += s->mv[dir][i][1]/2;
1323 motion_x = s->mv[dir][i][0];
1324 motion_y = s->mv[dir][i][1];
1326 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
1327 src_x = mb_x * 16 + (motion_x >> 1) + (i & 1) * 8;
1328 src_y = mb_y * 16 + (motion_y >> 1) + (i >>1) * 8;
1330 /* WARNING: do not forget half pels */
1331 src_x = clip(src_x, -16, s->width);
1332 if (src_x == s->width)
1334 src_y = clip(src_y, -16, s->height);
1335 if (src_y == s->height)
1338 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
1339 if(s->flags&CODEC_FLAG_EMU_EDGE){
1340 if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 8 > s->h_edge_pos
1341 || src_y + (motion_y&1) + 8 > s->v_edge_pos){
1342 emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1343 ptr= s->edge_emu_buffer;
1346 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
1347 pix_op[1][dxy](dest, ptr, s->linesize, 8);
1349 mx += s->mv[dir][i][0];
1350 my += s->mv[dir][i][1];
1354 if(s->flags&CODEC_FLAG_GRAY) break;
1355 /* In case of 8X8, we construct a single chroma motion vector
1356 with a special rounding */
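/* For example, four luma MVs summing to mx == 5 (half-pel units) would ideally give a
   chroma vector of 5/8 = 0.625 half-pels; the table picks the half-pel position:
   h263_chroma_roundtab[5] + ((5 >> 3) & ~1) == 1. */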
1360 mx = (h263_chroma_roundtab[mx & 0xf] + ((mx >> 3) & ~1));
1363 mx = -(h263_chroma_roundtab[mx & 0xf] + ((mx >> 3) & ~1));
1366 my = (h263_chroma_roundtab[my & 0xf] + ((my >> 3) & ~1));
1369 my = -(h263_chroma_roundtab[my & 0xf] + ((my >> 3) & ~1));
1371 dxy = ((my & 1) << 1) | (mx & 1);
1375 src_x = mb_x * 8 + mx;
1376 src_y = mb_y * 8 + my;
1377 src_x = clip(src_x, -8, s->width/2);
1378 if (src_x == s->width/2)
1380 src_y = clip(src_y, -8, s->height/2);
1381 if (src_y == s->height/2)
1384 offset = (src_y * (s->uvlinesize)) + src_x;
1385 ptr = ref_picture[1] + offset;
1386 if(s->flags&CODEC_FLAG_EMU_EDGE){
1387 if(src_x<0 || src_y<0 || src_x + (dxy &1) + 8 > s->h_edge_pos>>1
1388 || src_y + (dxy>>1) + 8 > s->v_edge_pos>>1){
1389 emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1390 ptr= s->edge_emu_buffer;
1394 pix_op[1][dxy](dest_cb, ptr, s->uvlinesize, 8);
1396 ptr = ref_picture[2] + offset;
1398 emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1399 ptr= s->edge_emu_buffer;
1401 pix_op[1][dxy](dest_cr, ptr, s->uvlinesize, 8);
1404 if (s->picture_structure == PICT_FRAME) {
1405 if(s->quarter_sample){
1407 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
1408 ref_picture, s->field_select[dir][0] ? s->linesize : 0,
1410 s->mv[dir][0][0], s->mv[dir][0][1], 8);
1412 qpel_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
1413 ref_picture, s->field_select[dir][1] ? s->linesize : 0,
1415 s->mv[dir][1][0], s->mv[dir][1][1], 8);
1418 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
1419 ref_picture, s->field_select[dir][0] ? s->linesize : 0,
1421 s->mv[dir][0][0], s->mv[dir][0][1], 8);
1423 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
1424 ref_picture, s->field_select[dir][1] ? s->linesize : 0,
1426 s->mv[dir][1][0], s->mv[dir][1][1], 8);
1437 /* put block[] to dest[] */
1438 static inline void put_dct(MpegEncContext *s,
1439 DCTELEM *block, int i, UINT8 *dest, int line_size)
1442 s->dct_unquantize(s, block, i, s->qscale);
1443 ff_idct_put (dest, line_size, block);
1446 /* add block[] to dest[] */
1447 static inline void add_dct(MpegEncContext *s,
1448 DCTELEM *block, int i, UINT8 *dest, int line_size)
1450 if (s->block_last_index[i] >= 0) {
1451 ff_idct_add (dest, line_size, block);
1455 static inline void add_dequant_dct(MpegEncContext *s,
1456 DCTELEM *block, int i, UINT8 *dest, int line_size)
1458 if (s->block_last_index[i] >= 0) {
1459 s->dct_unquantize(s, block, i, s->qscale);
1461 ff_idct_add (dest, line_size, block);
1466 * cleans dc, ac, coded_block for the current non-intra MB
1468 void ff_clean_intra_table_entries(MpegEncContext *s)
1470 int wrap = s->block_wrap[0];
1471 int xy = s->block_index[0];
1474 s->dc_val[0][xy + 1 ] =
1475 s->dc_val[0][xy + wrap] =
1476 s->dc_val[0][xy + 1 + wrap] = 1024;
1478 memset(s->ac_val[0][xy ], 0, 32 * sizeof(INT16));
1479 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(INT16));
1480 if (s->msmpeg4_version>=3) {
1481 s->coded_block[xy ] =
1482 s->coded_block[xy + 1 ] =
1483 s->coded_block[xy + wrap] =
1484 s->coded_block[xy + 1 + wrap] = 0;
1487 wrap = s->block_wrap[4];
1488 xy = s->mb_x + 1 + (s->mb_y + 1) * wrap;
1490 s->dc_val[2][xy] = 1024;
1492 memset(s->ac_val[1][xy], 0, 16 * sizeof(INT16));
1493 memset(s->ac_val[2][xy], 0, 16 * sizeof(INT16));
1495 s->mbintra_table[s->mb_x + s->mb_y*s->mb_width]= 0;
1498 /* generic function called after a macroblock has been parsed by the
1499 decoder or after it has been encoded by the encoder.
1501 Important variables used:
1502 s->mb_intra : true if intra macroblock
1503 s->mv_dir : motion vector direction
1504 s->mv_type : motion vector type
1505 s->mv : motion vector
1506 s->interlaced_dct : true if interlaced dct used (mpeg2)
1508 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
1511 const int mb_xy = s->mb_y * s->mb_width + s->mb_x;
1516 #ifdef FF_POSTPROCESS
1517 /* Obsolete. Exists for compatibility with mplayer only. */
1518 quant_store[mb_y][mb_x]=s->qscale;
1519 //printf("[%02d][%02d] %d\n",mb_x,mb_y,s->qscale);
1521 /* even more obsolete, exists for mplayer xp only */
1522 if(s->avctx->quant_store) s->avctx->quant_store[mb_y*s->avctx->qstride+mb_x] = s->qscale;
1524 s->qscale_table[mb_xy]= s->qscale;
1526 /* update DC predictors for P macroblocks */
1528 if (s->h263_pred || s->h263_aic) {
1529 if(s->mbintra_table[mb_xy])
1530 ff_clean_intra_table_entries(s);
1534 s->last_dc[2] = 128 << s->intra_dc_precision;
1537 else if (s->h263_pred || s->h263_aic)
1538 s->mbintra_table[mb_xy]=1;
1540 /* update the motion predictor, but not for B-frames as they need the motion_val from the last P/S-frame */
1541 if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE) { //FIXME move into h263.c if possible, format-specific stuff shouldn't be here
1543 const int wrap = s->block_wrap[0];
1544 const int xy = s->block_index[0];
1545 const int mb_index= s->mb_x + s->mb_y*s->mb_width;
1546 if(s->mv_type == MV_TYPE_8X8){
1547 s->co_located_type_table[mb_index]= CO_LOCATED_TYPE_4MV;
1549 int motion_x, motion_y;
1553 if(s->co_located_type_table)
1554 s->co_located_type_table[mb_index]= 0;
1555 } else if (s->mv_type == MV_TYPE_16X16) {
1556 motion_x = s->mv[0][0][0];
1557 motion_y = s->mv[0][0][1];
1558 if(s->co_located_type_table)
1559 s->co_located_type_table[mb_index]= 0;
1560 } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
1562 motion_x = s->mv[0][0][0] + s->mv[0][1][0];
1563 motion_y = s->mv[0][0][1] + s->mv[0][1][1];
1564 motion_x = (motion_x>>1) | (motion_x&1);
1566 s->field_mv_table[mb_index][i][0]= s->mv[0][i][0];
1567 s->field_mv_table[mb_index][i][1]= s->mv[0][i][1];
1568 s->field_select_table[mb_index][i]= s->field_select[0][i];
1570 s->co_located_type_table[mb_index]= CO_LOCATED_TYPE_FIELDMV;
1572 /* no update if 8X8 because it has been done during parsing */
1573 s->motion_val[xy][0] = motion_x;
1574 s->motion_val[xy][1] = motion_y;
1575 s->motion_val[xy + 1][0] = motion_x;
1576 s->motion_val[xy + 1][1] = motion_y;
1577 s->motion_val[xy + wrap][0] = motion_x;
1578 s->motion_val[xy + wrap][1] = motion_y;
1579 s->motion_val[xy + 1 + wrap][0] = motion_x;
1580 s->motion_val[xy + 1 + wrap][1] = motion_y;
1584 if (!(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) {
1585 UINT8 *dest_y, *dest_cb, *dest_cr;
1586 int dct_linesize, dct_offset;
1587 op_pixels_func (*op_pix)[4];
1588 qpel_mc_func (*op_qpix)[16];
1590 /* avoid the copy if the macroblock was skipped in the last frame too;
1591 don't touch it for B-frames as they need the skip info from the next P-frame */
1592 if (s->pict_type != B_TYPE) {
1593 UINT8 *mbskip_ptr = &s->mbskip_table[mb_xy];
1597 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
1598 if(*mbskip_ptr >99) *mbskip_ptr= 99;
1600 /* if the previous MB was skipped too, then there is nothing to do!
1601 skip only during decoding, as we might trash the buffers a bit during encoding */
1602 if (*mbskip_ptr >= s->ip_buffer_count && !s->encoding)
1605 *mbskip_ptr = 0; /* not skipped */
1609 dest_y = s->current_picture [0] + (mb_y * 16* s->linesize ) + mb_x * 16;
1610 dest_cb = s->current_picture[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
1611 dest_cr = s->current_picture[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
1613 if (s->interlaced_dct) {
1614 dct_linesize = s->linesize * 2;
1615 dct_offset = s->linesize;
1617 dct_linesize = s->linesize;
1618 dct_offset = s->linesize * 8;
1622 /* motion handling */
1623 /* decoding or more than one mb_type (MC was already done otherwise) */
1624 if((!s->encoding) || (s->mb_type[mb_xy]&(s->mb_type[mb_xy]-1))){
1625 if ((!s->no_rounding) || s->pict_type==B_TYPE){
1626 op_pix = put_pixels_tab;
1627 op_qpix= put_qpel_pixels_tab;
1629 op_pix = put_no_rnd_pixels_tab;
1630 op_qpix= put_no_rnd_qpel_pixels_tab;
1633 if (s->mv_dir & MV_DIR_FORWARD) {
1634 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture, op_pix, op_qpix);
1635 op_pix = avg_pixels_tab;
1636 op_qpix= avg_qpel_pixels_tab;
1638 if (s->mv_dir & MV_DIR_BACKWARD) {
1639 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture, op_pix, op_qpix);
1643 /* skip dequant / idct if we are really late ;) */
1644 if(s->hurry_up>1) goto the_end;
1646 /* add dct residue */
1647 if(s->encoding || !(s->mpeg2 || s->h263_msmpeg4 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
1648 add_dequant_dct(s, block[0], 0, dest_y, dct_linesize);
1649 add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize);
1650 add_dequant_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
1651 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
1653 if(!(s->flags&CODEC_FLAG_GRAY)){
1654 add_dequant_dct(s, block[4], 4, dest_cb, s->uvlinesize);
1655 add_dequant_dct(s, block[5], 5, dest_cr, s->uvlinesize);
1658 add_dct(s, block[0], 0, dest_y, dct_linesize);
1659 add_dct(s, block[1], 1, dest_y + 8, dct_linesize);
1660 add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
1661 add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
1663 if(!(s->flags&CODEC_FLAG_GRAY)){
1664 add_dct(s, block[4], 4, dest_cb, s->uvlinesize);
1665 add_dct(s, block[5], 5, dest_cr, s->uvlinesize);
1669 /* intra block: no motion compensation, just dequantize/idct the coefficients into place */
1670 put_dct(s, block[0], 0, dest_y, dct_linesize);
1671 put_dct(s, block[1], 1, dest_y + 8, dct_linesize);
1672 put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
1673 put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
1675 if(!(s->flags&CODEC_FLAG_GRAY)){
1676 put_dct(s, block[4], 4, dest_cb, s->uvlinesize);
1677 put_dct(s, block[5], 5, dest_cr, s->uvlinesize);
1682 emms_c(); //FIXME remove
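/* Roughly: if a block contains only a few small (|level| == 1) coefficients whose
   position-weighted score stays below the luma/chroma elimination threshold, zero the
   whole block (keeping at most its DC value) so that almost no bits are spent on it. */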
1685 static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
1687 static const char tab[64]=
1699 DCTELEM *block= s->block[n];
1700 const int last_index= s->block_last_index[n];
1705 threshold= -threshold;
1709 /* are all the coefficients which we could set to zero already zero? */
1710 if(last_index<=skip_dc - 1) return;
1712 for(i=0; i<=last_index; i++){
1713 const int j = zigzag_direct[i];
1714 const int level = ABS(block[j]);
1716 if(skip_dc && i==0) continue;
1725 if(score >= threshold) return;
1726 for(i=skip_dc; i<=last_index; i++){
1727 const int j = zigzag_direct[i];
1730 if(block[0]) s->block_last_index[n]= 0;
1731 else s->block_last_index[n]= -1;
1734 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index)
1737 const int maxlevel= s->max_qcoeff;
1738 const int minlevel= s->min_qcoeff;
1740 for(i=0;i<=last_index; i++){
1741 const int j = zigzag_direct[i];
1742 int level = block[j];
1744 if (level>maxlevel) level=maxlevel;
1745 else if(level<minlevel) level=minlevel;
1750 static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
1752 const int mb_x= s->mb_x;
1753 const int mb_y= s->mb_y;
1757 if (s->interlaced_dct) {
1758 dct_linesize = s->linesize * 2;
1759 dct_offset = s->linesize;
1761 dct_linesize = s->linesize;
1762 dct_offset = s->linesize * 8;
1765 for(i=0; i<6; i++) skip_dct[i]=0;
1767 if(s->adaptive_quant){
1768 s->dquant= s->qscale_table[mb_x + mb_y*s->mb_width] - s->qscale;
1769 if(s->codec_id==CODEC_ID_MPEG4){
1770 if (s->dquant> 2) s->dquant= 2;
1771 else if(s->dquant<-2) s->dquant=-2;
1774 assert(s->dquant==0 || s->mv_type!=MV_TYPE_8X8);
1776 if(s->mv_dir&MV_DIRECT)
1780 s->qscale+= s->dquant;
1781 s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
1782 s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
1791 ptr = s->new_picture[0] + (mb_y * 16 * wrap) + mb_x * 16;
1792 if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
1793 emulated_edge_mc(s, ptr, wrap, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
1794 ptr= s->edge_emu_buffer;
1797 get_pixels(s->block[0], ptr , wrap);
1798 get_pixels(s->block[1], ptr + 8, wrap);
1799 get_pixels(s->block[2], ptr + 8 * wrap , wrap);
1800 get_pixels(s->block[3], ptr + 8 * wrap + 8, wrap);
1802 if(s->flags&CODEC_FLAG_GRAY){
1807 ptr = s->new_picture[1] + (mb_y * 8 * wrap) + mb_x * 8;
1809 emulated_edge_mc(s, ptr, wrap, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
1810 ptr= s->edge_emu_buffer;
1812 get_pixels(s->block[4], ptr, wrap);
1814 ptr = s->new_picture[2] + (mb_y * 8 * wrap) + mb_x * 8;
1816 emulated_edge_mc(s, ptr, wrap, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
1817 ptr= s->edge_emu_buffer;
1819 get_pixels(s->block[5], ptr, wrap);
1822 op_pixels_func (*op_pix)[4];
1823 qpel_mc_func (*op_qpix)[16];
1824 UINT8 *dest_y, *dest_cb, *dest_cr;
1825 UINT8 *ptr_y, *ptr_cb, *ptr_cr;
1829 dest_y = s->current_picture[0] + (mb_y * 16 * s->linesize ) + mb_x * 16;
1830 dest_cb = s->current_picture[1] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
1831 dest_cr = s->current_picture[2] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
1832 wrap_y = s->linesize;
1834 ptr_y = s->new_picture[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
1835 ptr_cb = s->new_picture[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
1836 ptr_cr = s->new_picture[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
1838 if ((!s->no_rounding) || s->pict_type==B_TYPE){
1839 op_pix = put_pixels_tab;
1840 op_qpix= put_qpel_pixels_tab;
1842 op_pix = put_no_rnd_pixels_tab;
1843 op_qpix= put_no_rnd_qpel_pixels_tab;
1846 if (s->mv_dir & MV_DIR_FORWARD) {
1847 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture, op_pix, op_qpix);
1848 op_pix = avg_pixels_tab;
1849 op_qpix= avg_qpel_pixels_tab;
1851 if (s->mv_dir & MV_DIR_BACKWARD) {
1852 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture, op_pix, op_qpix);
1855 if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
1856 emulated_edge_mc(s, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
1857 ptr_y= s->edge_emu_buffer;
1860 diff_pixels(s->block[0], ptr_y , dest_y , wrap_y);
1861 diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1862 diff_pixels(s->block[2], ptr_y + 8 * wrap_y , dest_y + 8 * wrap_y , wrap_y);
1863 diff_pixels(s->block[3], ptr_y + 8 * wrap_y + 8, dest_y + 8 * wrap_y + 8, wrap_y);
1865 if(s->flags&CODEC_FLAG_GRAY){
1870 emulated_edge_mc(s, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
1871 ptr_cb= s->edge_emu_buffer;
1873 diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1875 emulated_edge_mc(s, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
1876 ptr_cr= s->edge_emu_buffer;
1878 diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1881 /* pre quantization */
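/* if the motion-compensated variance of this MB is already very low, use a cheap SAD
   check per 8x8 block to guess that the residual would quantize to zero and skip its
   DCT entirely; the 20*qscale threshold looks empirical */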
1882 if(s->mc_mb_var[s->mb_width*mb_y+ mb_x]<2*s->qscale*s->qscale){
1883 if(pix_abs8x8(ptr_y , dest_y , wrap_y) < 20*s->qscale) skip_dct[0]= 1;
1884 if(pix_abs8x8(ptr_y + 8, dest_y + 8, wrap_y) < 20*s->qscale) skip_dct[1]= 1;
1885 if(pix_abs8x8(ptr_y + 8*wrap_y , dest_y + 8*wrap_y , wrap_y) < 20*s->qscale) skip_dct[2]= 1;
1886 if(pix_abs8x8(ptr_y + 8*wrap_y + 8, dest_y + 8*wrap_y + 8, wrap_y) < 20*s->qscale) skip_dct[3]= 1;
1887 if(pix_abs8x8(ptr_cb , dest_cb , wrap_y) < 20*s->qscale) skip_dct[4]= 1;
1888 if(pix_abs8x8(ptr_cr , dest_cr , wrap_y) < 20*s->qscale) skip_dct[5]= 1;
1894 if(skip_dct[i]) num++;
1897 if(s->mb_x==0 && s->mb_y==0){
1899 printf("%6d %1d\n", stat[i], i);
1912 adap_parm = ((s->avg_mb_var << 1) + s->mb_var[s->mb_width*mb_y+mb_x] + 1.0) /
1913 ((s->mb_var[s->mb_width*mb_y+mb_x] << 1) + s->avg_mb_var + 1.0);
1915 printf("\ntype=%c qscale=%2d adap=%0.2f dquant=%4.2f var=%4d avgvar=%4d",
1916 (s->mb_type[s->mb_width*mb_y+mb_x] > 0) ? 'I' : 'P',
1917 s->qscale, adap_parm, s->qscale*adap_parm,
1918 s->mb_var[s->mb_width*mb_y+mb_x], s->avg_mb_var);
1921 /* DCT & quantize */
1922 if(s->out_format==FMT_MJPEG){
1925 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, 8, &overflow);
1926 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
1932 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1933 // FIXME we could decide to change the quantizer instead of clipping
1934 // JS: I don't think that would be a good idea, it could lower quality instead
1935 // of improving it. Only INTRADC clipping deserves changes in the quantizer
1936 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
1938 s->block_last_index[i]= -1;
1940 if(s->luma_elim_threshold && !s->mb_intra)
1942 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
1943 if(s->chroma_elim_threshold && !s->mb_intra)
1945 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
1948 if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
1949 s->block_last_index[4]=
1950 s->block_last_index[5]= 0;
1952 s->block[5][0]= 128;
1955 /* huffman encode */
1956 switch(s->out_format) {
1958 mpeg1_encode_mb(s, s->block, motion_x, motion_y);
1961 if (s->h263_msmpeg4)
1962 msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
1963 else if(s->h263_pred)
1964 mpeg4_encode_mb(s, s->block, motion_x, motion_y);
1966 h263_encode_mb(s, s->block, motion_x, motion_y);
1969 mjpeg_encode_mb(s, s->block);
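/* Copies 'length' bits from 'src' (assumed to start byte-aligned) into 'pb', 16 bits
   at a time plus a final partial chunk. A hedged usage sketch (the real partition
   merging lives in the codec-specific code):

       ff_copy_bits(&s->pb, s->tex_pb_buffer, get_bit_count(&s->tex_pb));
*/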
1974 void ff_copy_bits(PutBitContext *pb, UINT8 *src, int length)
1976 int bytes= length>>4;
1977 int bits= length&15;
1980 if(length==0) return;
1982 for(i=0; i<bytes; i++) put_bits(pb, 16, be2me_16(((uint16_t*)src)[i]));
1983 put_bits(pb, bits, be2me_16(((uint16_t*)src)[i])>>(16-bits));
1986 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
1989 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
1992 d->mb_incr= s->mb_incr;
1994 d->last_dc[i]= s->last_dc[i];
1997 d->mv_bits= s->mv_bits;
1998 d->i_tex_bits= s->i_tex_bits;
1999 d->p_tex_bits= s->p_tex_bits;
2000 d->i_count= s->i_count;
2001 d->f_count= s->f_count;
2002 d->b_count= s->b_count;
2003 d->skip_count= s->skip_count;
2004 d->misc_bits= s->misc_bits;
2007 d->mb_skiped= s->mb_skiped;
2010 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2013 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2014 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2017 d->mb_incr= s->mb_incr;
2019 d->last_dc[i]= s->last_dc[i];
2022 d->mv_bits= s->mv_bits;
2023 d->i_tex_bits= s->i_tex_bits;
2024 d->p_tex_bits= s->p_tex_bits;
2025 d->i_count= s->i_count;
2026 d->f_count= s->f_count;
2027 d->b_count= s->b_count;
2028 d->skip_count= s->skip_count;
2029 d->misc_bits= s->misc_bits;
2031 d->mb_intra= s->mb_intra;
2032 d->mb_skiped= s->mb_skiped;
2033 d->mv_type= s->mv_type;
2034 d->mv_dir= s->mv_dir;
2036 if(s->data_partitioning){
2038 d->tex_pb= s->tex_pb;
2042 d->block_last_index[i]= s->block_last_index[i];
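/* Trial-encodes the current macroblock as 'type' into one of two scratch bitstream
   contexts and, if it needs fewer bits than the best candidate so far, keeps the
   resulting context and bit count; used for the encoder's macroblock mode decision. */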
2045 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2046 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2047 int *dmin, int *next_block, int motion_x, int motion_y)
2051 copy_context_before_encode(s, backup, type);
2053 s->block= s->blocks[*next_block];
2054 s->pb= pb[*next_block];
2055 if(s->data_partitioning){
2056 s->pb2 = pb2 [*next_block];
2057 s->tex_pb= tex_pb[*next_block];
2060 encode_mb(s, motion_x, motion_y);
2062 bits_count= get_bit_count(&s->pb);
2063 if(s->data_partitioning){
2064 bits_count+= get_bit_count(&s->pb2);
2065 bits_count+= get_bit_count(&s->tex_pb);
2068 if(bits_count<*dmin){
2072 copy_context_after_encode(best, s, type);
2076 static void encode_picture(MpegEncContext *s, int picture_number)
2078 int mb_x, mb_y, last_gob, pdif = 0;
2081 MpegEncContext best_s, backup_s;
2082 UINT8 bit_buf[2][3000];
2083 UINT8 bit_buf2[2][3000];
2084 UINT8 bit_buf_tex[2][3000];
2085 PutBitContext pb[2], pb2[2], tex_pb[2];
2088 init_put_bits(&pb [i], bit_buf [i], 3000, NULL, NULL);
2089 init_put_bits(&pb2 [i], bit_buf2 [i], 3000, NULL, NULL);
2090 init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000, NULL, NULL);
2093 s->picture_number = picture_number;
2098 s->block_wrap[3]= s->mb_width*2 + 2;
2100 s->block_wrap[5]= s->mb_width + 2;
2102 /* Reset the average MB variance */
2104 s->mc_mb_var_sum = 0;
2106 /* we need to initialize some time vars before we can encode b-frames */
2107 if (s->h263_pred && !s->h263_msmpeg4)
2108 ff_set_mpeg4_time(s, s->picture_number);
2110 s->scene_change_score=0;
2112 s->qscale= (int)(s->frame_qscale + 0.5); //FIXME qscale / ... stuff for ME rate distortion
2114 /* Estimate motion for every MB */
2115 if(s->pict_type != I_TYPE){
2116 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
2117 s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
2118 s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
2119 s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
2120 s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
2121 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2124 s->block_index[0]+=2;
2125 s->block_index[1]+=2;
2126 s->block_index[2]+=2;
2127 s->block_index[3]+=2;
2129 /* compute motion vector & mb_type and store in context */
2130 if(s->pict_type==B_TYPE)
2131 ff_estimate_b_frame_motion(s, mb_x, mb_y);
2133 ff_estimate_p_frame_motion(s, mb_x, mb_y);
2134 // s->mb_type[mb_y*s->mb_width + mb_x]=MB_TYPE_INTER;
2138 }else /* if(s->pict_type == I_TYPE) */{
2140 //FIXME do we need to zero them?
2141 memset(s->motion_val[0], 0, sizeof(INT16)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2);
2142 memset(s->p_mv_table , 0, sizeof(INT16)*(s->mb_width+2)*(s->mb_height+2)*2);
2143 memset(s->mb_type , MB_TYPE_INTRA, sizeof(UINT8)*s->mb_width*s->mb_height);
2145 if(!s->fixed_qscale){
2146 /* finding spatial complexity for I-frame rate control */
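/* pix_sum()/pix_norm1() return the pixel sum and the sum of squares of the
   16x16 luma block; varc approximates its spatial variance, and mb_var_sum,
   accumulated over the whole frame, drives the I-frame rate control */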
2147 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
2148 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2151 uint8_t *pix = s->new_picture[0] + (yy * s->linesize) + xx;
2153 int sum = pix_sum(pix, s->linesize);
2156 varc = (pix_norm1(pix, s->linesize) - sum*sum + 500 + 128)>>8;
2158 s->mb_var [s->mb_width * mb_y + mb_x] = varc;
2159 s->mb_mean[s->mb_width * mb_y + mb_x] = (sum+7)>>4;
2160 s->mb_var_sum += varc;
2165 if(s->scene_change_score > 0 && s->pict_type == P_TYPE){
2166 s->pict_type= I_TYPE;
2167 memset(s->mb_type , MB_TYPE_INTRA, sizeof(UINT8)*s->mb_width*s->mb_height);
2168 if(s->max_b_frames==0){
2169 s->input_pict_type= I_TYPE;
2170 s->input_picture_in_gop_number=0;
2172 //printf("Scene change detected, encoding as I Frame %d %d\n", s->mb_var_sum, s->mc_mb_var_sum);
2175 if(s->pict_type==P_TYPE || s->pict_type==S_TYPE)
2176 s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER);
2177 ff_fix_long_p_mvs(s);
2178 if(s->pict_type==B_TYPE){
2179 s->f_code= ff_get_best_fcode(s, s->b_forw_mv_table, MB_TYPE_FORWARD);
2180 s->b_code= ff_get_best_fcode(s, s->b_back_mv_table, MB_TYPE_BACKWARD);
2182 ff_fix_long_b_mvs(s, s->b_forw_mv_table, s->f_code, MB_TYPE_FORWARD);
2183 ff_fix_long_b_mvs(s, s->b_back_mv_table, s->b_code, MB_TYPE_BACKWARD);
2184 ff_fix_long_b_mvs(s, s->b_bidir_forw_mv_table, s->f_code, MB_TYPE_BIDIR);
2185 ff_fix_long_b_mvs(s, s->b_bidir_back_mv_table, s->b_code, MB_TYPE_BIDIR);
2188 if (s->fixed_qscale)
2189 s->frame_qscale = s->avctx->quality;
2191 s->frame_qscale = ff_rate_estimate_qscale(s);
2193 if(s->adaptive_quant && s->codec_id==CODEC_ID_MPEG4)
2194 ff_clean_mpeg4_qscales(s);
2196 if(s->adaptive_quant)
2197 s->qscale= s->qscale_table[0];
2199 s->qscale= (int)(s->frame_qscale + 0.5);
2201 if (s->out_format == FMT_MJPEG) {
2202 /* for mjpeg, we do include qscale in the matrix */
2203 s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
2205 s->intra_matrix[i] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
2206 convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
2207 s->q_intra_matrix16_bias, s->intra_matrix, s->intra_quant_bias);
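/* MJPEG has no per-MB quantizer, so the frame qscale chosen above is folded
 * directly into the quantization matrix ((default matrix * qscale) >> 3,
 * clamped to 8 bits) before the qmat tables are rebuilt. */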
2210 s->last_bits= get_bit_count(&s->pb);
2211 switch(s->out_format) {
2213 mjpeg_picture_header(s);
2216 if (s->h263_msmpeg4)
2217 msmpeg4_encode_picture_header(s, picture_number);
2218 else if (s->h263_pred)
2219 mpeg4_encode_picture_header(s, picture_number);
2220 else if (s->h263_rv10)
2221 rv10_encode_picture_header(s, picture_number);
2223 h263_encode_picture_header(s, picture_number);
2226 mpeg1_encode_picture_header(s, picture_number);
2229 bits= get_bit_count(&s->pb);
2230 s->header_bits= bits - s->last_bits;
2241 /* init last dc values */
2242 /* note: quant matrix value (8) is implied here */
2243 s->last_dc[0] = 128;
2244 s->last_dc[1] = 128;
2245 s->last_dc[2] = 128;
2247 s->last_mv[0][0][0] = 0;
2248 s->last_mv[0][0][1] = 0;
2250 /* Get the GOB height based on picture height */
2251 if (s->out_format == FMT_H263 && !s->h263_pred && !s->h263_msmpeg4) {
2252 if (s->height <= 400)
2254 else if (s->height <= 800)
2258 }else if(s->codec_id==CODEC_ID_MPEG4){
2262 if(s->codec_id==CODEC_ID_MPEG4 && s->data_partitioning && s->pict_type!=B_TYPE)
2263 ff_mpeg4_init_partitions(s);
2267 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
2268 /* Put a GOB header based on the RTP MTU for formats which support it per line (H263*) */
2269 /* TODO: Put all this stuff in a separate generic function */
2272 s->ptr_lastgob = s->pb.buf;
2273 s->ptr_last_mb_line = s->pb.buf;
2274 } else if (s->out_format == FMT_H263 && !s->h263_pred && !s->h263_msmpeg4 && !(mb_y % s->gob_index)) {
2275 // MN: we could move the space check from h263 -> here, as it's not h263-specific
2276 last_gob = h263_encode_gob_header(s, mb_y);
2278 s->first_slice_line = 1;
2280 /* MN: we reset it here instead of at the end of each line because mpeg4 can have
2281 slice lines starting & ending in the middle */
2282 s->first_slice_line = 0;
2287 s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
2288 s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
2290 s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
2291 s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
2292 s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
2293 s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
2294 s->block_index[4]= s->block_wrap[4]*(mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2);
2295 s->block_index[5]= s->block_wrap[4]*(mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2);
2296 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2297 const int mb_type= s->mb_type[mb_y * s->mb_width + mb_x];
2298 const int xy= (mb_y+1) * (s->mb_width+2) + mb_x + 1;
2304 s->block_index[0]+=2;
2305 s->block_index[1]+=2;
2306 s->block_index[2]+=2;
2307 s->block_index[3]+=2;
2308 s->block_index[4]++;
2309 s->block_index[5]++;
2311 /* write gob / video packet header for formats which support it at any MB (MPEG4) */
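/* heuristic: pdif counts the bytes written since the last resync point; if
 * adding roughly one MB's share of the average line size would push the
 * packet past rtp_payload_size, merge the current partitions, write a video
 * packet header (resync marker) and restart the packet at this MB */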
2312 if(s->rtp_mode && s->mb_y>0 && s->codec_id==CODEC_ID_MPEG4){
2313 int pdif= pbBufPtr(&s->pb) - s->ptr_lastgob;
2315 //add the expected size of the next MB so we stay below the requested packet size
2316 if(pdif + s->mb_line_avgsize/s->mb_width >= s->rtp_payload_size){
2317 if(s->codec_id==CODEC_ID_MPEG4){
2318 if(s->data_partitioning && s->pict_type!=B_TYPE){
2319 ff_mpeg4_merge_partitions(s);
2320 ff_mpeg4_init_partitions(s);
2322 ff_mpeg4_encode_video_packet_header(s);
2324 if(s->flags&CODEC_FLAG_PASS1){
2325 int bits= get_bit_count(&s->pb);
2326 s->misc_bits+= bits - s->last_bits;
2329 ff_mpeg4_clean_buffers(s);
2331 s->ptr_lastgob = pbBufPtr(&s->pb);
2332 s->first_slice_line=1;
2333 s->resync_mb_x=mb_x;
2334 s->resync_mb_y=mb_y;
2337 if( (s->resync_mb_x == s->mb_x)
2338 && s->resync_mb_y+1 == s->mb_y){
2339 s->first_slice_line=0;
2343 if(mb_type & (mb_type-1)){ // more than 1 MB type possible
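/* several candidate MB types survived motion estimation: trial-encode each
 * of them with encode_mb_hq() into the scratch bit buffers and keep the one
 * that costs the fewest bits */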
2345 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2347 copy_context_before_encode(&backup_s, s, -1);
2349 best_s.data_partitioning= s->data_partitioning;
2350 if(s->data_partitioning){
2351 backup_s.pb2= s->pb2;
2352 backup_s.tex_pb= s->tex_pb;
2355 if(mb_type&MB_TYPE_INTER){
2356 s->mv_dir = MV_DIR_FORWARD;
2357 s->mv_type = MV_TYPE_16X16;
2359 s->mv[0][0][0] = s->p_mv_table[xy][0];
2360 s->mv[0][0][1] = s->p_mv_table[xy][1];
2361 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER, pb, pb2, tex_pb,
2362 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2364 if(mb_type&MB_TYPE_INTER4V){
2365 s->mv_dir = MV_DIR_FORWARD;
2366 s->mv_type = MV_TYPE_8X8;
2369 s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
2370 s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
2372 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER4V, pb, pb2, tex_pb,
2373 &dmin, &next_block, 0, 0);
2375 if(mb_type&MB_TYPE_FORWARD){
2376 s->mv_dir = MV_DIR_FORWARD;
2377 s->mv_type = MV_TYPE_16X16;
2379 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2380 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2381 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_FORWARD, pb, pb2, tex_pb,
2382 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2384 if(mb_type&MB_TYPE_BACKWARD){
2385 s->mv_dir = MV_DIR_BACKWARD;
2386 s->mv_type = MV_TYPE_16X16;
2388 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2389 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2390 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2391 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2393 if(mb_type&MB_TYPE_BIDIR){
2394 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2395 s->mv_type = MV_TYPE_16X16;
2397 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2398 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2399 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2400 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2401 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BIDIR, pb, pb2, tex_pb,
2402 &dmin, &next_block, 0, 0);
2404 if(mb_type&MB_TYPE_DIRECT){
2405 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2406 s->mv_type = MV_TYPE_16X16; //FIXME
2408 s->mv[0][0][0] = s->b_direct_forw_mv_table[xy][0];
2409 s->mv[0][0][1] = s->b_direct_forw_mv_table[xy][1];
2410 s->mv[1][0][0] = s->b_direct_back_mv_table[xy][0];
2411 s->mv[1][0][1] = s->b_direct_back_mv_table[xy][1];
2412 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_DIRECT, pb, pb2, tex_pb,
2413 &dmin, &next_block, s->b_direct_mv_table[xy][0], s->b_direct_mv_table[xy][1]);
2415 if(mb_type&MB_TYPE_INTRA){
2416 s->mv_dir = MV_DIR_FORWARD;
2417 s->mv_type = MV_TYPE_16X16;
2421 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTRA, pb, pb2, tex_pb,
2422 &dmin, &next_block, 0, 0);
2423 /* force cleaning of ac/dc pred stuff if needed ... */
2424 if(s->h263_pred || s->h263_aic)
2425 s->mbintra_table[mb_x + mb_y*s->mb_width]=1;
2427 copy_context_after_encode(s, &best_s, -1);
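/* copy the winning candidate's bits from its scratch buffer back into the
 * real bitstream; with data partitioning, pb2 and tex_pb are restored the
 * same way */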
2429 pb_bits_count= get_bit_count(&s->pb);
2430 flush_put_bits(&s->pb);
2431 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2434 if(s->data_partitioning){
2435 pb2_bits_count= get_bit_count(&s->pb2);
2436 flush_put_bits(&s->pb2);
2437 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2438 s->pb2= backup_s.pb2;
2440 tex_pb_bits_count= get_bit_count(&s->tex_pb);
2441 flush_put_bits(&s->tex_pb);
2442 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2443 s->tex_pb= backup_s.tex_pb;
2445 s->last_bits= get_bit_count(&s->pb);
2447 int motion_x, motion_y;
2448 s->mv_type=MV_TYPE_16X16;
2449 // only one MB-Type possible
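// a single candidate remains: set mv_dir/mv_type and the motion vector(s)
// from the matching motion estimation table, then encode the MB directly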
2452 s->mv_dir = MV_DIR_FORWARD;
2454 motion_x= s->mv[0][0][0] = 0;
2455 motion_y= s->mv[0][0][1] = 0;
2458 s->mv_dir = MV_DIR_FORWARD;
2460 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2461 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2463 case MB_TYPE_INTER4V:
2464 s->mv_dir = MV_DIR_FORWARD;
2465 s->mv_type = MV_TYPE_8X8;
2468 s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
2469 s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
2471 motion_x= motion_y= 0;
2473 case MB_TYPE_DIRECT:
2474 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2476 motion_x=s->b_direct_mv_table[xy][0];
2477 motion_y=s->b_direct_mv_table[xy][1];
2478 s->mv[0][0][0] = s->b_direct_forw_mv_table[xy][0];
2479 s->mv[0][0][1] = s->b_direct_forw_mv_table[xy][1];
2480 s->mv[1][0][0] = s->b_direct_back_mv_table[xy][0];
2481 s->mv[1][0][1] = s->b_direct_back_mv_table[xy][1];
2484 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2488 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2489 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2490 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2491 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2493 case MB_TYPE_BACKWARD:
2494 s->mv_dir = MV_DIR_BACKWARD;
2496 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2497 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2499 case MB_TYPE_FORWARD:
2500 s->mv_dir = MV_DIR_FORWARD;
2502 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2503 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2504 // printf(" %d %d ", motion_x, motion_y);
2507 motion_x=motion_y=0; //gcc warning fix
2508 printf("illegal MB type\n");
2510 encode_mb(s, motion_x, motion_y);
2512 /* zero the MV table entry for intra MBs in I/P/S frames; B-frame direct mode reads the co-located vectors */
2513 if(s->mb_intra /* && I,P,S_TYPE */){
2514 s->p_mv_table[xy][0]=0;
2515 s->p_mv_table[xy][1]=0;
2518 MPV_decode_mb(s, s->block);
2519 //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_width, get_bit_count(&s->pb));
2523 /* Obtain average GOB size for RTP */
2526 s->mb_line_avgsize = pbBufPtr(&s->pb) - s->ptr_last_mb_line;
2527 else if (!(mb_y % s->gob_index)) {
2528 s->mb_line_avgsize = (s->mb_line_avgsize + pbBufPtr(&s->pb) - s->ptr_last_mb_line) >> 1;
2529 s->ptr_last_mb_line = pbBufPtr(&s->pb);
2531 //fprintf(stderr, "\nMB line: %d\tSize: %u\tAvg. Size: %u", s->mb_y,
2532 // (s->pb.buf_ptr - s->ptr_last_mb_line), s->mb_line_avgsize);
2533 if(s->codec_id!=CODEC_ID_MPEG4) s->first_slice_line = 0; //FIXME clean
2538 if(s->codec_id==CODEC_ID_MPEG4 && s->data_partitioning && s->pict_type!=B_TYPE)
2539 ff_mpeg4_merge_partitions(s);
2541 if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
2542 msmpeg4_encode_ext_header(s);
2544 if(s->codec_id==CODEC_ID_MPEG4)
2545 ff_mpeg4_stuffing(&s->pb);
2547 //if (s->gob_number)
2548 // fprintf(stderr,"\nNumber of GOB: %d", s->gob_number);
2550 /* Send the last GOB if RTP */
2552 flush_put_bits(&s->pb);
2553 pdif = pbBufPtr(&s->pb) - s->ptr_lastgob;
2554 /* Call the RTP callback to send the last GOB */
2555 if (s->rtp_callback)
2556 s->rtp_callback(s->ptr_lastgob, pdif, s->gob_number);
2557 s->ptr_lastgob = pbBufPtr(&s->pb);
2558 //fprintf(stderr,"\nGOB: %2d size: %d (last)", s->gob_number, pdif);
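/* Quantize one 8x8 block of DCT coefficients in place.  Each AC coefficient
 * is scaled by the precomputed qmat[] entry plus the quantizer bias, i.e.
 * roughly q = sign(c) * ((|c| * qmat[j] + bias) >> QMAT_SHIFT); threshold1/
 * threshold2 let the "quantizes to zero" case be caught with one unsigned
 * compare.  Returns the index of the last non-zero coefficient in scan order
 * and sets *overflow when a level may exceed the codec's max_qcoeff range. */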
2562 static int dct_quantize_c(MpegEncContext *s,
2563 DCTELEM *block, int n,
2564 int qscale, int *overflow)
2566 int i, j, level, last_non_zero, q;
2570 unsigned int threshold1, threshold2;
2574 /* we need this permutation to correct for the IDCT
2575 permutation; it will be moved into the DCT code */
2576 block_permute(block);
2586 /* For AIC we skip quant/dequant of INTRADC */
2589 /* note: block[0] is assumed to be positive */
2590 block[0] = (block[0] + (q >> 1)) / q;
2593 qmat = s->q_intra_matrix[qscale];
2594 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
2598 qmat = s->q_inter_matrix[qscale];
2599 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
2601 threshold1= (1<<QMAT_SHIFT) - bias - 1;
2602 threshold2= (threshold1<<1);
2605 j = zigzag_direct[i];
2607 level = level * qmat[j];
2609 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
2610 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
2611 if(((unsigned)(level+threshold1))>threshold2){
2613 level= (bias + level)>>QMAT_SHIFT;
2616 level= (bias - level)>>QMAT_SHIFT;
2625 *overflow= s->max_qcoeff < max; //overflow might have happened
2627 return last_non_zero;
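/* MPEG-1 inverse quantization: the intra DC term is scaled by y/c_dc_scale,
 * intra AC is reconstructed as (c * qscale * matrix) >> 3 and inter AC as
 * ((2*c + 1) * qscale * matrix) >> 4; the "(level - 1) | 1" step forces the
 * result to be odd, which is MPEG-1's mismatch control. */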
2630 static void dct_unquantize_mpeg1_c(MpegEncContext *s,
2631 DCTELEM *block, int n, int qscale)
2633 int i, level, nCoeffs;
2634 const UINT16 *quant_matrix;
2636 if(s->alternate_scan) nCoeffs= 64;
2637 else nCoeffs= s->block_last_index[n]+1;
2641 block[0] = block[0] * s->y_dc_scale;
2643 block[0] = block[0] * s->c_dc_scale;
2644 /* XXX: only mpeg1 */
2645 quant_matrix = s->intra_matrix;
2646 for(i=1;i<nCoeffs;i++) {
2647 int j= zigzag_direct[i];
2652 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2653 level = (level - 1) | 1;
2656 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2657 level = (level - 1) | 1;
2660 if (level < -2048 || level > 2047)
2661 fprintf(stderr, "unquant error %d %d\n", i, level);
2668 quant_matrix = s->inter_matrix;
2669 for(;i<nCoeffs;i++) {
2670 int j= zigzag_direct[i];
2675 level = (((level << 1) + 1) * qscale *
2676 ((int) (quant_matrix[j]))) >> 4;
2677 level = (level - 1) | 1;
2680 level = (((level << 1) + 1) * qscale *
2681 ((int) (quant_matrix[j]))) >> 4;
2682 level = (level - 1) | 1;
2685 if (level < -2048 || level > 2047)
2686 fprintf(stderr, "unquant error %d %d\n", i, level);
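/* MPEG-2 inverse quantization: same scaling as the MPEG-1 version above, but
 * without the oddification step; MPEG-2 defines a different mismatch control
 * (a parity correction on the last coefficient) which is not applied here. */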
2694 static void dct_unquantize_mpeg2_c(MpegEncContext *s,
2695 DCTELEM *block, int n, int qscale)
2697 int i, level, nCoeffs;
2698 const UINT16 *quant_matrix;
2700 if(s->alternate_scan) nCoeffs= 64;
2701 else nCoeffs= s->block_last_index[n]+1;
2705 block[0] = block[0] * s->y_dc_scale;
2707 block[0] = block[0] * s->c_dc_scale;
2708 quant_matrix = s->intra_matrix;
2709 for(i=1;i<nCoeffs;i++) {
2710 int j= zigzag_direct[i];
2715 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2718 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2721 if (level < -2048 || level > 2047)
2722 fprintf(stderr, "unquant error %d %d\n", i, level);
2730 quant_matrix = s->inter_matrix;
2731 for(;i<nCoeffs;i++) {
2732 int j= zigzag_direct[i];
2737 level = (((level << 1) + 1) * qscale *
2738 ((int) (quant_matrix[j]))) >> 4;
2741 level = (((level << 1) + 1) * qscale *
2742 ((int) (quant_matrix[j]))) >> 4;
2745 if (level < -2048 || level > 2047)
2746 fprintf(stderr, "unquant error %d %d\n", i, level);
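/* H.263/MPEG-4 style inverse quantization:
 *   level' = sign(level) * (qmul * |level| + qadd)
 * with qmul = 2*qscale and qadd = (qscale - 1) | 1; when AIC is used for
 * intra blocks the rounding offset qadd is not applied. */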
2757 static void dct_unquantize_h263_c(MpegEncContext *s,
2758 DCTELEM *block, int n, int qscale)
2760 int i, level, qmul, qadd;
2766 block[0] = block[0] * s->y_dc_scale;
2768 block[0] = block[0] * s->c_dc_scale;
2771 nCoeffs= 64; //does not always use the zigzag table
2774 nCoeffs= zigzag_end[ s->block_last_index[n] ];
2777 qmul = s->qscale << 1;
2778 if (s->h263_aic && s->mb_intra)
2781 qadd = (s->qscale - 1) | 1;
2783 for(;i<nCoeffs;i++) {
2787 level = level * qmul - qadd;
2789 level = level * qmul + qadd;
2792 if (level < -2048 || level > 2047)
2793 fprintf(stderr, "unquant error %d %d\n", i, level);
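/* error concealment helper: rebuild a damaged MB from the stored DC
 * predictors only, filling each 8x8 luma/chroma block with its DC value
 * (dc/8) and dropping all AC coefficients */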
2800 static void remove_ac(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int mb_x, int mb_y)
2802 int dc, dcb, dcr, y, i;
2804 dc= s->dc_val[0][mb_x*2+1 + (i&1) + (mb_y*2+1 + (i>>1))*(s->mb_width*2+2)];
2808 dest_y[x + (i&1)*8 + (y + (i>>1)*8)*s->linesize]= dc/8;
2812 dcb = s->dc_val[1][mb_x+1 + (mb_y+1)*(s->mb_width+2)];
2813 dcr= s->dc_val[2][mb_x+1 + (mb_y+1)*(s->mb_width+2)];
2817 dest_cb[x + y*(s->uvlinesize)]= dcb/8;
2818 dest_cr[x + y*(s->uvlinesize)]= dcr/8;
2824 * will conceal past errors, and also drop B-frames if needed
2827 void ff_conceal_past_errors(MpegEncContext *s, int unknown_pos)
2832 int i, intra_count=0, inter_count=0;
2833 int intra_conceal= s->msmpeg4_version ? 50 : 50; //FIXME finetune
2834 int inter_conceal= s->msmpeg4_version ? 50 : 50;
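/* intra_conceal / inter_conceal bound how many MBs before the error position
   are concealed; they are adjusted below depending on the kind of error */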
2837 if(mb_x>=s->mb_width) mb_x= s->mb_width -1;
2838 if(mb_y>=s->mb_height) mb_y= s->mb_height-1;
2840 if(s->decoding_error==0 && unknown_pos){
2841 if(s->data_partitioning && s->pict_type!=B_TYPE)
2842 s->decoding_error= DECODING_AC_LOST;
2844 s->decoding_error= DECODING_DESYNC;
2847 if(s->decoding_error==DECODING_DESYNC && s->pict_type!=B_TYPE) s->next_p_frame_damaged=1;
2849 for(i=mb_x + mb_y*s->mb_width; i>=0; i--){
2850 if(s->mbintra_table[i]) intra_count++;
2854 if(s->decoding_error==DECODING_AC_LOST){
2857 }else if(s->decoding_error==DECODING_ACDC_LOST){
2862 if(unknown_pos && (intra_count<inter_count)){
2863 intra_conceal= inter_conceal= s->mb_num;
2864 // printf("%d %d\n",intra_count, inter_count);
2867 fprintf(stderr, "concealing errors\n");
2869 /* for all MBs from the current one back until the last resync marker */
2870 for(; mb_y>=0 && mb_y>=s->resync_mb_y; mb_y--){
2871 for(; mb_x>=0; mb_x--){
2872 uint8_t *dest_y = s->current_picture[0] + (mb_y * 16* s->linesize ) + mb_x * 16;
2873 uint8_t *dest_cb = s->current_picture[1] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
2874 uint8_t *dest_cr = s->current_picture[2] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
2875 int mb_x_backup= s->mb_x; //FIXME pass xy to mpeg_motion
2876 int mb_y_backup= s->mb_y;
2879 if(s->mbintra_table[mb_y*s->mb_width + mb_x] && mb_dist<intra_conceal){
2880 if(s->decoding_error==DECODING_AC_LOST){
2881 remove_ac(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
2882 // printf("remove ac to %d %d\n", mb_x, mb_y);
2884 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2885 s->last_picture, 0, 0, put_pixels_tab,
2886 0/*mx*/, 0/*my*/, 16);
2889 else if(!s->mbintra_table[mb_y*s->mb_width + mb_x] && mb_dist<inter_conceal){
2893 if(s->decoding_error!=DECODING_DESYNC){
2894 int xy= mb_x*2+1 + (mb_y*2+1)*(s->mb_width*2+2);
2895 mx= s->motion_val[ xy ][0];
2896 my= s->motion_val[ xy ][1];
2899 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2900 s->last_picture, 0, 0, put_pixels_tab,
2903 s->mb_x= mb_x_backup;
2904 s->mb_y= mb_y_backup;
2906 if(mb_x== s->resync_mb_x && mb_y== s->resync_mb_y) return;
2907 if(!s->mbskip_table[mb_x + mb_y*s->mb_width]) mb_dist++;
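/* AVCodec descriptors for the encoders implemented in this file */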
2913 AVCodec mpeg1video_encoder = {
2916 CODEC_ID_MPEG1VIDEO,
2917 sizeof(MpegEncContext),
2923 AVCodec h263_encoder = {
2927 sizeof(MpegEncContext),
2933 AVCodec h263p_encoder = {
2937 sizeof(MpegEncContext),
2943 AVCodec rv10_encoder = {
2947 sizeof(MpegEncContext),
2953 AVCodec mjpeg_encoder = {
2957 sizeof(MpegEncContext),
2963 AVCodec mpeg4_encoder = {
2967 sizeof(MpegEncContext),
2973 AVCodec msmpeg4v1_encoder = {
2977 sizeof(MpegEncContext),
2983 AVCodec msmpeg4v2_encoder = {
2987 sizeof(MpegEncContext),
2993 AVCodec msmpeg4v3_encoder = {
2997 sizeof(MpegEncContext),
3003 AVCodec wmv1_encoder = {
3007 sizeof(MpegEncContext),
3013 AVCodec wmv2_encoder = {
3017 sizeof(MpegEncContext),