- put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
- }
- }
- }else{
- s->dsp.idct_put(dest_y , dct_linesize, block[0]);
- s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
- s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
- s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
-
- if(!(s->flags&CODEC_FLAG_GRAY)){
- if(s->chroma_y_shift){
- s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
- s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
- }else{
-
- dct_linesize = uvlinesize << s->interlaced_dct;
- dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
-
- s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
- s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
- s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
- s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
- if(!s->chroma_x_shift){//Chroma444
- s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
- s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
- s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
- s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
- }
- }
- }//gray
- }
- }
-skip_idct:
- if(!readable){
- s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
- s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
- s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
- }
- }
-}
-
-void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
- if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1);
- else MPV_decode_mb_internal(s, block, 0);
-}
-
-#ifdef CONFIG_ENCODERS
-
-static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
-{
- static const char tab[64]=
- {3,2,2,1,1,1,1,1,
- 1,1,1,1,1,1,1,1,
- 1,1,1,1,1,1,1,1,
- 0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0};
- int score=0;
- int run=0;
- int i;
- DCTELEM *block= s->block[n];
- const int last_index= s->block_last_index[n];
- int skip_dc;
-
- if(threshold<0){
- skip_dc=0;
- threshold= -threshold;
- }else
- skip_dc=1;
-
-    /* are all the coefficients which we could set to zero already zero? */
- if(last_index<=skip_dc - 1) return;
-
- for(i=0; i<=last_index; i++){
- const int j = s->intra_scantable.permutated[i];
- const int level = FFABS(block[j]);
- if(level==1){
- if(skip_dc && i==0) continue;
- score+= tab[run];
- run=0;
- }else if(level>1){
- return;
- }else{
- run++;
- }
- }
- if(score >= threshold) return;
- for(i=skip_dc; i<=last_index; i++){
- const int j = s->intra_scantable.permutated[i];
- block[j]=0;
- }
- if(block[0]) s->block_last_index[n]= 0;
- else s->block_last_index[n]= -1;
-}
-
-static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index)
-{
- int i;
- const int maxlevel= s->max_qcoeff;
- const int minlevel= s->min_qcoeff;
- int overflow=0;
-
- if(s->mb_intra){
- i=1; //skip clipping of intra dc
- }else
- i=0;
-
- for(;i<=last_index; i++){
- const int j= s->intra_scantable.permutated[i];
- int level = block[j];
-
- if (level>maxlevel){
- level=maxlevel;
- overflow++;
- }else if(level<minlevel){
- level=minlevel;
- overflow++;
- }
-
- block[j]= level;
- }
-
- if(overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
- av_log(s->avctx, AV_LOG_INFO, "warning, clipping %d dct coefficients to %d..%d\n", overflow, minlevel, maxlevel);
-}
-
-#endif //CONFIG_ENCODERS
-
-/**
- * Draw a horizontal band if supported.
- *
- * @param h the normal height; it will be reduced automatically if needed for the last row
- */
-void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
- if (s->avctx->draw_horiz_band) {
- AVFrame *src;
- int offset[4];
-
- if(s->picture_structure != PICT_FRAME){
- h <<= 1;
- y <<= 1;
- if(s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
- }
-
- h= FFMIN(h, s->avctx->height - y);
-
- if(s->pict_type==B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
- src= (AVFrame*)s->current_picture_ptr;
- else if(s->last_picture_ptr)
- src= (AVFrame*)s->last_picture_ptr;
- else
- return;
-
- if(s->pict_type==B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
- offset[0]=
- offset[1]=
- offset[2]=
- offset[3]= 0;
- }else{
-            offset[0]= y * s->linesize;
- offset[1]=
- offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
- offset[3]= 0;
- }
-
- emms_c();
-
- s->avctx->draw_horiz_band(s->avctx, src, offset,
- y, s->picture_structure, h);
- }
-}
-
-void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
- const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
- const int uvlinesize= s->current_picture.linesize[1];
- const int mb_size= 4 - s->avctx->lowres;
-
- s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
- s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
- s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
- s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
- s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
- s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
- //block_index is not used by mpeg2, so it is not affected by chroma_format
-
- s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
- s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
- s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
-
- if(!(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
- {
- s->dest[0] += s->mb_y * linesize << mb_size;
- s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
- s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
- }
-}
-
-#ifdef CONFIG_ENCODERS
-
-static void get_vissual_weight(int16_t *weight, uint8_t *ptr, int stride){
- int x, y;
-//FIXME optimize
- for(y=0; y<8; y++){
- for(x=0; x<8; x++){
- int x2, y2;
- int sum=0;
- int sqr=0;
- int count=0;
-
- for(y2= FFMAX(y-1, 0); y2 < FFMIN(8, y+2); y2++){
- for(x2= FFMAX(x-1, 0); x2 < FFMIN(8, x+2); x2++){
- int v= ptr[x2 + y2*stride];
- sum += v;
- sqr += v*v;
- count++;
- }
- }
- weight[x + 8*y]= (36*ff_sqrt(count*sqr - sum*sum)) / count;
- }
- }
-}
-
-static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_count)
-{
- int16_t weight[8][64];
- DCTELEM orig[8][64];
- const int mb_x= s->mb_x;
- const int mb_y= s->mb_y;
- int i;
- int skip_dct[8];
- int dct_offset = s->linesize*8; //default for progressive frames
- uint8_t *ptr_y, *ptr_cb, *ptr_cr;
- int wrap_y, wrap_c;
-
- for(i=0; i<mb_block_count; i++) skip_dct[i]=s->skipdct;
-
- if(s->adaptive_quant){
- const int last_qp= s->qscale;
- const int mb_xy= mb_x + mb_y*s->mb_stride;
-
- s->lambda= s->lambda_table[mb_xy];
- update_qscale(s);
-
- if(!(s->flags&CODEC_FLAG_QP_RD)){
- s->qscale= s->current_picture_ptr->qscale_table[mb_xy];
- s->dquant= s->qscale - last_qp;
-
- if(s->out_format==FMT_H263){
- s->dquant= av_clip(s->dquant, -2, 2);
-
- if(s->codec_id==CODEC_ID_MPEG4){
- if(!s->mb_intra){
- if(s->pict_type == B_TYPE){
- if(s->dquant&1 || s->mv_dir&MV_DIRECT)
- s->dquant= 0;
- }
- if(s->mv_type==MV_TYPE_8X8)
- s->dquant=0;
- }
- }
- }
- }
- ff_set_qscale(s, last_qp + s->dquant);
- }else if(s->flags&CODEC_FLAG_QP_RD)
- ff_set_qscale(s, s->qscale + s->dquant);
-
- wrap_y = s->linesize;
- wrap_c = s->uvlinesize;
- ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
- ptr_cb = s->new_picture.data[1] + (mb_y * mb_block_height * wrap_c) + mb_x * 8;
- ptr_cr = s->new_picture.data[2] + (mb_y * mb_block_height * wrap_c) + mb_x * 8;
-
- if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
- uint8_t *ebuf= s->edge_emu_buffer + 32;
- ff_emulated_edge_mc(ebuf , ptr_y , wrap_y,16,16,mb_x*16,mb_y*16, s->width , s->height);
- ptr_y= ebuf;
- ff_emulated_edge_mc(ebuf+18*wrap_y , ptr_cb, wrap_c, 8, mb_block_height, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
- ptr_cb= ebuf+18*wrap_y;
- ff_emulated_edge_mc(ebuf+18*wrap_y+8, ptr_cr, wrap_c, 8, mb_block_height, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
- ptr_cr= ebuf+18*wrap_y+8;
- }
-
- if (s->mb_intra) {
- if(s->flags&CODEC_FLAG_INTERLACED_DCT){
- int progressive_score, interlaced_score;
-
- s->interlaced_dct=0;
- progressive_score= s->dsp.ildct_cmp[4](s, ptr_y , NULL, wrap_y, 8)
- +s->dsp.ildct_cmp[4](s, ptr_y + wrap_y*8, NULL, wrap_y, 8) - 400;
-
- if(progressive_score > 0){
- interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y , NULL, wrap_y*2, 8)
- +s->dsp.ildct_cmp[4](s, ptr_y + wrap_y , NULL, wrap_y*2, 8);
- if(progressive_score > interlaced_score){
- s->interlaced_dct=1;
-
- dct_offset= wrap_y;
- wrap_y<<=1;
- if (s->chroma_format == CHROMA_422)
- wrap_c<<=1;
- }
- }
- }
-
- s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
- s->dsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
- s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
- s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
-
- if(s->flags&CODEC_FLAG_GRAY){
- skip_dct[4]= 1;
- skip_dct[5]= 1;
- }else{
- s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
- s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
- if(!s->chroma_y_shift){ /* 422 */
- s->dsp.get_pixels(s->block[6], ptr_cb + (dct_offset>>1), wrap_c);
- s->dsp.get_pixels(s->block[7], ptr_cr + (dct_offset>>1), wrap_c);
- }
- }
- }else{
- op_pixels_func (*op_pix)[4];
- qpel_mc_func (*op_qpix)[16];
- uint8_t *dest_y, *dest_cb, *dest_cr;
-
- dest_y = s->dest[0];
- dest_cb = s->dest[1];
- dest_cr = s->dest[2];
-
- if ((!s->no_rounding) || s->pict_type==B_TYPE){
- op_pix = s->dsp.put_pixels_tab;
- op_qpix= s->dsp.put_qpel_pixels_tab;
- }else{
- op_pix = s->dsp.put_no_rnd_pixels_tab;
- op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
- }
-
- if (s->mv_dir & MV_DIR_FORWARD) {
- MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
- op_pix = s->dsp.avg_pixels_tab;
- op_qpix= s->dsp.avg_qpel_pixels_tab;
- }
- if (s->mv_dir & MV_DIR_BACKWARD) {
- MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
- }
-
- if(s->flags&CODEC_FLAG_INTERLACED_DCT){
- int progressive_score, interlaced_score;
-
- s->interlaced_dct=0;
- progressive_score= s->dsp.ildct_cmp[0](s, dest_y , ptr_y , wrap_y, 8)
- +s->dsp.ildct_cmp[0](s, dest_y + wrap_y*8, ptr_y + wrap_y*8, wrap_y, 8) - 400;
-
- if(s->avctx->ildct_cmp == FF_CMP_VSSE) progressive_score -= 400;
-
- if(progressive_score>0){
- interlaced_score = s->dsp.ildct_cmp[0](s, dest_y , ptr_y , wrap_y*2, 8)
- +s->dsp.ildct_cmp[0](s, dest_y + wrap_y , ptr_y + wrap_y , wrap_y*2, 8);
-
- if(progressive_score > interlaced_score){
- s->interlaced_dct=1;
-
- dct_offset= wrap_y;
- wrap_y<<=1;
- if (s->chroma_format == CHROMA_422)
- wrap_c<<=1;
- }
- }
- }
-
- s->dsp.diff_pixels(s->block[0], ptr_y , dest_y , wrap_y);
- s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
- s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset , dest_y + dct_offset , wrap_y);
- s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y);
-
- if(s->flags&CODEC_FLAG_GRAY){
- skip_dct[4]= 1;
- skip_dct[5]= 1;
- }else{
- s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
- s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
- if(!s->chroma_y_shift){ /* 422 */
- s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset>>1), dest_cb + (dct_offset>>1), wrap_c);
- s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset>>1), dest_cr + (dct_offset>>1), wrap_c);
- }
- }
- /* pre quantization */
- if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){
- //FIXME optimize
- if(s->dsp.sad[1](NULL, ptr_y , dest_y , wrap_y, 8) < 20*s->qscale) skip_dct[0]= 1;
- if(s->dsp.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20*s->qscale) skip_dct[1]= 1;
- if(s->dsp.sad[1](NULL, ptr_y +dct_offset , dest_y +dct_offset , wrap_y, 8) < 20*s->qscale) skip_dct[2]= 1;
- if(s->dsp.sad[1](NULL, ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y, 8) < 20*s->qscale) skip_dct[3]= 1;
- if(s->dsp.sad[1](NULL, ptr_cb , dest_cb , wrap_c, 8) < 20*s->qscale) skip_dct[4]= 1;
- if(s->dsp.sad[1](NULL, ptr_cr , dest_cr , wrap_c, 8) < 20*s->qscale) skip_dct[5]= 1;
- if(!s->chroma_y_shift){ /* 422 */
- if(s->dsp.sad[1](NULL, ptr_cb +(dct_offset>>1), dest_cb +(dct_offset>>1), wrap_c, 8) < 20*s->qscale) skip_dct[6]= 1;
- if(s->dsp.sad[1](NULL, ptr_cr +(dct_offset>>1), dest_cr +(dct_offset>>1), wrap_c, 8) < 20*s->qscale) skip_dct[7]= 1;
- }
- }
- }
-
- if(s->avctx->quantizer_noise_shaping){
- if(!skip_dct[0]) get_vissual_weight(weight[0], ptr_y , wrap_y);
- if(!skip_dct[1]) get_vissual_weight(weight[1], ptr_y + 8, wrap_y);
- if(!skip_dct[2]) get_vissual_weight(weight[2], ptr_y + dct_offset , wrap_y);
- if(!skip_dct[3]) get_vissual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
- if(!skip_dct[4]) get_vissual_weight(weight[4], ptr_cb , wrap_c);
- if(!skip_dct[5]) get_vissual_weight(weight[5], ptr_cr , wrap_c);
- if(!s->chroma_y_shift){ /* 422 */
- if(!skip_dct[6]) get_vissual_weight(weight[6], ptr_cb + (dct_offset>>1), wrap_c);
- if(!skip_dct[7]) get_vissual_weight(weight[7], ptr_cr + (dct_offset>>1), wrap_c);
- }
- memcpy(orig[0], s->block[0], sizeof(DCTELEM)*64*mb_block_count);
- }
-
- /* DCT & quantize */
- assert(s->out_format!=FMT_MJPEG || s->qscale==8);
- {
- for(i=0;i<mb_block_count;i++) {
- if(!skip_dct[i]){
- int overflow;
- s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
-                // FIXME we could decide to change the quantizer instead of clipping
-                // JS: I don't think that would be a good idea, it could lower quality instead
-                //     of improving it. Just INTRADC clipping deserves changes in the quantizer.
- if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
- }else
- s->block_last_index[i]= -1;
- }
- if(s->avctx->quantizer_noise_shaping){
- for(i=0;i<mb_block_count;i++) {
- if(!skip_dct[i]){
- s->block_last_index[i] = dct_quantize_refine(s, s->block[i], weight[i], orig[i], i, s->qscale);
- }
- }
- }
-
- if(s->luma_elim_threshold && !s->mb_intra)
- for(i=0; i<4; i++)
- dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
- if(s->chroma_elim_threshold && !s->mb_intra)
- for(i=4; i<mb_block_count; i++)
- dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
-
- if(s->flags & CODEC_FLAG_CBP_RD){
- for(i=0;i<mb_block_count;i++) {
- if(s->block_last_index[i] == -1)
- s->coded_score[i]= INT_MAX/256;
- }
- }
- }
-
- if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
- s->block_last_index[4]=
- s->block_last_index[5]= 0;
- s->block[4][0]=
- s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale;
- }
-
-    //FIXME: the non-C quantize code returns an incorrect block_last_index
- if(s->alternate_scan && s->dct_quantize != dct_quantize_c){
- for(i=0; i<mb_block_count; i++){
- int j;
- if(s->block_last_index[i]>0){
- for(j=63; j>0; j--){
- if(s->block[i][ s->intra_scantable.permutated[j] ]) break;
- }
- s->block_last_index[i]= j;
- }
- }
- }
-
- /* huffman encode */
- switch(s->codec_id){ //FIXME funct ptr could be slightly faster
- case CODEC_ID_MPEG1VIDEO:
- case CODEC_ID_MPEG2VIDEO:
- mpeg1_encode_mb(s, s->block, motion_x, motion_y); break;
- case CODEC_ID_MPEG4:
- mpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
- case CODEC_ID_MSMPEG4V2:
- case CODEC_ID_MSMPEG4V3:
- case CODEC_ID_WMV1:
- if (ENABLE_MSMPEG4_ENCODER)
- msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
- break;
- case CODEC_ID_WMV2:
- if (ENABLE_WMV2_ENCODER)
- ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
- break;
- case CODEC_ID_H261:
- if (ENABLE_H261_ENCODER)
- ff_h261_encode_mb(s, s->block, motion_x, motion_y);
- break;
- case CODEC_ID_H263:
- case CODEC_ID_H263P:
- case CODEC_ID_FLV1:
- case CODEC_ID_RV10:
- case CODEC_ID_RV20:
- h263_encode_mb(s, s->block, motion_x, motion_y); break;
- case CODEC_ID_MJPEG:
- if (ENABLE_MJPEG_ENCODER)
- ff_mjpeg_encode_mb(s, s->block);
- break;
- default:
- assert(0);
- }
-}
-
-static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
-{
- if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
- else encode_mb_internal(s, motion_x, motion_y, 16, 8);
-}
-
-#endif //CONFIG_ENCODERS
-
-void ff_mpeg_flush(AVCodecContext *avctx){
- int i;
- MpegEncContext *s = avctx->priv_data;
-
- if(s==NULL || s->picture==NULL)
- return;
-
- for(i=0; i<MAX_PICTURE_COUNT; i++){
- if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
- || s->picture[i].type == FF_BUFFER_TYPE_USER))
- avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
- }
- s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
-
- s->mb_x= s->mb_y= 0;
-
- s->parse_context.state= -1;
- s->parse_context.frame_start_found= 0;
- s->parse_context.overread= 0;
- s->parse_context.overread_index= 0;
- s->parse_context.index= 0;
- s->parse_context.last_index= 0;
- s->bitstream_buffer_size=0;
- s->pp_time=0;
-}
-
-#ifdef CONFIG_ENCODERS
-void ff_copy_bits(PutBitContext *pb, uint8_t *src, int length)
-{
- const uint16_t *srcw= (uint16_t*)src;
- int words= length>>4;
- int bits= length&15;
- int i;
-
- if(length==0) return;
-
- if(words < 16){
- for(i=0; i<words; i++) put_bits(pb, 16, be2me_16(srcw[i]));
- }else if(put_bits_count(pb)&7){
- for(i=0; i<words; i++) put_bits(pb, 16, be2me_16(srcw[i]));
- }else{
- for(i=0; put_bits_count(pb)&31; i++)
- put_bits(pb, 8, src[i]);
- flush_put_bits(pb);
- memcpy(pbBufPtr(pb), src+i, 2*words-i);
- skip_put_bytes(pb, 2*words-i);
- }
-
- put_bits(pb, bits, be2me_16(srcw[words])>>(16-bits));
-}
-
-static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
- int i;
-
-    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
-
- /* mpeg1 */
- d->mb_skip_run= s->mb_skip_run;
- for(i=0; i<3; i++)
- d->last_dc[i]= s->last_dc[i];
-
- /* statistics */
- d->mv_bits= s->mv_bits;
- d->i_tex_bits= s->i_tex_bits;
- d->p_tex_bits= s->p_tex_bits;
- d->i_count= s->i_count;
- d->f_count= s->f_count;
- d->b_count= s->b_count;
- d->skip_count= s->skip_count;
- d->misc_bits= s->misc_bits;
- d->last_bits= 0;
-
- d->mb_skipped= 0;
- d->qscale= s->qscale;
- d->dquant= s->dquant;
-}
-
-static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
- int i;
-
- memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
-    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
-
- /* mpeg1 */
- d->mb_skip_run= s->mb_skip_run;
- for(i=0; i<3; i++)
- d->last_dc[i]= s->last_dc[i];
-
- /* statistics */
- d->mv_bits= s->mv_bits;
- d->i_tex_bits= s->i_tex_bits;
- d->p_tex_bits= s->p_tex_bits;
- d->i_count= s->i_count;
- d->f_count= s->f_count;
- d->b_count= s->b_count;
- d->skip_count= s->skip_count;
- d->misc_bits= s->misc_bits;
-
- d->mb_intra= s->mb_intra;
- d->mb_skipped= s->mb_skipped;
- d->mv_type= s->mv_type;
- d->mv_dir= s->mv_dir;
- d->pb= s->pb;
- if(s->data_partitioning){
- d->pb2= s->pb2;
- d->tex_pb= s->tex_pb;
- }
- d->block= s->block;
- for(i=0; i<8; i++)
- d->block_last_index[i]= s->block_last_index[i];
- d->interlaced_dct= s->interlaced_dct;
- d->qscale= s->qscale;
-}
-
-static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
- PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
- int *dmin, int *next_block, int motion_x, int motion_y)
-{
- int score;
- uint8_t *dest_backup[3];
-
- copy_context_before_encode(s, backup, type);
-
- s->block= s->blocks[*next_block];
- s->pb= pb[*next_block];
- if(s->data_partitioning){
- s->pb2 = pb2 [*next_block];
- s->tex_pb= tex_pb[*next_block];
- }
-
- if(*next_block){
- memcpy(dest_backup, s->dest, sizeof(s->dest));
- s->dest[0] = s->rd_scratchpad;
- s->dest[1] = s->rd_scratchpad + 16*s->linesize;
- s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
- assert(s->linesize >= 32); //FIXME
- }
-
- encode_mb(s, motion_x, motion_y);
-
- score= put_bits_count(&s->pb);
- if(s->data_partitioning){
- score+= put_bits_count(&s->pb2);
- score+= put_bits_count(&s->tex_pb);
- }
-
- if(s->avctx->mb_decision == FF_MB_DECISION_RD){
- MPV_decode_mb(s, s->block);
-
- score *= s->lambda2;
- score += sse_mb(s) << FF_LAMBDA_SHIFT;
- }
-
- if(*next_block){
- memcpy(s->dest, dest_backup, sizeof(s->dest));
- }
-
- if(score<*dmin){
- *dmin= score;
- *next_block^=1;
-
- copy_context_after_encode(best, s, type);
- }
-}
-
-static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
- uint32_t *sq = ff_squareTbl + 256;
- int acc=0;
- int x,y;
-
- if(w==16 && h==16)
- return s->dsp.sse[0](NULL, src1, src2, stride, 16);
- else if(w==8 && h==8)
- return s->dsp.sse[1](NULL, src1, src2, stride, 8);
-
- for(y=0; y<h; y++){
- for(x=0; x<w; x++){
- acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
- }
- }
-
- assert(acc>=0);
-
- return acc;
-}
-
-static int sse_mb(MpegEncContext *s){
- int w= 16;
- int h= 16;
-
- if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
- if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
-
- if(w==16 && h==16)
- if(s->avctx->mb_cmp == FF_CMP_NSSE){
- return s->dsp.nsse[0](s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
- +s->dsp.nsse[1](s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
- +s->dsp.nsse[1](s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
- }else{
- return s->dsp.sse[0](NULL, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
- +s->dsp.sse[1](NULL, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
- +s->dsp.sse[1](NULL, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
- }
- else
- return sse(s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
- +sse(s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
- +sse(s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
-}
-
-static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
- MpegEncContext *s= arg;
-
-
- s->me.pre_pass=1;
- s->me.dia_size= s->avctx->pre_dia_size;
- s->first_slice_line=1;
- for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
- for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
- ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
- }
- s->first_slice_line=0;
- }
-
- s->me.pre_pass=0;
-
- return 0;
-}
-
-static int estimate_motion_thread(AVCodecContext *c, void *arg){
- MpegEncContext *s= arg;
-
- ff_check_alignment();
-
- s->me.dia_size= s->avctx->dia_size;
- s->first_slice_line=1;
- for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
- s->mb_x=0; //for block init below
- ff_init_block_index(s);
- for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
- s->block_index[0]+=2;
- s->block_index[1]+=2;
- s->block_index[2]+=2;
- s->block_index[3]+=2;
-
- /* compute motion vector & mb_type and store in context */
- if(s->pict_type==B_TYPE)
- ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
- else
- ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
- }
- s->first_slice_line=0;
- }
- return 0;
-}
-
-static int mb_var_thread(AVCodecContext *c, void *arg){
- MpegEncContext *s= arg;
- int mb_x, mb_y;
-
- ff_check_alignment();
-
- for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
- for(mb_x=0; mb_x < s->mb_width; mb_x++) {
- int xx = mb_x * 16;
- int yy = mb_y * 16;
- uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
- int varc;
- int sum = s->dsp.pix_sum(pix, s->linesize);
-
- varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
-
- s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
- s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
- s->me.mb_var_sum_temp += varc;
- }
- }
- return 0;
-}
-
-static void write_slice_end(MpegEncContext *s){
- if(s->codec_id==CODEC_ID_MPEG4){
- if(s->partitioned_frame){
- ff_mpeg4_merge_partitions(s);
- }
-
- ff_mpeg4_stuffing(&s->pb);
- }else if(ENABLE_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
- ff_mjpeg_encode_stuffing(&s->pb);
- }
-
- align_put_bits(&s->pb);
- flush_put_bits(&s->pb);
-
- if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
- s->misc_bits+= get_bits_diff(s);
-}
-
-static int encode_thread(AVCodecContext *c, void *arg){
- MpegEncContext *s= arg;
- int mb_x, mb_y, pdif = 0;
- int i, j;
- MpegEncContext best_s, backup_s;
- uint8_t bit_buf[2][MAX_MB_BYTES];
- uint8_t bit_buf2[2][MAX_MB_BYTES];
- uint8_t bit_buf_tex[2][MAX_MB_BYTES];
- PutBitContext pb[2], pb2[2], tex_pb[2];
-//printf("%d->%d\n", s->resync_mb_y, s->end_mb_y);
-
- ff_check_alignment();
-
- for(i=0; i<2; i++){
- init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
- init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
- init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
- }
-
- s->last_bits= put_bits_count(&s->pb);
- s->mv_bits=0;
- s->misc_bits=0;
- s->i_tex_bits=0;
- s->p_tex_bits=0;
- s->i_count=0;
- s->f_count=0;
- s->b_count=0;
- s->skip_count=0;
-
- for(i=0; i<3; i++){
- /* init last dc values */
- /* note: quant matrix value (8) is implied here */
- s->last_dc[i] = 128 << s->intra_dc_precision;
-
- s->current_picture.error[i] = 0;
- }
- s->mb_skip_run = 0;
- memset(s->last_mv, 0, sizeof(s->last_mv));
-
- s->last_mv_dir = 0;
-
- switch(s->codec_id){
- case CODEC_ID_H263:
- case CODEC_ID_H263P:
- case CODEC_ID_FLV1:
- s->gob_index = ff_h263_get_gob_height(s);
- break;
- case CODEC_ID_MPEG4:
- if(s->partitioned_frame)
- ff_mpeg4_init_partitions(s);
- break;
- }
-
- s->resync_mb_x=0;
- s->resync_mb_y=0;
- s->first_slice_line = 1;
- s->ptr_lastgob = s->pb.buf;
- for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
-// printf("row %d at %X\n", s->mb_y, (int)s);
- s->mb_x=0;
- s->mb_y= mb_y;
-
- ff_set_qscale(s, s->qscale);
- ff_init_block_index(s);
-
- for(mb_x=0; mb_x < s->mb_width; mb_x++) {
- int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
- int mb_type= s->mb_type[xy];
-// int d;
- int dmin= INT_MAX;
- int dir;
-
- if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
- av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
- return -1;
- }
- if(s->data_partitioning){
- if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
- || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
- av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
- return -1;
- }
- }
-
- s->mb_x = mb_x;
- s->mb_y = mb_y; // moved into loop, can get changed by H.261
- ff_update_block_index(s);
-
- if(ENABLE_H261_ENCODER && s->codec_id == CODEC_ID_H261){
- ff_h261_reorder_mb_index(s);
- xy= s->mb_y*s->mb_stride + s->mb_x;
- mb_type= s->mb_type[xy];
- }
-
- /* write gob / video packet header */
- if(s->rtp_mode){
- int current_packet_size, is_gob_start;
-
- current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
-
- is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
-
- if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
-
- switch(s->codec_id){
- case CODEC_ID_H263:
- case CODEC_ID_H263P:
- if(!s->h263_slice_structured)
- if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
- break;
- case CODEC_ID_MPEG2VIDEO:
- if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
- case CODEC_ID_MPEG1VIDEO:
- if(s->mb_skip_run) is_gob_start=0;
- break;
- }
-
- if(is_gob_start){
- if(s->start_mb_y != mb_y || mb_x!=0){
- write_slice_end(s);
-
- if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame){
- ff_mpeg4_init_partitions(s);
- }
- }
-
- assert((put_bits_count(&s->pb)&7) == 0);
- current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob;
-
- if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
- int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
- int d= 100 / s->avctx->error_rate;
- if(r % d == 0){
- current_packet_size=0;
-#ifndef ALT_BITSTREAM_WRITER
- s->pb.buf_ptr= s->ptr_lastgob;
-#endif
- assert(pbBufPtr(&s->pb) == s->ptr_lastgob);
- }
- }
-
- if (s->avctx->rtp_callback){
- int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
- s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
- }
-
- switch(s->codec_id){
- case CODEC_ID_MPEG4:
- ff_mpeg4_encode_video_packet_header(s);
- ff_mpeg4_clean_buffers(s);
- break;
- case CODEC_ID_MPEG1VIDEO:
- case CODEC_ID_MPEG2VIDEO:
- ff_mpeg1_encode_slice_header(s);
- ff_mpeg1_clean_buffers(s);
- break;
- case CODEC_ID_H263:
- case CODEC_ID_H263P:
- h263_encode_gob_header(s, mb_y);
- break;
- }
-
- if(s->flags&CODEC_FLAG_PASS1){
- int bits= put_bits_count(&s->pb);
- s->misc_bits+= bits - s->last_bits;
- s->last_bits= bits;
- }
-
- s->ptr_lastgob += current_packet_size;
- s->first_slice_line=1;
- s->resync_mb_x=mb_x;
- s->resync_mb_y=mb_y;
- }
- }
-
- if( (s->resync_mb_x == s->mb_x)
- && s->resync_mb_y+1 == s->mb_y){
- s->first_slice_line=0;
- }
-
- s->mb_skipped=0;
- s->dquant=0; //only for QP_RD
-
- if(mb_type & (mb_type-1) || (s->flags & CODEC_FLAG_QP_RD)){ // more than 1 MB type possible or CODEC_FLAG_QP_RD
- int next_block=0;
- int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
-
- copy_context_before_encode(&backup_s, s, -1);
- backup_s.pb= s->pb;
- best_s.data_partitioning= s->data_partitioning;
- best_s.partitioned_frame= s->partitioned_frame;
- if(s->data_partitioning){
- backup_s.pb2= s->pb2;
- backup_s.tex_pb= s->tex_pb;
- }
-
- if(mb_type&CANDIDATE_MB_TYPE_INTER){
- s->mv_dir = MV_DIR_FORWARD;
- s->mv_type = MV_TYPE_16X16;
- s->mb_intra= 0;
- s->mv[0][0][0] = s->p_mv_table[xy][0];
- s->mv[0][0][1] = s->p_mv_table[xy][1];
- encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
- &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
- }
- if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
- s->mv_dir = MV_DIR_FORWARD;
- s->mv_type = MV_TYPE_FIELD;
- s->mb_intra= 0;
- for(i=0; i<2; i++){
- j= s->field_select[0][i] = s->p_field_select_table[i][xy];
- s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
- s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
- }
- encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
- &dmin, &next_block, 0, 0);
- }
- if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
- s->mv_dir = MV_DIR_FORWARD;
- s->mv_type = MV_TYPE_16X16;
- s->mb_intra= 0;
- s->mv[0][0][0] = 0;
- s->mv[0][0][1] = 0;
- encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
- &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
- }
- if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
- s->mv_dir = MV_DIR_FORWARD;
- s->mv_type = MV_TYPE_8X8;
- s->mb_intra= 0;
- for(i=0; i<4; i++){
- s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
- s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
- }
- encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
- &dmin, &next_block, 0, 0);
- }
- if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
- s->mv_dir = MV_DIR_FORWARD;
- s->mv_type = MV_TYPE_16X16;
- s->mb_intra= 0;
- s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
- s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
- encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
- &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
- }
- if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
- s->mv_dir = MV_DIR_BACKWARD;
- s->mv_type = MV_TYPE_16X16;
- s->mb_intra= 0;
- s->mv[1][0][0] = s->b_back_mv_table[xy][0];
- s->mv[1][0][1] = s->b_back_mv_table[xy][1];
- encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
- &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
- }
- if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
- s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
- s->mv_type = MV_TYPE_16X16;
- s->mb_intra= 0;
- s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
- s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
- s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
- s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
- encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
- &dmin, &next_block, 0, 0);
- }
- if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
- s->mv_dir = MV_DIR_FORWARD;
- s->mv_type = MV_TYPE_FIELD;
- s->mb_intra= 0;
- for(i=0; i<2; i++){
- j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
- s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
- s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
- }
- encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
- &dmin, &next_block, 0, 0);
- }
- if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
- s->mv_dir = MV_DIR_BACKWARD;
- s->mv_type = MV_TYPE_FIELD;
- s->mb_intra= 0;
- for(i=0; i<2; i++){
- j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
- s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
- s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
- }
- encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
- &dmin, &next_block, 0, 0);
- }
- if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
- s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
- s->mv_type = MV_TYPE_FIELD;
- s->mb_intra= 0;
- for(dir=0; dir<2; dir++){
- for(i=0; i<2; i++){
- j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
- s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
- s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
- }
- }
- encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
- &dmin, &next_block, 0, 0);
- }
- if(mb_type&CANDIDATE_MB_TYPE_INTRA){
- s->mv_dir = 0;
- s->mv_type = MV_TYPE_16X16;
- s->mb_intra= 1;
- s->mv[0][0][0] = 0;
- s->mv[0][0][1] = 0;
- encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
- &dmin, &next_block, 0, 0);
- if(s->h263_pred || s->h263_aic){
- if(best_s.mb_intra)
- s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
- else
- ff_clean_intra_table_entries(s); //old mode?
- }
- }
-
- if((s->flags & CODEC_FLAG_QP_RD) && dmin < INT_MAX){
- if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
- const int last_qp= backup_s.qscale;
- int qpi, qp, dc[6];
- DCTELEM ac[6][16];
- const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
- static const int dquant_tab[4]={-1,1,-2,2};
-
- assert(backup_s.dquant == 0);
-
- //FIXME intra
- s->mv_dir= best_s.mv_dir;
- s->mv_type = MV_TYPE_16X16;
- s->mb_intra= best_s.mb_intra;
- s->mv[0][0][0] = best_s.mv[0][0][0];
- s->mv[0][0][1] = best_s.mv[0][0][1];
- s->mv[1][0][0] = best_s.mv[1][0][0];
- s->mv[1][0][1] = best_s.mv[1][0][1];
-
- qpi = s->pict_type == B_TYPE ? 2 : 0;
- for(; qpi<4; qpi++){
- int dquant= dquant_tab[qpi];
- qp= last_qp + dquant;
- if(qp < s->avctx->qmin || qp > s->avctx->qmax)
- continue;
- backup_s.dquant= dquant;
- if(s->mb_intra && s->dc_val[0]){
- for(i=0; i<6; i++){
- dc[i]= s->dc_val[0][ s->block_index[i] ];
- memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16);
- }
- }
-
- encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
- &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
- if(best_s.qscale != qp){
- if(s->mb_intra && s->dc_val[0]){
- for(i=0; i<6; i++){
- s->dc_val[0][ s->block_index[i] ]= dc[i];
- memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
- }
- }
- }
- }
- }
- }
- if(mb_type&CANDIDATE_MB_TYPE_DIRECT){
- int mx= s->b_direct_mv_table[xy][0];
- int my= s->b_direct_mv_table[xy][1];
-
- backup_s.dquant = 0;
- s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
- s->mb_intra= 0;
- ff_mpeg4_set_direct_mv(s, mx, my);
- encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
- &dmin, &next_block, mx, my);
- }
- if(mb_type&CANDIDATE_MB_TYPE_DIRECT0){
- backup_s.dquant = 0;
- s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
- s->mb_intra= 0;
- ff_mpeg4_set_direct_mv(s, 0, 0);
- encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
- &dmin, &next_block, 0, 0);
- }
- if(!best_s.mb_intra && s->flags2&CODEC_FLAG2_SKIP_RD){
- int coded=0;
- for(i=0; i<6; i++)
- coded |= s->block_last_index[i];
- if(coded){
- int mx,my;
- memcpy(s->mv, best_s.mv, sizeof(s->mv));
- if(best_s.mv_dir & MV_DIRECT){
- mx=my=0; //FIXME find the one we actually used
- ff_mpeg4_set_direct_mv(s, mx, my);
- }else if(best_s.mv_dir&MV_DIR_BACKWARD){
- mx= s->mv[1][0][0];
- my= s->mv[1][0][1];
- }else{
- mx= s->mv[0][0][0];
- my= s->mv[0][0][1];
- }
-
- s->mv_dir= best_s.mv_dir;
- s->mv_type = best_s.mv_type;
- s->mb_intra= 0;
-/* s->mv[0][0][0] = best_s.mv[0][0][0];
- s->mv[0][0][1] = best_s.mv[0][0][1];
- s->mv[1][0][0] = best_s.mv[1][0][0];
- s->mv[1][0][1] = best_s.mv[1][0][1];*/
- backup_s.dquant= 0;
- s->skipdct=1;
- encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
- &dmin, &next_block, mx, my);
- s->skipdct=0;
- }
- }
-
- s->current_picture.qscale_table[xy]= best_s.qscale;
-
- copy_context_after_encode(s, &best_s, -1);
-
- pb_bits_count= put_bits_count(&s->pb);
- flush_put_bits(&s->pb);
- ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
- s->pb= backup_s.pb;
-
- if(s->data_partitioning){
- pb2_bits_count= put_bits_count(&s->pb2);
- flush_put_bits(&s->pb2);
- ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
- s->pb2= backup_s.pb2;
-
- tex_pb_bits_count= put_bits_count(&s->tex_pb);
- flush_put_bits(&s->tex_pb);
- ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
- s->tex_pb= backup_s.tex_pb;
- }
- s->last_bits= put_bits_count(&s->pb);
-
- if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
- ff_h263_update_motion_val(s);
-
- if(next_block==0){ //FIXME 16 vs linesize16
- s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
- s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
- s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
- }
-
- if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
- MPV_decode_mb(s, s->block);
- } else {
- int motion_x = 0, motion_y = 0;
- s->mv_type=MV_TYPE_16X16;
- // only one MB-Type possible
-
- switch(mb_type){
- case CANDIDATE_MB_TYPE_INTRA:
- s->mv_dir = 0;
- s->mb_intra= 1;
- motion_x= s->mv[0][0][0] = 0;
- motion_y= s->mv[0][0][1] = 0;
- break;
- case CANDIDATE_MB_TYPE_INTER:
- s->mv_dir = MV_DIR_FORWARD;
- s->mb_intra= 0;
- motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
- motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
- break;
- case CANDIDATE_MB_TYPE_INTER_I:
- s->mv_dir = MV_DIR_FORWARD;
- s->mv_type = MV_TYPE_FIELD;
- s->mb_intra= 0;
- for(i=0; i<2; i++){
- j= s->field_select[0][i] = s->p_field_select_table[i][xy];
- s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
- s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
- }
- break;
- case CANDIDATE_MB_TYPE_INTER4V:
- s->mv_dir = MV_DIR_FORWARD;
- s->mv_type = MV_TYPE_8X8;
- s->mb_intra= 0;
- for(i=0; i<4; i++){
- s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
- s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
- }
- break;
- case CANDIDATE_MB_TYPE_DIRECT:
- s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
- s->mb_intra= 0;
- motion_x=s->b_direct_mv_table[xy][0];
- motion_y=s->b_direct_mv_table[xy][1];
- ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
- break;
- case CANDIDATE_MB_TYPE_DIRECT0:
- s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
- s->mb_intra= 0;
- ff_mpeg4_set_direct_mv(s, 0, 0);
- break;
- case CANDIDATE_MB_TYPE_BIDIR:
- s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
- s->mb_intra= 0;
- s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
- s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
- s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
- s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
- break;
- case CANDIDATE_MB_TYPE_BACKWARD:
- s->mv_dir = MV_DIR_BACKWARD;
- s->mb_intra= 0;
- motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
- motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
- break;
- case CANDIDATE_MB_TYPE_FORWARD:
- s->mv_dir = MV_DIR_FORWARD;
- s->mb_intra= 0;
- motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
- motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
-// printf(" %d %d ", motion_x, motion_y);
- break;
- case CANDIDATE_MB_TYPE_FORWARD_I:
- s->mv_dir = MV_DIR_FORWARD;
- s->mv_type = MV_TYPE_FIELD;
- s->mb_intra= 0;
- for(i=0; i<2; i++){
- j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
- s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
- s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
- }
- break;
- case CANDIDATE_MB_TYPE_BACKWARD_I:
- s->mv_dir = MV_DIR_BACKWARD;
- s->mv_type = MV_TYPE_FIELD;
- s->mb_intra= 0;
- for(i=0; i<2; i++){
- j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
- s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
- s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
- }
- break;
- case CANDIDATE_MB_TYPE_BIDIR_I:
- s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
- s->mv_type = MV_TYPE_FIELD;
- s->mb_intra= 0;
- for(dir=0; dir<2; dir++){
- for(i=0; i<2; i++){
- j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
- s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
- s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
- }
- }
- break;
- default:
- av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
- }
-
- encode_mb(s, motion_x, motion_y);
-
- // RAL: Update last macroblock type
- s->last_mv_dir = s->mv_dir;
-
- if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
- ff_h263_update_motion_val(s);
-
- MPV_decode_mb(s, s->block);
- }
-
- /* clean the MV table in IPS frames for direct mode in B frames */
- if(s->mb_intra /* && I,P,S_TYPE */){
- s->p_mv_table[xy][0]=0;
- s->p_mv_table[xy][1]=0;
- }
-
- if(s->flags&CODEC_FLAG_PSNR){
- int w= 16;
- int h= 16;
-
- if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
- if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
-
- s->current_picture.error[0] += sse(
- s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
- s->dest[0], w, h, s->linesize);
- s->current_picture.error[1] += sse(
- s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
- s->dest[1], w>>1, h>>1, s->uvlinesize);
- s->current_picture.error[2] += sse(
-                s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
- s->dest[2], w>>1, h>>1, s->uvlinesize);
- }
- if(s->loop_filter){
- if(s->out_format == FMT_H263)
- ff_h263_loop_filter(s);
- }
-//printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, put_bits_count(&s->pb));
- }
- }
-
-    // not beautiful here, but we must write it before flushing, so it has to be here
- if (ENABLE_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
- msmpeg4_encode_ext_header(s);
-
- write_slice_end(s);
-
- /* Send the last GOB if RTP */
- if (s->avctx->rtp_callback) {
- int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
- pdif = pbBufPtr(&s->pb) - s->ptr_lastgob;
- /* Call the RTP callback to send the last GOB */
- emms_c();
- s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
- }
-
- return 0;
-}
-
-#define MERGE(field) dst->field += src->field; src->field=0
-static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
- MERGE(me.scene_change_score);
- MERGE(me.mc_mb_var_sum_temp);
- MERGE(me.mb_var_sum_temp);
-}
-
-static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
- int i;
-
- MERGE(dct_count[0]); //note, the other dct vars are not part of the context
- MERGE(dct_count[1]);
- MERGE(mv_bits);
- MERGE(i_tex_bits);
- MERGE(p_tex_bits);
- MERGE(i_count);
- MERGE(f_count);
- MERGE(b_count);
- MERGE(skip_count);
- MERGE(misc_bits);
- MERGE(error_count);
- MERGE(padding_bug_score);
- MERGE(current_picture.error[0]);
- MERGE(current_picture.error[1]);
- MERGE(current_picture.error[2]);
-
- if(dst->avctx->noise_reduction){
- for(i=0; i<64; i++){
- MERGE(dct_error_sum[0][i]);
- MERGE(dct_error_sum[1][i]);
- }
- }
-
- assert(put_bits_count(&src->pb) % 8 ==0);
- assert(put_bits_count(&dst->pb) % 8 ==0);
- ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
- flush_put_bits(&dst->pb);
-}
-
-static int estimate_qp(MpegEncContext *s, int dry_run){
- if (s->next_lambda){
- s->current_picture_ptr->quality=
- s->current_picture.quality = s->next_lambda;
- if(!dry_run) s->next_lambda= 0;
- } else if (!s->fixed_qscale) {
- s->current_picture_ptr->quality=
- s->current_picture.quality = ff_rate_estimate_qscale(s, dry_run);
- if (s->current_picture.quality < 0)
- return -1;
- }
-
- if(s->adaptive_quant){
- switch(s->codec_id){
- case CODEC_ID_MPEG4:
- ff_clean_mpeg4_qscales(s);
- break;
- case CODEC_ID_H263:
- case CODEC_ID_H263P:
- case CODEC_ID_FLV1:
- ff_clean_h263_qscales(s);
- break;
- }
-
- s->lambda= s->lambda_table[0];
- //FIXME broken
- }else
- s->lambda= s->current_picture.quality;
-//printf("%d %d\n", s->avctx->global_quality, s->current_picture.quality);
- update_qscale(s);
- return 0;
-}
-
-static int encode_picture(MpegEncContext *s, int picture_number)
-{
- int i;
- int bits;
-
- s->picture_number = picture_number;
-
- /* Reset the average MB variance */
- s->me.mb_var_sum_temp =
- s->me.mc_mb_var_sum_temp = 0;
-
- /* we need to initialize some time vars before we can encode b-frames */
- // RAL: Condition added for MPEG1VIDEO
- if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->h263_msmpeg4))
- ff_set_mpeg4_time(s, s->picture_number); //FIXME rename and use has_b_frames or similar
-
- s->me.scene_change_score=0;
-
-// s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME ratedistoration
-
- if(s->pict_type==I_TYPE){
- if(s->msmpeg4_version >= 3) s->no_rounding=1;
- else s->no_rounding=0;
- }else if(s->pict_type!=B_TYPE){
- if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
- s->no_rounding ^= 1;
- }
-
- if(s->flags & CODEC_FLAG_PASS2){
- if (estimate_qp(s,1) < 0)
- return -1;
- ff_get_2pass_fcode(s);
- }else if(!(s->flags & CODEC_FLAG_QSCALE)){
- if(s->pict_type==B_TYPE)
- s->lambda= s->last_lambda_for[s->pict_type];
- else
- s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
- update_qscale(s);
- }
-
- s->mb_intra=0; //for the rate distortion & bit compare functions
- for(i=1; i<s->avctx->thread_count; i++){
- ff_update_duplicate_context(s->thread_context[i], s);
- }
-
- ff_init_me(s);
-
- /* Estimate motion for every MB */
- if(s->pict_type != I_TYPE){
- s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
- s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
- if(s->pict_type != B_TYPE && s->avctx->me_threshold==0){
- if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
- s->avctx->execute(s->avctx, pre_estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count);
- }
- }
-
- s->avctx->execute(s->avctx, estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count);
- }else /* if(s->pict_type == I_TYPE) */{
- /* I-Frame */
- for(i=0; i<s->mb_stride*s->mb_height; i++)
- s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
-
- if(!s->fixed_qscale){
- /* finding spatial complexity for I-frame rate control */
- s->avctx->execute(s->avctx, mb_var_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count);
- }
- }
- for(i=1; i<s->avctx->thread_count; i++){
- merge_context_after_me(s, s->thread_context[i]);
- }
- s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
- s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
- emms_c();
-
- if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == P_TYPE){
- s->pict_type= I_TYPE;
- for(i=0; i<s->mb_stride*s->mb_height; i++)
- s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
-//printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
- }
-
- if(!s->umvplus){
- if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) {
- s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
-
- if(s->flags & CODEC_FLAG_INTERLACED_ME){
- int a,b;
- a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
- b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
- s->f_code= FFMAX(s->f_code, FFMAX(a,b));
- }
-
- ff_fix_long_p_mvs(s);
- ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
- if(s->flags & CODEC_FLAG_INTERLACED_ME){
- int j;
- for(i=0; i<2; i++){
- for(j=0; j<2; j++)
- ff_fix_long_mvs(s, s->p_field_select_table[i], j,
- s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
- }
- }
- }
-
- if(s->pict_type==B_TYPE){
- int a, b;
-
- a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
- b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
- s->f_code = FFMAX(a, b);
-
- a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
- b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
- s->b_code = FFMAX(a, b);
-
- ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
- ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
- ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
- ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
- if(s->flags & CODEC_FLAG_INTERLACED_ME){
- int dir, j;
- for(dir=0; dir<2; dir++){
- for(i=0; i<2; i++){
- for(j=0; j<2; j++){
- int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
- : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
- ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
- s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
- }
- }
- }
- }
- }
- }
-
- if (estimate_qp(s, 0) < 0)
- return -1;
-
- if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==I_TYPE && !(s->flags & CODEC_FLAG_QSCALE))
- s->qscale= 3; //reduce clipping problems
-
- if (s->out_format == FMT_MJPEG) {
- /* for mjpeg, we do include qscale in the matrix */
- s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
- for(i=1;i<64;i++){
- int j= s->dsp.idct_permutation[i];
-
- s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
- }
- convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
- s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
- s->qscale= 8;
- }
-
- //FIXME var duplication
- s->current_picture_ptr->key_frame=
- s->current_picture.key_frame= s->pict_type == I_TYPE; //FIXME pic_ptr
- s->current_picture_ptr->pict_type=
- s->current_picture.pict_type= s->pict_type;
-
- if(s->current_picture.key_frame)
- s->picture_in_gop_number=0;
-
- s->last_bits= put_bits_count(&s->pb);
- switch(s->out_format) {
- case FMT_MJPEG:
- if (ENABLE_MJPEG_ENCODER)
- ff_mjpeg_encode_picture_header(s);
- break;
- case FMT_H261:
- if (ENABLE_H261_ENCODER)
- ff_h261_encode_picture_header(s, picture_number);
- break;
- case FMT_H263:
- if (ENABLE_WMV2_ENCODER && s->codec_id == CODEC_ID_WMV2)
- ff_wmv2_encode_picture_header(s, picture_number);
- else if (ENABLE_MSMPEG4_ENCODER && s->h263_msmpeg4)
- msmpeg4_encode_picture_header(s, picture_number);
- else if (s->h263_pred)
- mpeg4_encode_picture_header(s, picture_number);
- else if (ENABLE_RV10_ENCODER && s->codec_id == CODEC_ID_RV10)
- rv10_encode_picture_header(s, picture_number);
- else if (ENABLE_RV20_ENCODER && s->codec_id == CODEC_ID_RV20)
- rv20_encode_picture_header(s, picture_number);
- else if (s->codec_id == CODEC_ID_FLV1)
- ff_flv_encode_picture_header(s, picture_number);
- else
- h263_encode_picture_header(s, picture_number);
- break;
- case FMT_MPEG1:
- mpeg1_encode_picture_header(s, picture_number);
- break;
- case FMT_H264:
- break;
- default:
- assert(0);
- }
- bits= put_bits_count(&s->pb);
- s->header_bits= bits - s->last_bits;
-
- for(i=1; i<s->avctx->thread_count; i++){
- update_duplicate_context_after_me(s->thread_context[i], s);
- }
- s->avctx->execute(s->avctx, encode_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count);
- for(i=1; i<s->avctx->thread_count; i++){
- merge_context_after_encode(s, s->thread_context[i]);
- }
- emms_c();
- return 0;
-}
-
-static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){
- const int intra= s->mb_intra;
- int i;
-
- s->dct_count[intra]++;
-
- for(i=0; i<64; i++){
- int level= block[i];
-
- if(level){
- if(level>0){
- s->dct_error_sum[intra][i] += level;
- level -= s->dct_offset[intra][i];
- if(level<0) level=0;
- }else{
- s->dct_error_sum[intra][i] -= level;
- level += s->dct_offset[intra][i];
- if(level>0) level=0;
- }
- block[i]= level;
- }
- }
-}
-
-static int dct_quantize_trellis_c(MpegEncContext *s,
- DCTELEM *block, int n,
- int qscale, int *overflow){
- const int *qmat;
- const uint8_t *scantable= s->intra_scantable.scantable;
- const uint8_t *perm_scantable= s->intra_scantable.permutated;
- int max=0;
- unsigned int threshold1, threshold2;
- int bias=0;
- int run_tab[65];
- int level_tab[65];
- int score_tab[65];
- int survivor[65];
- int survivor_count;
- int last_run=0;
- int last_level=0;
- int last_score= 0;
- int last_i;
- int coeff[2][64];
- int coeff_count[64];
- int qmul, qadd, start_i, last_non_zero, i, dc;
- const int esc_length= s->ac_esc_length;
- uint8_t * length;
- uint8_t * last_length;
- const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
-
- s->dsp.fdct (block);
-
- if(s->dct_error_sum)
- s->denoise_dct(s, block);
- qmul= qscale*16;
- qadd= ((qscale-1)|1)*8;
-
- if (s->mb_intra) {
- int q;
- if (!s->h263_aic) {
- if (n < 4)
- q = s->y_dc_scale;
- else
- q = s->c_dc_scale;
- q = q << 3;
- } else{
- /* For AIC we skip quant/dequant of INTRADC */
- q = 1 << 3;
- qadd=0;
- }
-
- /* note: block[0] is assumed to be positive */
- block[0] = (block[0] + (q >> 1)) / q;
- start_i = 1;
- last_non_zero = 0;
- qmat = s->q_intra_matrix[qscale];
- if(s->mpeg_quant || s->out_format == FMT_MPEG1)
- bias= 1<<(QMAT_SHIFT-1);
- length = s->intra_ac_vlc_length;
- last_length= s->intra_ac_vlc_last_length;
- } else {
- start_i = 0;
- last_non_zero = -1;
- qmat = s->q_inter_matrix[qscale];
- length = s->inter_ac_vlc_length;
- last_length= s->inter_ac_vlc_last_length;
- }
- last_i= start_i;
-
- threshold1= (1<<QMAT_SHIFT) - bias - 1;
- threshold2= (threshold1<<1);
-
- for(i=63; i>=start_i; i--) {
- const int j = scantable[i];
- int level = block[j] * qmat[j];
-
- if(((unsigned)(level+threshold1))>threshold2){
- last_non_zero = i;
- break;
- }
- }
-
- for(i=start_i; i<=last_non_zero; i++) {
- const int j = scantable[i];
- int level = block[j] * qmat[j];
-
-// if( bias+level >= (1<<(QMAT_SHIFT - 3))
-// || bias-level >= (1<<(QMAT_SHIFT - 3))){
- if(((unsigned)(level+threshold1))>threshold2){
- if(level>0){
- level= (bias + level)>>QMAT_SHIFT;
- coeff[0][i]= level;
- coeff[1][i]= level-1;
-// coeff[2][k]= level-2;
- }else{
- level= (bias - level)>>QMAT_SHIFT;
- coeff[0][i]= -level;
- coeff[1][i]= -level+1;
-// coeff[2][k]= -level+2;
- }
- coeff_count[i]= FFMIN(level, 2);
- assert(coeff_count[i]);
- max |=level;
- }else{
- coeff[0][i]= (level>>31)|1;
- coeff_count[i]= 1;
- }
- }
-
- *overflow= s->max_qcoeff < max; //overflow might have happened
-
- if(last_non_zero < start_i){
- memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
- return last_non_zero;
- }
-
- score_tab[start_i]= 0;
- survivor[0]= start_i;
- survivor_count= 1;
-
- for(i=start_i; i<=last_non_zero; i++){
- int level_index, j;
- const int dct_coeff= FFABS(block[ scantable[i] ]);
- const int zero_distoration= dct_coeff*dct_coeff;
- int best_score=256*256*256*120;
- for(level_index=0; level_index < coeff_count[i]; level_index++){
- int distoration;
- int level= coeff[level_index][i];
- const int alevel= FFABS(level);
- int unquant_coeff;
-
- assert(level);
-
- if(s->out_format == FMT_H263){
- unquant_coeff= alevel*qmul + qadd;
- }else{ //MPEG1
- j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
- if(s->mb_intra){
- unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
- unquant_coeff = (unquant_coeff - 1) | 1;
- }else{
- unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
- unquant_coeff = (unquant_coeff - 1) | 1;
- }
- unquant_coeff<<= 3;
- }
-
- distoration= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distoration;
- level+=64;
- if((level&(~127)) == 0){
- for(j=survivor_count-1; j>=0; j--){
- int run= i - survivor[j];
- int score= distoration + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
- score += score_tab[i-run];
-
- if(score < best_score){
- best_score= score;
- run_tab[i+1]= run;
- level_tab[i+1]= level-64;
- }
- }
-
- if(s->out_format == FMT_H263){
- for(j=survivor_count-1; j>=0; j--){
- int run= i - survivor[j];
- int score= distoration + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
- score += score_tab[i-run];
- if(score < last_score){
- last_score= score;
- last_run= run;
- last_level= level-64;
- last_i= i+1;
- }