}
ff_MPV_frame_end(s);
- av_assert0(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
- av_assert0(s->current_picture.f.pict_type == s->pict_type);
- assert(s->current_picture.f->pict_type == s->current_picture_ptr->f->pict_type);
- assert(s->current_picture.f->pict_type == s->pict_type);
++ av_assert0(s->current_picture.f->pict_type == s->current_picture_ptr->f->pict_type);
++ av_assert0(s->current_picture.f->pict_type == s->pict_type);
- if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+ if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
return ret;
- ff_print_debug_info(s, s->current_picture_ptr);
+ ff_print_debug_info(s, s->current_picture_ptr, pict);
*got_frame = 1;
if (!s->divx_packed && avctx->hwaccel)
ff_thread_finish_setup(avctx);
- av_assert1(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
- av_assert1(s->current_picture.f.pict_type == s->pict_type);
- assert(s->current_picture.f->pict_type ==
- s->current_picture_ptr->f->pict_type);
- assert(s->current_picture.f->pict_type == s->pict_type);
++ av_assert1(s->current_picture.f->pict_type == s->current_picture_ptr->f->pict_type);
++ av_assert1(s->current_picture.f->pict_type == s->pict_type);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
- if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+ if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
return ret;
- ff_print_debug_info(s, s->current_picture_ptr);
+ ff_print_debug_info(s, s->current_picture_ptr, pict);
+ ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_QSCALE_TYPE_MPEG1);
} else if (s->last_picture_ptr != NULL) {
- if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
+ if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
return ret;
- ff_print_debug_info(s, s->last_picture_ptr);
+ ff_print_debug_info(s, s->last_picture_ptr, pict);
+ ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_QSCALE_TYPE_MPEG1);
}
- if (s->last_picture_ptr || s->low_delay)
+ if (s->last_picture_ptr || s->low_delay) {
+ if ( pict->format == AV_PIX_FMT_YUV420P
+ && (s->codec_tag == AV_RL32("GEOV") || s->codec_tag == AV_RL32("GEOX"))) {
+ int x, y, p;
+ av_frame_make_writable(pict);
+ for (p=0; p<3; p++) {
+ int w = FF_CEIL_RSHIFT(pict-> width, !!p);
+ int h = FF_CEIL_RSHIFT(pict->height, !!p);
+ int linesize = pict->linesize[p];
+ for (y=0; y<(h>>1); y++)
+ for (x=0; x<w; x++)
+ FFSWAP(int,
+ pict->data[p][x + y*linesize],
+ pict->data[p][x + (h-1-y)*linesize]);
+ }
+ }
*got_frame = 1;
+ }
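The GEOV/GEOX branch above flips the decoded frame vertically in place, swapping mirrored rows in each plane (chroma dimensions halved via FF_CEIL_RSHIFT). A minimal standalone sketch of the same row swap, with hypothetical names, using FFSWAP from libavutil/common.h:

/* Flip one 8-bit plane upside down by exchanging mirrored rows. */
static void flip_plane(uint8_t *plane, int w, int h, int stride)
{
    int x, y;
    for (y = 0; y < h / 2; y++)
        for (x = 0; x < w; x++)
            FFSWAP(uint8_t, plane[x + y * stride],
                            plane[x + (h - 1 - y) * stride]);
}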
- if (ret && (avctx->err_recognition & AV_EF_EXPLODE))
+ if (slice_ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE))
            return slice_ret;
else
return get_consumed_bytes(s, buf_size);
}else{
w->dsp.spatial_compensation[w->orient]( s->edge_emu_buffer,
s->dest[chroma],
- s->current_picture.f.linesize[!!chroma] );
+ s->current_picture.f->linesize[!!chroma] );
}
if(!zeros_only)
- s->dsp.idct_add ( s->dest[chroma],
+ w->wdsp.idct_add (s->dest[chroma],
- s->current_picture.f.linesize[!!chroma],
+ s->current_picture.f->linesize[!!chroma],
s->block[0] );
block_placed:
int mb_type=0;
Picture * const pic= &s->current_picture;
- init_ref(c, s->new_picture.f.data, s->last_picture.f.data, NULL, 16*mb_x, 16*mb_y, 0);
+ init_ref(c, s->new_picture.f->data, s->last_picture.f->data, NULL, 16*mb_x, 16*mb_y, 0);
- assert(s->quarter_sample==0 || s->quarter_sample==1);
- assert(s->linesize == c->stride);
- assert(s->uvlinesize == c->uvstride);
+ av_assert0(s->quarter_sample==0 || s->quarter_sample==1);
+ av_assert0(s->linesize == c->stride);
+ av_assert0(s->uvlinesize == c->uvstride);
c->penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_cmp);
c->sub_penalty_factor= get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_sub_cmp);
int P[10][2];
const int shift= 1+s->quarter_sample;
const int xy= mb_x + mb_y*s->mb_stride;
- init_ref(c, s->new_picture.f.data, s->last_picture.f.data, NULL, 16*mb_x, 16*mb_y, 0);
+ init_ref(c, s->new_picture.f->data, s->last_picture.f->data, NULL, 16*mb_x, 16*mb_y, 0);
- assert(s->quarter_sample==0 || s->quarter_sample==1);
+ av_assert0(s->quarter_sample==0 || s->quarter_sample==1);
c->pre_penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_pre_cmp);
c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_MV;
s->pict_type = AV_PICTURE_TYPE_P;
} else
s->pict_type = AV_PICTURE_TYPE_B;
- s->current_picture.f.pict_type = s->pict_type;
- s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
+ s->current_picture.f->pict_type = s->pict_type;
+ s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
}
+ s->mpeg_f_code[0][0] += !s->mpeg_f_code[0][0];
+ s->mpeg_f_code[0][1] += !s->mpeg_f_code[0][1];
+ s->mpeg_f_code[1][0] += !s->mpeg_f_code[1][0];
+ s->mpeg_f_code[1][1] += !s->mpeg_f_code[1][1];
+
s->intra_dc_precision = get_bits(&s->gb, 2);
s->picture_structure = get_bits(&s->gb, 2);
s->top_field_first = get_bits1(&s->gb);
ff_MPV_frame_end(s);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
- int ret = av_frame_ref(pict, &s->current_picture_ptr->f);
+ int ret = av_frame_ref(pict, s->current_picture_ptr->f);
if (ret < 0)
return ret;
- ff_print_debug_info(s, s->current_picture_ptr);
+ ff_print_debug_info(s, s->current_picture_ptr, pict);
+ ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_QSCALE_TYPE_MPEG2);
} else {
if (avctx->active_thread_type & FF_THREAD_FRAME)
s->picture_number++;
/* latency of 1 frame for I- and P-frames */
/* XXX: use another variable than picture_number */
if (s->last_picture_ptr != NULL) {
- int ret = av_frame_ref(pict, &s->last_picture_ptr->f);
+ int ret = av_frame_ref(pict, s->last_picture_ptr->f);
if (ret < 0)
return ret;
- ff_print_debug_info(s, s->last_picture_ptr);
+ ff_print_debug_info(s, s->last_picture_ptr, pict);
+ ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_QSCALE_TYPE_MPEG2);
}
}
/* time code: we must convert from the real frame rate to a
* fake MPEG frame rate in case of low frame rate */
fps = (framerate.num + framerate.den / 2) / framerate.den;
- time_code = s->current_picture_ptr->f.coded_picture_number +
+ time_code = s->current_picture_ptr->f->coded_picture_number +
s->avctx->timecode_frame_start;
- s->gop_picture_number = s->current_picture_ptr->f.coded_picture_number;
+ s->gop_picture_number = s->current_picture_ptr->f->coded_picture_number;
- if (s->drop_frame_timecode) {
- /* only works for NTSC 29.97 */
- int d = time_code / 17982;
- int m = time_code % 17982;
- /* not needed since -2,-1 / 1798 in C returns 0 */
- // if (m < 2)
- // m += 2;
- time_code += 18 * d + 2 * ((m - 2) / 1798);
- }
+
+ av_assert0(s->drop_frame_timecode == !!(s->tc.flags & AV_TIMECODE_FLAG_DROPFRAME));
+ if (s->drop_frame_timecode)
+ time_code = av_timecode_adjust_ntsc_framenum2(time_code, fps);
+
put_bits(&s->pb, 5, (uint32_t)((time_code / (fps * 3600)) % 24));
put_bits(&s->pb, 6, (uint32_t)((time_code / (fps * 60)) % 60));
put_bits(&s->pb, 1, 1);
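The deleted inline arithmetic above is the classic NTSC 29.97 drop-frame adjustment that av_timecode_adjust_ntsc_framenum2() now performs (generalized to 30 and 60 fps): timecode numbers 0 and 1 are skipped at the start of every minute except each tenth one. An equivalent sketch of the old 29.97-only code:

/* 17982 = frames per 10-minute block (1800 + 9 * 1798),
 * 1798  = numbers kept in a drop-frame minute (30*60 - 2). */
static int adjust_ntsc_2997(int framenum)
{
    int d = framenum / 17982;   /* complete 10-minute blocks      */
    int m = framenum % 17982;   /* position inside the last block */
    /* (m - 2) / 1798 truncates toward zero, so m < 2 needs no special case */
    return framenum + 18 * d + 2 * ((m - 2) / 1798);
}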
x = s->mb_x * 16;
y = s->mb_y * 16;
- if (x + 16 > s->width)
- x = s->width - 16;
- if (y + 16 > s->height)
- y = s->height - 16;
offset = x + y * s->linesize;
- p_pic = s->new_picture.f.data[0] + offset;
+ p_pic = s->new_picture.f->data[0] + offset;
s->mb_skipped = 1;
for (i = 0; i < s->max_b_frames; i++) {
int diff;
Picture *pic = s->reordered_input_picture[i + 1];
- if (!pic || pic->f.pict_type != AV_PICTURE_TYPE_B)
+ if (!pic || pic->f->pict_type != AV_PICTURE_TYPE_B)
break;
- b_pic = pic->f.data[0] + offset;
+ b_pic = pic->f->data[0] + offset;
if (!pic->shared)
b_pic += INPLACE_OFFSET;
- diff = s->dsp.sad[0](NULL, p_pic, b_pic, s->linesize, 16);
+
+ if (x + 16 > s->width || y + 16 > s->height) {
+ int x1, y1;
+ int xe = FFMIN(16, s->width - x);
+ int ye = FFMIN(16, s->height - y);
+ diff = 0;
+ for (y1 = 0; y1 < ye; y1++) {
+ for (x1 = 0; x1 < xe; x1++) {
+ diff += FFABS(p_pic[x1 + y1 * s->linesize] - b_pic[x1 + y1 * s->linesize]);
+ }
+ }
+ diff = diff * 256 / (xe * ye);
+ } else {
+ diff = s->dsp.sad[0](NULL, p_pic, b_pic, s->linesize, 16);
+ }
if (diff > s->qscale * 70) { // FIXME check that 70 is optimal
s->mb_skipped = 0;
break;
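The new branch above handles macroblocks overhanging the right or bottom edge: it sums absolute differences only over the visible xe*ye pixels and rescales by 256/(xe*ye), keeping the score comparable with the full 16x16 SAD that the s->qscale * 70 threshold was tuned for. The same computation as a standalone sketch:

/* SAD over a partial edge macroblock, rescaled to full-block range;
 * 256 is the pixel count of a complete 16x16 macroblock. */
static int edge_mb_sad(const uint8_t *p, const uint8_t *b,
                       int stride, int xe, int ye)
{
    int x, y, diff = 0;
    for (y = 0; y < ye; y++)
        for (x = 0; x < xe; x++)
            diff += FFABS(p[x + y * stride] - b[x + y * stride]);
    return diff * 256 / (xe * ye);
}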
put_bits(&s->pb, 16, 0);
put_bits(&s->pb, 16, GOP_STARTCODE);
- time = s->current_picture_ptr->f.pts;
+ time = s->current_picture_ptr->f->pts;
if (s->reordered_input_picture[1])
- time = FFMIN(time, s->reordered_input_picture[1]->f.pts);
+ time = FFMIN(time, s->reordered_input_picture[1]->f->pts);
time = time * s->avctx->time_base.num;
+ s->last_time_base = FFUDIV(time, s->avctx->time_base.den);
- seconds = time / s->avctx->time_base.den;
- minutes = seconds / 60;
- seconds %= 60;
- hours = minutes / 60;
- minutes %= 60;
- hours %= 24;
+ seconds = FFUDIV(time, s->avctx->time_base.den);
+ minutes = FFUDIV(seconds, 60); seconds = FFUMOD(seconds, 60);
+ hours = FFUDIV(minutes, 60); minutes = FFUMOD(minutes, 60);
+ hours = FFUMOD(hours , 24);
put_bits(&s->pb, 5, hours);
put_bits(&s->pb, 6, minutes);
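/* FFUDIV()/FFUMOD() are floored division/modulo: 'time' may be negative
 * (e.g. negative pts), and plain C '/' and '%' truncate toward zero:
 *          -1 / 60 == 0   and      -1 % 60 == -1
 *   FFUDIV(-1, 60) == -1  and FFUMOD(-1, 60) == 59
 * so the floored variants keep the hh:mm:ss fields in range. */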
s->dsp.clear_blocks(s->block[0]);
- s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
- s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
- s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
+ s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
+ s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
+ s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
- assert(ref == 0);
+ if (ref)
+ av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
ff_MPV_decode_mb(s, s->block);
}
{
int i, ret;
+ if (pic->qscale_table_buf)
+ if ( pic->alloc_mb_width != s->mb_width
+ || pic->alloc_mb_height != s->mb_height)
+ ff_free_picture_tables(pic);
+
if (shared) {
- av_assert0(pic->f.data[0]);
- assert(pic->f->data[0]);
++ av_assert0(pic->f->data[0]);
pic->shared = 1;
} else {
- av_assert0(!pic->f.buf[0]);
- assert(!pic->f->buf[0]);
++ av_assert0(!pic->f->buf[0]);
if (alloc_frame_buffer(s, pic) < 0)
return -1;
s->coded_picture_number = s1->coded_picture_number;
s->picture_number = s1->picture_number;
+ av_assert0(!s->picture || s->picture != s1->picture);
+ if(s->picture)
for (i = 0; i < MAX_PICTURE_COUNT; i++) {
ff_mpeg_unref_picture(s, &s->picture[i]);
- if (s1->picture[i].f.buf[0] &&
+ if (s1->picture[i].f->buf[0] &&
(ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
return ret;
}
memset(&s->next_picture, 0, sizeof(s->next_picture));
memset(&s->last_picture, 0, sizeof(s->last_picture));
memset(&s->current_picture, 0, sizeof(s->current_picture));
- av_frame_unref(&s->next_picture.f);
- av_frame_unref(&s->last_picture.f);
- av_frame_unref(&s->current_picture.f);
+ memset(&s->new_picture, 0, sizeof(s->new_picture));
+ s->next_picture.f = av_frame_alloc();
+ if (!s->next_picture.f)
+ goto fail;
+ s->last_picture.f = av_frame_alloc();
+ if (!s->last_picture.f)
+ goto fail;
+ s->current_picture.f = av_frame_alloc();
+ if (!s->current_picture.f)
+ goto fail;
+ s->new_picture.f = av_frame_alloc();
+ if (!s->new_picture.f)
+ goto fail;
- if (s->width && s->height) {
if (init_context_frame(s))
goto fail;
static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
{
- if (pic->f.buf[0] == NULL)
+ if (pic == s->last_picture_ptr)
+ return 0;
+ if (pic->f->buf[0] == NULL)
return 1;
if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
return 1;
if (shared) {
for (i = 0; i < MAX_PICTURE_COUNT; i++) {
- if (s->picture[i].f.buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
- if (s->picture[i].f->buf[0] == NULL)
++ if (s->picture[i].f->buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
return i;
}
} else {
int h_chroma_shift, v_chroma_shift;
av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
&h_chroma_shift, &v_chroma_shift);
- if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f.buf[0])
- if (s->pict_type != AV_PICTURE_TYPE_I)
++ if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
+ av_log(avctx, AV_LOG_DEBUG,
+ "allocating dummy last picture for B frame\n");
+ else if (s->pict_type != AV_PICTURE_TYPE_I)
av_log(avctx, AV_LOG_ERROR,
"warning: first frame is no keyframe\n");
else if (s->picture_structure != PICT_FRAME)
s->last_picture_ptr = &s->picture[i];
s->last_picture_ptr->reference = 3;
- s->last_picture_ptr->f.key_frame = 0;
- s->last_picture_ptr->f.pict_type = AV_PICTURE_TYPE_P;
- s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
++ s->last_picture_ptr->f->key_frame = 0;
++ s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
s->last_picture_ptr = NULL;
return -1;
}
- memset(s->last_picture_ptr->f->data[0], 0,
- avctx->height * s->last_picture_ptr->f->linesize[0]);
- memset(s->last_picture_ptr->f->data[1], 0x80,
- (avctx->height >> v_chroma_shift) *
- s->last_picture_ptr->f->linesize[1]);
- memset(s->last_picture_ptr->f->data[2], 0x80,
- (avctx->height >> v_chroma_shift) *
- s->last_picture_ptr->f->linesize[2]);
+ if (!avctx->hwaccel) {
+ for(i=0; i<avctx->height; i++)
- memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i,
++ memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
+ 0x80, avctx->width);
+ for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
- memset(s->last_picture_ptr->f.data[1] + s->last_picture_ptr->f.linesize[1]*i,
++ memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
+ 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
- memset(s->last_picture_ptr->f.data[2] + s->last_picture_ptr->f.linesize[2]*i,
++ memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
+ 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
+ }
+
+ if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
+ for(i=0; i<avctx->height; i++)
- memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
++ memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
+ }
+ }
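/* Rationale for the fill values: 0x80 is mid-grey in both luma and chroma,
 * so concealment against the dummy reference predicts something neutral;
 * for FLV1/H263 the luma is then overwritten with 16, i.e. black in
 * limited-range YUV. */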
ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
s->next_picture_ptr = &s->picture[i];
s->next_picture_ptr->reference = 3;
- s->next_picture_ptr->f.key_frame = 0;
- s->next_picture_ptr->f.pict_type = AV_PICTURE_TYPE_P;
- s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
++ s->next_picture_ptr->f->key_frame = 0;
++ s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
s->next_picture_ptr = NULL;
ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
}
- memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
- memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
+#if 0 // BUFREF-FIXME
++ memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
++ memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
+#endif
if (s->last_picture_ptr) {
ff_mpeg_unref_picture(s, &s->last_picture);
- if (s->last_picture_ptr->f.buf[0] &&
+ if (s->last_picture_ptr->f->buf[0] &&
(ret = ff_mpeg_ref_picture(s, &s->last_picture,
s->last_picture_ptr)) < 0)
return ret;
return ret;
}
- if (s->pict_type != AV_PICTURE_TYPE_I &&
- !(s->last_picture_ptr && s->last_picture_ptr->f->buf[0])) {
- av_log(s, AV_LOG_ERROR,
- "Non-reference picture received and no reference available\n");
- return AVERROR_INVALIDDATA;
- }
+ av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
- s->last_picture_ptr->f.buf[0]));
++ s->last_picture_ptr->f->buf[0]));
if (s->picture_structure!= PICT_FRAME) {
int i;
if (IS_INTERLACED(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "=");
+ av_log(avctx, AV_LOG_DEBUG, "=");
else
- av_log(s->avctx, AV_LOG_DEBUG, " ");
+ av_log(avctx, AV_LOG_DEBUG, " ");
+ }
+ }
+ av_log(avctx, AV_LOG_DEBUG, "\n");
+ }
+ }
+
+ if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
+ (avctx->debug_mv)) {
+ const int shift = 1 + quarter_sample;
+ int mb_y;
+ uint8_t *ptr;
+ int i;
+ int h_chroma_shift, v_chroma_shift, block_height;
+ const int width = avctx->width;
+ const int height = avctx->height;
+ const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
+ const int mv_stride = (mb_width << mv_sample_log2) +
+ (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
+
+ *low_delay = 0; // needed to see the vectors without trashing the buffers
+
+ avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
+
+ av_frame_make_writable(pict);
+
+ pict->opaque = NULL;
+ ptr = pict->data[0];
+ block_height = 16 >> v_chroma_shift;
+
+ for (mb_y = 0; mb_y < mb_height; mb_y++) {
+ int mb_x;
+ for (mb_x = 0; mb_x < mb_width; mb_x++) {
+ const int mb_index = mb_x + mb_y * mb_stride;
+ if ((avctx->debug_mv) && motion_val[0]) {
+ int type;
+ for (type = 0; type < 3; type++) {
+ int direction = 0;
+ switch (type) {
+ case 0:
+ if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
+ (pict->pict_type!= AV_PICTURE_TYPE_P))
+ continue;
+ direction = 0;
+ break;
+ case 1:
+ if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
+ (pict->pict_type!= AV_PICTURE_TYPE_B))
+ continue;
+ direction = 0;
+ break;
+ case 2:
+ if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
+ (pict->pict_type!= AV_PICTURE_TYPE_B))
+ continue;
+ direction = 1;
+ break;
+ }
+ if (!USES_LIST(mbtype_table[mb_index], direction))
+ continue;
+
+ if (IS_8X8(mbtype_table[mb_index])) {
+ int i;
+ for (i = 0; i < 4; i++) {
+ int sx = mb_x * 16 + 4 + 8 * (i & 1);
+ int sy = mb_y * 16 + 4 + 8 * (i >> 1);
+ int xy = (mb_x * 2 + (i & 1) +
+ (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
+ int mx = (motion_val[direction][xy][0] >> shift) + sx;
+ int my = (motion_val[direction][xy][1] >> shift) + sy;
+ draw_arrow(ptr, sx, sy, mx, my, width,
+ height, pict->linesize[0], 100);
+ }
+ } else if (IS_16X8(mbtype_table[mb_index])) {
+ int i;
+ for (i = 0; i < 2; i++) {
+ int sx = mb_x * 16 + 8;
+ int sy = mb_y * 16 + 4 + 8 * i;
+ int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
+ int mx = (motion_val[direction][xy][0] >> shift);
+ int my = (motion_val[direction][xy][1] >> shift);
+
+ if (IS_INTERLACED(mbtype_table[mb_index]))
+ my *= 2;
+
+ draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
+ height, pict->linesize[0], 100);
+ }
+ } else if (IS_8X16(mbtype_table[mb_index])) {
+ int i;
+ for (i = 0; i < 2; i++) {
+ int sx = mb_x * 16 + 4 + 8 * i;
+ int sy = mb_y * 16 + 8;
+ int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
+ int mx = motion_val[direction][xy][0] >> shift;
+ int my = motion_val[direction][xy][1] >> shift;
+
+ if (IS_INTERLACED(mbtype_table[mb_index]))
+ my *= 2;
+
+ draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
+ height, pict->linesize[0], 100);
+ }
+ } else {
+ int sx= mb_x * 16 + 8;
+ int sy= mb_y * 16 + 8;
+ int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
+ int mx= (motion_val[direction][xy][0]>>shift) + sx;
+ int my= (motion_val[direction][xy][1]>>shift) + sy;
+ draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
+ }
+ }
+ }
+ if ((avctx->debug & FF_DEBUG_VIS_QP)) {
+ uint64_t c = (qscale_table[mb_index] * 128 / 31) *
+ 0x0101010101010101ULL;
+ int y;
+ for (y = 0; y < block_height; y++) {
+ *(uint64_t *)(pict->data[1] + 8 * mb_x +
+ (block_height * mb_y + y) *
+ pict->linesize[1]) = c;
+ *(uint64_t *)(pict->data[2] + 8 * mb_x +
+ (block_height * mb_y + y) *
+ pict->linesize[2]) = c;
+ }
+ }
+ if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
+ motion_val[0]) {
+ int mb_type = mbtype_table[mb_index];
+ uint64_t u,v;
+ int y;
+#define COLOR(theta, r) \
+ u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
+ v = (int)(128 + r * sin(theta * 3.141592 / 180));
+
+
+ u = v = 128;
+ if (IS_PCM(mb_type)) {
+ COLOR(120, 48)
+ } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
+ IS_INTRA16x16(mb_type)) {
+ COLOR(30, 48)
+ } else if (IS_INTRA4x4(mb_type)) {
+ COLOR(90, 48)
+ } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
+ // COLOR(120, 48)
+ } else if (IS_DIRECT(mb_type)) {
+ COLOR(150, 48)
+ } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
+ COLOR(170, 48)
+ } else if (IS_GMC(mb_type)) {
+ COLOR(190, 48)
+ } else if (IS_SKIP(mb_type)) {
+ // COLOR(180, 48)
+ } else if (!USES_LIST(mb_type, 1)) {
+ COLOR(240, 48)
+ } else if (!USES_LIST(mb_type, 0)) {
+ COLOR(0, 48)
+ } else {
+ av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
+ COLOR(300,48)
+ }
+
+ u *= 0x0101010101010101ULL;
+ v *= 0x0101010101010101ULL;
+ for (y = 0; y < block_height; y++) {
+ *(uint64_t *)(pict->data[1] + 8 * mb_x +
+ (block_height * mb_y + y) * pict->linesize[1]) = u;
+ *(uint64_t *)(pict->data[2] + 8 * mb_x +
+ (block_height * mb_y + y) * pict->linesize[2]) = v;
+ }
+
+ // segmentation
+ if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
+ *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
+ (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
+ *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
+ (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
+ }
+ if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
+ for (y = 0; y < 16; y++)
+ pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
+ pict->linesize[0]] ^= 0x80;
+ }
+ if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
+ int dm = 1 << (mv_sample_log2 - 2);
+ for (i = 0; i < 4; i++) {
+ int sx = mb_x * 16 + 8 * (i & 1);
+ int sy = mb_y * 16 + 8 * (i >> 1);
+ int xy = (mb_x * 2 + (i & 1) +
+ (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
+ // FIXME bidir
+ int32_t *mv = (int32_t *) &motion_val[0][xy];
+ if (mv[0] != mv[dm] ||
+ mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
+ for (y = 0; y < 8; y++)
+ pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
+ if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
+ *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
+ pict->linesize[0]) ^= 0x8080808080808080ULL;
+ }
+ }
+
+ if (IS_INTERLACED(mb_type) &&
+ avctx->codec->id == AV_CODEC_ID_H264) {
+ // hmm
+ }
+ }
+ mbskip_table[mb_index] = 0;
+ }
+ }
+ }
+}
+
+void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
+{
+ ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
+ p->qscale_table, p->motion_val, &s->low_delay,
+ s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
+}
+
+int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
+{
+ AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
+ int offset = 2*s->mb_stride + 1;
+ if(!ref)
+ return AVERROR(ENOMEM);
+ av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
+ ref->size -= offset;
+ ref->data += offset;
+ return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
+}
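For reference, the table exported above can be read back on the caller's side through the matching libavutil accessor; a minimal sketch, assuming av_frame_get_qp_table() as the counterpart of the av_frame_set_qp_table() call used here:

/* Return the qscale of one macroblock from an exported QP table, or -1. */
static int read_mb_qscale(AVFrame *f, int mb_x, int mb_y)
{
    int qstride, qtype;
    int8_t *qp = av_frame_get_qp_table(f, &qstride, &qtype);
    return qp ? qp[mb_x + mb_y * qstride] : -1;
}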
+
+static inline int hpel_motion_lowres(MpegEncContext *s,
+ uint8_t *dest, uint8_t *src,
+ int field_based, int field_select,
+ int src_x, int src_y,
+ int width, int height, ptrdiff_t stride,
+ int h_edge_pos, int v_edge_pos,
+ int w, int h, h264_chroma_mc_func *pix_op,
+ int motion_x, int motion_y)
+{
+ const int lowres = s->avctx->lowres;
+ const int op_index = FFMIN(lowres, 3);
+ const int s_mask = (2 << lowres) - 1;
+ int emu = 0;
+ int sx, sy;
+
+ if (s->quarter_sample) {
+ motion_x /= 2;
+ motion_y /= 2;
+ }
+
+ sx = motion_x & s_mask;
+ sy = motion_y & s_mask;
+ src_x += motion_x >> lowres + 1;
+ src_y += motion_y >> lowres + 1;
+
+ src += src_y * stride + src_x;
+
+ if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
+ (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
+ s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
+ s->linesize, s->linesize,
+ w + 1, (h + 1) << field_based,
+ src_x, src_y << field_based,
+ h_edge_pos, v_edge_pos);
+ src = s->edge_emu_buffer;
+ emu = 1;
+ }
+
+ sx = (sx << 2) >> lowres;
+ sy = (sy << 2) >> lowres;
+ if (field_select)
+ src += s->linesize;
+ pix_op[op_index](dest, src, stride, h, sx, sy);
+ return emu;
+}
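/* Worked example of the sub-pel bookkeeping above, for lowres == 1
 * (half-resolution decode). Note 'motion_x >> lowres + 1' parses as
 * 'motion_x >> (lowres + 1)' because '+' binds tighter than '>>'.
 *   motion_x = 5 (half-pel):  s_mask = (2 << 1) - 1 = 3
 *   sx   = 5 & 3         = 1  -> sub-pel fraction at input scale
 *   step = 5 >> (1 + 1)  = 1  -> integer step in downscaled pixels
 *   sx   = (1 << 2) >> 1 = 2  -> rescaled to the eighth-pel range that
 *                                the h264_chroma_mc functions expect */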
+
+/* apply one mpeg motion vector to the three components */
+static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
+ uint8_t *dest_y,
+ uint8_t *dest_cb,
+ uint8_t *dest_cr,
+ int field_based,
+ int bottom_field,
+ int field_select,
+ uint8_t **ref_picture,
+ h264_chroma_mc_func *pix_op,
+ int motion_x, int motion_y,
+ int h, int mb_y)
+{
+ uint8_t *ptr_y, *ptr_cb, *ptr_cr;
+ int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
+ ptrdiff_t uvlinesize, linesize;
+ const int lowres = s->avctx->lowres;
+ const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
+ const int block_s = 8>>lowres;
+ const int s_mask = (2 << lowres) - 1;
+ const int h_edge_pos = s->h_edge_pos >> lowres;
+ const int v_edge_pos = s->v_edge_pos >> lowres;
- linesize = s->current_picture.f.linesize[0] << field_based;
- uvlinesize = s->current_picture.f.linesize[1] << field_based;
++ linesize = s->current_picture.f->linesize[0] << field_based;
++ uvlinesize = s->current_picture.f->linesize[1] << field_based;
+
+ // FIXME obviously not perfect but qpel will not work in lowres anyway
+ if (s->quarter_sample) {
+ motion_x /= 2;
+ motion_y /= 2;
+ }
+
+ if(field_based){
+ motion_y += (bottom_field - field_select)*((1 << lowres)-1);
+ }
+
+ sx = motion_x & s_mask;
+ sy = motion_y & s_mask;
+ src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
+ src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
+
+ if (s->out_format == FMT_H263) {
+ uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
+ uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
+ uvsrc_x = src_x >> 1;
+ uvsrc_y = src_y >> 1;
+ } else if (s->out_format == FMT_H261) {
+ // even chroma mv's are full pel in H261
+ mx = motion_x / 4;
+ my = motion_y / 4;
+ uvsx = (2 * mx) & s_mask;
+ uvsy = (2 * my) & s_mask;
+ uvsrc_x = s->mb_x * block_s + (mx >> lowres);
+ uvsrc_y = mb_y * block_s + (my >> lowres);
+ } else {
+ if(s->chroma_y_shift){
+ mx = motion_x / 2;
+ my = motion_y / 2;
+ uvsx = mx & s_mask;
+ uvsy = my & s_mask;
+ uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
+ uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
+ } else {
+ if(s->chroma_x_shift){
+ //Chroma422
+ mx = motion_x / 2;
+ uvsx = mx & s_mask;
+ uvsy = motion_y & s_mask;
+ uvsrc_y = src_y;
+ uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
+ } else {
+ //Chroma444
+ uvsx = motion_x & s_mask;
+ uvsy = motion_y & s_mask;
+ uvsrc_x = src_x;
+ uvsrc_y = src_y;
+ }
+ }
+ }
+
+ ptr_y = ref_picture[0] + src_y * linesize + src_x;
+ ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
+ ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
+
+ if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
+ (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
+ s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
+ linesize >> field_based, linesize >> field_based,
+ 17, 17 + field_based,
+ src_x, src_y << field_based, h_edge_pos,
+ v_edge_pos);
+ ptr_y = s->edge_emu_buffer;
+ if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
+ uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
+ s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
+ uvlinesize >> field_based, uvlinesize >> field_based,
+ 9, 9 + field_based,
+ uvsrc_x, uvsrc_y << field_based,
+ h_edge_pos >> 1, v_edge_pos >> 1);
+ s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr,
+ uvlinesize >> field_based,uvlinesize >> field_based,
+ 9, 9 + field_based,
+ uvsrc_x, uvsrc_y << field_based,
+ h_edge_pos >> 1, v_edge_pos >> 1);
+ ptr_cb = uvbuf;
+ ptr_cr = uvbuf + 16;
+ }
+ }
+
- // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
++ // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
+ if (bottom_field) {
+ dest_y += s->linesize;
+ dest_cb += s->uvlinesize;
+ dest_cr += s->uvlinesize;
+ }
+
+ if (field_select) {
+ ptr_y += s->linesize;
+ ptr_cb += s->uvlinesize;
+ ptr_cr += s->uvlinesize;
+ }
+
+ sx = (sx << 2) >> lowres;
+ sy = (sy << 2) >> lowres;
+ pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
+
+ if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
+ int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
+ uvsx = (uvsx << 2) >> lowres;
+ uvsy = (uvsy << 2) >> lowres;
+ if (hc) {
+ pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
+ pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
+ }
+ }
+ // FIXME h261 lowres loop filter
+}
+
+static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
+ uint8_t *dest_cb, uint8_t *dest_cr,
+ uint8_t **ref_picture,
+ h264_chroma_mc_func * pix_op,
+ int mx, int my)
+{
+ const int lowres = s->avctx->lowres;
+ const int op_index = FFMIN(lowres, 3);
+ const int block_s = 8 >> lowres;
+ const int s_mask = (2 << lowres) - 1;
+ const int h_edge_pos = s->h_edge_pos >> lowres + 1;
+ const int v_edge_pos = s->v_edge_pos >> lowres + 1;
+ int emu = 0, src_x, src_y, sx, sy;
+ ptrdiff_t offset;
+ uint8_t *ptr;
+
+ if (s->quarter_sample) {
+ mx /= 2;
+ my /= 2;
+ }
+
+ /* In case of 8X8, we construct a single chroma motion vector
+ with a special rounding */
+ mx = ff_h263_round_chroma(mx);
+ my = ff_h263_round_chroma(my);
+
+ sx = mx & s_mask;
+ sy = my & s_mask;
+ src_x = s->mb_x * block_s + (mx >> lowres + 1);
+ src_y = s->mb_y * block_s + (my >> lowres + 1);
+
+ offset = src_y * s->uvlinesize + src_x;
+ ptr = ref_picture[1] + offset;
+ if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
+ (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
+ s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+ s->uvlinesize, s->uvlinesize,
+ 9, 9,
+ src_x, src_y, h_edge_pos, v_edge_pos);
+ ptr = s->edge_emu_buffer;
+ emu = 1;
+ }
+ sx = (sx << 2) >> lowres;
+ sy = (sy << 2) >> lowres;
+ pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
+
+ ptr = ref_picture[2] + offset;
+ if (emu) {
+ s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+ s->uvlinesize, s->uvlinesize,
+ 9, 9,
+ src_x, src_y, h_edge_pos, v_edge_pos);
+ ptr = s->edge_emu_buffer;
+ }
+ pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
+}
+
+/**
+ * motion compensation of a single macroblock
+ * @param s context
+ * @param dest_y luma destination pointer
+ * @param dest_cb chroma cb/u destination pointer
+ * @param dest_cr chroma cr/v destination pointer
+ * @param dir direction (0->forward, 1->backward)
+ * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
+ * @param pix_op halfpel motion compensation function (average or put normally)
+ * the motion vectors are taken from s->mv and the MV type from s->mv_type
+ */
+static inline void MPV_motion_lowres(MpegEncContext *s,
+ uint8_t *dest_y, uint8_t *dest_cb,
+ uint8_t *dest_cr,
+ int dir, uint8_t **ref_picture,
+ h264_chroma_mc_func *pix_op)
+{
+ int mx, my;
+ int mb_x, mb_y, i;
+ const int lowres = s->avctx->lowres;
+ const int block_s = 8 >>lowres;
+
+ mb_x = s->mb_x;
+ mb_y = s->mb_y;
+
+ switch (s->mv_type) {
+ case MV_TYPE_16X16:
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 0, 0, 0,
+ ref_picture, pix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1],
+ 2 * block_s, mb_y);
+ break;
+ case MV_TYPE_8X8:
+ mx = 0;
+ my = 0;
+ for (i = 0; i < 4; i++) {
+ hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
+ s->linesize) * block_s,
+ ref_picture[0], 0, 0,
+ (2 * mb_x + (i & 1)) * block_s,
+ (2 * mb_y + (i >> 1)) * block_s,
+ s->width, s->height, s->linesize,
+ s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
+ block_s, block_s, pix_op,
+ s->mv[dir][i][0], s->mv[dir][i][1]);
+
+ mx += s->mv[dir][i][0];
+ my += s->mv[dir][i][1];
+ }
+
+ if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
+ chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
+ pix_op, mx, my);
+ break;
+ case MV_TYPE_FIELD:
+ if (s->picture_structure == PICT_FRAME) {
+ /* top field */
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 1, 0, s->field_select[dir][0],
+ ref_picture, pix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1],
+ block_s, mb_y);
+ /* bottom field */
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 1, 1, s->field_select[dir][1],
+ ref_picture, pix_op,
+ s->mv[dir][1][0], s->mv[dir][1][1],
+ block_s, mb_y);
+ } else {
+ if (s->picture_structure != s->field_select[dir][0] + 1 &&
+ s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
- ref_picture = s->current_picture_ptr->f.data;
++ ref_picture = s->current_picture_ptr->f->data;
+
+ }
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 0, 0, s->field_select[dir][0],
+ ref_picture, pix_op,
+ s->mv[dir][0][0],
+ s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
+ }
+ break;
+ case MV_TYPE_16X8:
+ for (i = 0; i < 2; i++) {
+ uint8_t **ref2picture;
+
+ if (s->picture_structure == s->field_select[dir][i] + 1 ||
+ s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
+ ref2picture = ref_picture;
+ } else {
- ref2picture = s->current_picture_ptr->f.data;
++ ref2picture = s->current_picture_ptr->f->data;
+ }
+
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 0, 0, s->field_select[dir][i],
+ ref2picture, pix_op,
+ s->mv[dir][i][0], s->mv[dir][i][1] +
+ 2 * block_s * i, block_s, mb_y >> 1);
+
+ dest_y += 2 * block_s * s->linesize;
+ dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
+ dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
+ }
+ break;
+ case MV_TYPE_DMV:
+ if (s->picture_structure == PICT_FRAME) {
+ for (i = 0; i < 2; i++) {
+ int j;
+ for (j = 0; j < 2; j++) {
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 1, j, j ^ i,
+ ref_picture, pix_op,
+ s->mv[dir][2 * i + j][0],
+ s->mv[dir][2 * i + j][1],
+ block_s, mb_y);
+ }
+ pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
+ }
+ } else {
+ for (i = 0; i < 2; i++) {
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 0, 0, s->picture_structure != i + 1,
+ ref_picture, pix_op,
+ s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
+ 2 * block_s, mb_y >> 1);
+
+ // after put we make avg of the same block
+ pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
+
+ // opposite parity is always in the same
+ // frame if this is second field
+ if (!s->first_field) {
- ref_picture = s->current_picture_ptr->f.data;
++ ref_picture = s->current_picture_ptr->f->data;
}
}
- av_log(s->avctx, AV_LOG_DEBUG, "\n");
}
+ break;
+ default:
+ av_assert2(0);
}
}
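/* Note the pix_op chaining used above and by the callers: the first
 * prediction is written with a "put" operation, then pix_op is switched
 * to the matching "avg" table so the second field or direction is
 * averaged into the same destination, giving the rounded two-way mean. */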
int dct_linesize, dct_offset;
op_pixels_func (*op_pix)[4];
qpel_mc_func (*op_qpix)[16];
- const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
- const int uvlinesize = s->current_picture.f.linesize[1];
+ const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
+ const int uvlinesize = s->current_picture.f->linesize[1];
- const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
- const int block_size = 8;
+ const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
+ const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
/* avoid copy if macroblock skipped in last frame too */
/* skip only during decoding as we might trash the buffers during encoding a bit */
}
}
- op_qpix= s->me.qpel_put;
- if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
- op_pix = s->hdsp.put_pixels_tab;
+ if(lowres_flag){
+ h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
+
+ if (s->mv_dir & MV_DIR_FORWARD) {
- MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
++ MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
+ op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
+ }
+ if (s->mv_dir & MV_DIR_BACKWARD) {
- MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
++ MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
+ }
}else{
- op_pix = s->hdsp.put_no_rnd_pixels_tab;
- }
- if (s->mv_dir & MV_DIR_FORWARD) {
- ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
- op_pix = s->hdsp.avg_pixels_tab;
- op_qpix= s->me.qpel_avg;
- }
- if (s->mv_dir & MV_DIR_BACKWARD) {
- ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
+ op_qpix = s->me.qpel_put;
+ if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
+ op_pix = s->hdsp.put_pixels_tab;
+ }else{
+ op_pix = s->hdsp.put_no_rnd_pixels_tab;
+ }
+ if (s->mv_dir & MV_DIR_FORWARD) {
- ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
++ ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
+ op_pix = s->hdsp.avg_pixels_tab;
+ op_qpix= s->me.qpel_avg;
+ }
+ if (s->mv_dir & MV_DIR_BACKWARD) {
- ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
++ ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
+ }
}
}
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
{
- ff_draw_horiz_band(s->avctx, &s->current_picture_ptr->f,
- &s->last_picture_ptr->f, y, h, s->picture_structure,
- ff_draw_horiz_band(s->avctx, s->current_picture.f,
- s->last_picture.f, y, h, s->picture_structure,
++ ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
++ s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
s->first_field, s->low_delay);
}
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
- const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
- const int uvlinesize = s->current_picture.f.linesize[1];
+ const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
+ const int uvlinesize = s->current_picture.f->linesize[1];
- const int mb_size= 4;
+ const int mb_size= 4 - s->avctx->lowres;
s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
* Picture.
*/
typedef struct Picture{
- struct AVFrame f;
+ struct AVFrame *f;
+    uint8_t avframe_padding[1024]; // hack to allow linking to an avutil with larger AVFrame
ThreadFrame tf;
AVBufferRef *qscale_table_buf;
if (ff_MPV_common_init(s) < 0)
return -1;
- s->avctx->coded_frame = &s->current_picture.f;
- if (ARCH_X86)
- ff_MPV_encode_init_x86(s);
-
+ s->avctx->coded_frame = s->current_picture.f;
if (s->msmpeg4_version) {
FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
int w = s->width >> h_shift;
int h = s->height >> v_shift;
uint8_t *src = pic_arg->data[i];
- uint8_t *dst = pic->f.data[i];
+ uint8_t *dst = pic->f->data[i];
+ if (s->codec_id == AV_CODEC_ID_AMV && !(s->avctx->flags & CODEC_FLAG_EMU_EDGE)) {
+ h = ((s->height + 15)/16*16) >> v_shift;
+ }
+
if (!s->avctx->rc_buffer_size)
dst += INPLACE_OFFSET;
for (y = 0; y < s->mb_height * bw; y++) {
for (x = 0; x < s->mb_width * bw; x++) {
int off = p->shared ? 0 : 16;
- uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
- uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
+ uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
+ uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
- switch (s->avctx->frame_skip_exp) {
+ switch (FFABS(s->avctx->frame_skip_exp)) {
case 0: score = FFMAX(score, v); break;
case 1: score += FFABS(v); break;
- case 2: score += v * v; break;
- case 3: score64 += FFABS(v * v * (int64_t)v); break;
- case 4: score64 += v * v * (int64_t)(v * v); break;
+ case 2: score64 += v * (int64_t)v; break;
+ case 3: score64 += FFABS(v * (int64_t)v * v); break;
+ case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
}
}
}
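/* The (int64_t) casts above matter: frame_skip_cmp on an 8x8 block can
 * return values around 255*255*64 for squared-error metrics, so a 32-bit
 * v * v would overflow; widening one operand keeps score64 exact. */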
/* set next picture type & ordering */
if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
- av_frame_unref(&s->input_picture[0]->f);
+ if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
+ if (s->picture_in_gop_number < s->gop_size &&
+ s->next_picture_ptr &&
+ skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
+            // FIXME check that the gop check above is +-1 correct
++ av_frame_unref(s->input_picture[0]->f);
+
+ ff_vbv_update(s, 0);
+
+ goto no_output_pic;
+ }
+ }
+
if (/*s->picture_in_gop_number >= s->gop_size ||*/
s->next_picture_ptr == NULL || s->intra_only) {
s->reordered_input_picture[0] = s->input_picture[0];
} else {
int b_frames;
- if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
- if (s->picture_in_gop_number < s->gop_size &&
- skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
- // FIXME check that te gop check above is +-1 correct
- av_frame_unref(s->input_picture[0]->f);
-
- emms_c();
- ff_vbv_update(s, 0);
-
- goto no_output_pic;
- }
- }
-
if (s->flags & CODEC_FLAG_PASS2) {
for (i = 0; i < s->max_b_frames + 1; i++) {
- int pict_num = s->input_picture[0]->f.display_picture_number + i;
+ int pict_num = s->input_picture[0]->f->display_picture_number + i;
if (pict_num >= s->rc_context.num_entries)
break;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
int hshift = desc->log2_chroma_w;
int vshift = desc->log2_chroma_h;
- s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
- s->dsp.draw_edges(s->current_picture.f->data[0], s->linesize,
++ s->dsp.draw_edges(s->current_picture.f->data[0], s->current_picture.f->linesize[0],
s->h_edge_pos, s->v_edge_pos,
EDGE_WIDTH, EDGE_WIDTH,
EDGE_TOP | EDGE_BOTTOM);
- s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
- s->dsp.draw_edges(s->current_picture.f->data[1], s->uvlinesize,
++ s->dsp.draw_edges(s->current_picture.f->data[1], s->current_picture.f->linesize[1],
s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
EDGE_TOP | EDGE_BOTTOM);
- s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
- s->dsp.draw_edges(s->current_picture.f->data[2], s->uvlinesize,
++ s->dsp.draw_edges(s->current_picture.f->data[2], s->current_picture.f->linesize[2],
s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
EDGE_TOP | EDGE_BOTTOM);
if (s->pict_type!= AV_PICTURE_TYPE_B)
s->last_non_b_pict_type = s->pict_type;
- s->avctx->coded_frame = &s->current_picture_ptr->f;
- if (s->encoding) {
- /* release non-reference frames */
- for (i = 0; i < MAX_PICTURE_COUNT; i++) {
- if (!s->picture[i].reference)
- ff_mpeg_unref_picture(s, &s->picture[i]);
- }
- }
-
+ s->avctx->coded_frame = s->current_picture_ptr->f;
}
}
/* output? */
- if (s->new_picture.f.data[0]) {
+ if (s->new_picture.f->data[0]) {
- if (!pkt->data &&
- (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
+ if ((ret = ff_alloc_packet2(avctx, pkt, s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000)) < 0)
return ret;
if (s->mb_info) {
s->mb_info_ptr = av_packet_new_side_data(pkt,
s->total_bits += s->frame_bits;
avctx->frame_bits = s->frame_bits;
- pkt->pts = s->current_picture.f.pts;
+ pkt->pts = s->current_picture.f->pts;
- if (!s->low_delay) {
+ if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
- if (!s->current_picture.f.coded_picture_number)
+ if (!s->current_picture.f->coded_picture_number)
pkt->dts = pkt->pts - s->dts_delta;
else
pkt->dts = s->reordered_pts;
- s->reordered_pts = s->input_picture[0]->f->pts;
+ s->reordered_pts = pkt->pts;
} else
pkt->dts = pkt->pts;
- if (s->current_picture.f.key_frame)
+ if (s->current_picture.f->key_frame)
pkt->flags |= AV_PKT_FLAG_KEY;
if (s->mb_info)
av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
wrap_y = s->linesize;
wrap_c = s->uvlinesize;
- ptr_y = s->new_picture.f.data[0] +
+ ptr_y = s->new_picture.f->data[0] +
(mb_y * 16 * wrap_y) + mb_x * 16;
- ptr_cb = s->new_picture.f.data[1] +
+ ptr_cb = s->new_picture.f->data[1] +
- (mb_y * mb_block_height * wrap_c) + mb_x * 8;
+ (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
- ptr_cr = s->new_picture.f.data[2] +
+ ptr_cr = s->new_picture.f->data[2] +
- (mb_y * mb_block_height * wrap_c) + mb_x * 8;
+ (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
- if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
+ if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
uint8_t *ebuf = s->edge_emu_buffer + 32;
+ int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
+ int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
s->vdsp.emulated_edge_mc(ebuf, ptr_y,
wrap_y, wrap_y,
16, 16, mb_x * 16, mb_y * 16,
/* note: quant matrix value (8) is implied here */
s->last_dc[i] = 128 << s->intra_dc_precision;
- s->current_picture.f.error[i] = 0;
+ s->current_picture.f->error[i] = 0;
}
+ if(s->codec_id==AV_CODEC_ID_AMV){
+ s->last_dc[0] = 128*8/13;
+ s->last_dc[1] = 128*8/14;
+ s->last_dc[2] = 128*8/14;
+ }
s->mb_skip_run = 0;
memset(s->last_mv, 0, sizeof(s->last_mv));
}
//FIXME var duplication
- s->current_picture_ptr->f.key_frame =
- s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
- s->current_picture_ptr->f.pict_type =
- s->current_picture.f.pict_type = s->pict_type;
+ s->current_picture_ptr->f->key_frame =
+ s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
+ s->current_picture_ptr->f->pict_type =
+ s->current_picture.f->pict_type = s->pict_type;
- if (s->current_picture.f.key_frame)
+ if (s->current_picture.f->key_frame)
s->picture_in_gop_number=0;
+ s->mb_x = s->mb_y = 0;
s->last_bits= put_bits_count(&s->pb);
switch(s->out_format) {
case FMT_MJPEG:
s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
}
} else {
- if (s->picture_structure != s->field_select[dir][0] + 1 &&
- s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
+ if ( s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field
+ || !ref_picture[0]) {
- ref_picture = s->current_picture_ptr->f.data;
+ ref_picture = s->current_picture_ptr->f->data;
}
mpeg_motion(s, dest_y, dest_cb, dest_cr,
for (i = 0; i < 2; i++) {
uint8_t **ref2picture;
- if (s->picture_structure == s->field_select[dir][i] + 1
- || s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
+ if ((s->picture_structure == s->field_select[dir][i] + 1
+ || s->pict_type == AV_PICTURE_TYPE_B || s->first_field) && ref_picture[0]) {
ref2picture = ref_picture;
} else {
- ref2picture = s->current_picture_ptr->f.data;
+ ref2picture = s->current_picture_ptr->f->data;
}
mpeg_motion(s, dest_y, dest_cb, dest_cr,
pix_op = s->hdsp.avg_pixels_tab;
}
} else {
- ref_picture = s->current_picture_ptr->f.data;
+ if (!ref_picture[0]) {
++ ref_picture = s->current_picture_ptr->f->data;
+ }
for (i = 0; i < 2; i++) {
mpeg_motion(s, dest_y, dest_cb, dest_cr,
s->picture_structure != i + 1,
* This function should be called for every new field and/or frame.
* It should be safe to call the function a few times for the same field.
*/
-int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
+static int ff_xvmc_field_start(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
{
- struct xvmc_pix_fmt *last, *next, *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
+ struct MpegEncContext *s = avctx->priv_data;
+ struct xvmc_pix_fmt *last, *next, *render = (struct xvmc_pix_fmt*)s->current_picture.f->data[2];
const int mb_block_count = 4 + (1 << s->chroma_format);
assert(avctx);
* some leftover blocks, for example from error_resilience(), may remain.
* It should be safe to call the function a few times for the same field.
*/
-void ff_xvmc_field_end(MpegEncContext *s)
+static int ff_xvmc_field_end(AVCodecContext *avctx)
{
- struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
+ struct MpegEncContext *s = avctx->priv_data;
+ struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f->data[2];
assert(render);
if (render->filled_mv_blocks_num > 0)
*dir_ptr = 0;
}
}else{
+ int bs = 8 >> s->avctx->lowres;
if(n<4){
wrap= s->linesize;
- dest= s->current_picture.f.data[0] + (((n >> 1) + 2*s->mb_y) * bs* wrap ) + ((n & 1) + 2*s->mb_x) * bs;
- dest= s->current_picture.f->data[0] + (((n >> 1) + 2*s->mb_y) * 8* wrap ) + ((n & 1) + 2*s->mb_x) * 8;
++ dest= s->current_picture.f->data[0] + (((n >> 1) + 2*s->mb_y) * bs* wrap ) + ((n & 1) + 2*s->mb_x) * bs;
}else{
wrap= s->uvlinesize;
- dest= s->current_picture.f.data[n - 3] + (s->mb_y * bs * wrap) + s->mb_x * bs;
- dest= s->current_picture.f->data[n - 3] + (s->mb_y * 8 * wrap) + s->mb_x * 8;
++ dest= s->current_picture.f->data[n - 3] + (s->mb_y * bs * wrap) + s->mb_x * bs;
}
if(s->mb_x==0) a= (1024 + (scale>>1))/scale;
- else a= get_dc(dest-8, wrap, scale*8);
+ else a= get_dc(dest-bs, wrap, scale*8>>(2*s->avctx->lowres), bs);
if(s->mb_y==0) c= (1024 + (scale>>1))/scale;
- else c= get_dc(dest-8*wrap, wrap, scale*8);
+ else c= get_dc(dest-bs*wrap, wrap, scale*8>>(2*s->avctx->lowres), bs);
if (s->h263_aic_dir==0) {
pred= a;
{
snprintf(s->avctx->stats_out, 256,
"in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d "
- "fcode:%d bcode:%d mc-var:%d var:%d icount:%d skipcount:%d hbits:%d;\n",
+ "fcode:%d bcode:%d mc-var:%"PRId64" var:%"PRId64" icount:%d skipcount:%d hbits:%d;\n",
- s->current_picture_ptr->f.display_picture_number,
- s->current_picture_ptr->f.coded_picture_number,
+ s->current_picture_ptr->f->display_picture_number,
+ s->current_picture_ptr->f->coded_picture_number,
s->pict_type,
- s->current_picture.f.quality,
+ s->current_picture.f->quality,
s->i_tex_bits,
s->p_tex_bits,
s->mv_bits,
ff_MPV_frame_end(s);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
- if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+ if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
return ret;
- ff_print_debug_info(s, s->current_picture_ptr);
+ ff_print_debug_info(s, s->current_picture_ptr, pict);
+ ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_QSCALE_TYPE_MPEG1);
} else if (s->last_picture_ptr != NULL) {
- if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
+ if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
return ret;
- ff_print_debug_info(s, s->last_picture_ptr);
+ ff_print_debug_info(s, s->last_picture_ptr, pict);
+        ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_QSCALE_TYPE_MPEG1);
}
if (s->last_picture_ptr || s->low_delay) {
ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
- if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+ if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
return ret;
- ff_print_debug_info(s, s->current_picture_ptr);
+ ff_print_debug_info(s, s->current_picture_ptr, pict);
+ ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_QSCALE_TYPE_MPEG1);
got_picture = 1;
} else if (s->last_picture_ptr != NULL) {
- if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
+ if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
return ret;
- ff_print_debug_info(s, s->last_picture_ptr);
+ ff_print_debug_info(s, s->last_picture_ptr, pict);
+ ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_QSCALE_TYPE_MPEG1);
got_picture = 1;
}
-    s->m.last_picture.f.pts     = s->m.current_picture.f.pts;
-    s->m.current_picture.f.pts  = pict->pts;
+    s->m.last_picture.f->pts    = s->m.current_picture.f->pts;
+    s->m.current_picture.f->pts = pict->pts;
--- /dev/null
+/*
+ * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/intmath.h"
+#include "libavutil/log.h"
+#include "libavutil/opt.h"
+#include "avcodec.h"
+#include "dsputil.h"
+#include "internal.h"
+#include "snow_dwt.h"
+#include "snow.h"
+
+#include "rangecoder.h"
+#include "mathops.h"
+
+#include "mpegvideo.h"
+#include "h263.h"
+
+static av_cold int encode_init(AVCodecContext *avctx)
+{
+ SnowContext *s = avctx->priv_data;
+ int plane_index, ret;
+
+ if(avctx->prediction_method == DWT_97
+ && (avctx->flags & CODEC_FLAG_QSCALE)
+ && avctx->global_quality == 0){
+ av_log(avctx, AV_LOG_ERROR, "The 9/7 wavelet is incompatible with lossless mode.\n");
+ return -1;
+ }
+
+    s->spatial_decomposition_type= avctx->prediction_method; //FIXME add decorrelator type or transform_type
+
+ s->mv_scale = (avctx->flags & CODEC_FLAG_QPEL) ? 2 : 4;
+ s->block_max_depth= (avctx->flags & CODEC_FLAG_4MV ) ? 1 : 0;
+
+ for(plane_index=0; plane_index<3; plane_index++){
+ s->plane[plane_index].diag_mc= 1;
+ s->plane[plane_index].htaps= 6;
+ s->plane[plane_index].hcoeff[0]= 40;
+ s->plane[plane_index].hcoeff[1]= -10;
+ s->plane[plane_index].hcoeff[2]= 2;
+ s->plane[plane_index].fast_mc= 1;
+ }
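/* The defaults above describe a symmetric 6-tap half-pel filter: the stored
 * half (40, -10, 2) mirrors to (2, -10, 40, 40, -10, 2), which sums to 64,
 * i.e. unity gain under a 6-bit normalization shift. */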
+
+ if ((ret = ff_snow_common_init(avctx)) < 0) {
+ ff_snow_common_end(avctx->priv_data);
+ return ret;
+ }
+ ff_snow_alloc_blocks(s);
+
+ s->version=0;
+
+ s->m.avctx = avctx;
+ s->m.flags = avctx->flags;
+ s->m.bit_rate= avctx->bit_rate;
+
+ s->m.me.temp =
+ s->m.me.scratchpad= av_mallocz((avctx->width+64)*2*16*2*sizeof(uint8_t));
+ s->m.me.map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
+ s->m.me.score_map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
+ s->m.obmc_scratchpad= av_mallocz(MB_SIZE*MB_SIZE*12*sizeof(uint32_t));
+ if (!s->m.me.scratchpad || !s->m.me.map || !s->m.me.score_map || !s->m.obmc_scratchpad)
+ return AVERROR(ENOMEM);
+
+ ff_h263_encode_init(&s->m); //mv_penalty
+
+ s->max_ref_frames = FFMAX(FFMIN(avctx->refs, MAX_REF_FRAMES), 1);
+
+ if(avctx->flags&CODEC_FLAG_PASS1){
+ if(!avctx->stats_out)
+ avctx->stats_out = av_mallocz(256);
+
+ if (!avctx->stats_out)
+ return AVERROR(ENOMEM);
+ }
+ if((avctx->flags&CODEC_FLAG_PASS2) || !(avctx->flags&CODEC_FLAG_QSCALE)){
+ if(ff_rate_control_init(&s->m) < 0)
+ return -1;
+ }
+ s->pass1_rc= !(avctx->flags & (CODEC_FLAG_QSCALE|CODEC_FLAG_PASS2));
+
+ switch(avctx->pix_fmt){
+ case AV_PIX_FMT_YUV444P:
+// case AV_PIX_FMT_YUV422P:
+ case AV_PIX_FMT_YUV420P:
+// case AV_PIX_FMT_YUV411P:
+ case AV_PIX_FMT_YUV410P:
+ s->nb_planes = 3;
+ s->colorspace_type= 0;
+ break;
+ case AV_PIX_FMT_GRAY8:
+ s->nb_planes = 1;
+ s->colorspace_type = 1;
+ break;
+/* case AV_PIX_FMT_RGB32:
+ s->colorspace= 1;
+ break;*/
+ default:
+ av_log(avctx, AV_LOG_ERROR, "pixel format not supported\n");
+ return -1;
+ }
+ avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);
+
+ ff_set_cmp(&s->dsp, s->dsp.me_cmp, s->avctx->me_cmp);
+ ff_set_cmp(&s->dsp, s->dsp.me_sub_cmp, s->avctx->me_sub_cmp);
+
+ s->input_picture = av_frame_alloc();
+ if (!s->input_picture)
+ return AVERROR(ENOMEM);
+ if ((ret = ff_get_buffer(s->avctx, s->input_picture, AV_GET_BUFFER_FLAG_REF)) < 0)
+ return ret;
+
+ if(s->avctx->me_method == ME_ITER){
+ int i;
+ int size= s->b_width * s->b_height << 2*s->block_max_depth;
+ for(i=0; i<s->max_ref_frames; i++){
+ s->ref_mvs[i]= av_mallocz(size*sizeof(int16_t[2]));
+ s->ref_scores[i]= av_mallocz(size*sizeof(uint32_t));
+ if (!s->ref_mvs[i] || !s->ref_scores[i])
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ return 0;
+}
+
+//near copy & paste from dsputil, FIXME
+static int pix_sum(uint8_t * pix, int line_size, int w, int h)
+{
+ int s, i, j;
+
+ s = 0;
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ s += pix[0];
+ pix ++;
+ }
+ pix += line_size - w;
+ }
+ return s;
+}
+
+//near copy & paste from dsputil, FIXME
+static int pix_norm1(uint8_t * pix, int line_size, int w)
+{
+ int s, i, j;
+ uint32_t *sq = ff_square_tab + 256;
+
+ s = 0;
+ for (i = 0; i < w; i++) {
+ for (j = 0; j < w; j ++) {
+ s += sq[pix[0]];
+ pix ++;
+ }
+ pix += line_size - w;
+ }
+ return s;
+}
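/* pix_norm1() sums squares via table lookup: ff_square_tab[] holds
 * (i - 256)^2 for i in [0, 512), so with the +256 offset sq[v] == v*v
 * for v in [-256, 255]. */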
+
+static inline int get_penalty_factor(int lambda, int lambda2, int type){
+ switch(type&0xFF){
+ default:
+ case FF_CMP_SAD:
+ return lambda>>FF_LAMBDA_SHIFT;
+ case FF_CMP_DCT:
+ return (3*lambda)>>(FF_LAMBDA_SHIFT+1);
+ case FF_CMP_W53:
+ return (4*lambda)>>(FF_LAMBDA_SHIFT);
+ case FF_CMP_W97:
+ return (2*lambda)>>(FF_LAMBDA_SHIFT);
+ case FF_CMP_SATD:
+ case FF_CMP_DCT264:
+ return (2*lambda)>>FF_LAMBDA_SHIFT;
+ case FF_CMP_RD:
+ case FF_CMP_PSNR:
+ case FF_CMP_SSE:
+ case FF_CMP_NSSE:
+ return lambda2>>FF_LAMBDA_SHIFT;
+ case FF_CMP_BIT:
+ return 1;
+ }
+}
+
+//FIXME copy&paste
+#define P_LEFT P[1]
+#define P_TOP P[2]
+#define P_TOPRIGHT P[3]
+#define P_MEDIAN P[4]
+#define P_MV1 P[9]
+#define FLAG_QPEL 1 //must be 1
+
+static int encode_q_branch(SnowContext *s, int level, int x, int y){
+ uint8_t p_buffer[1024];
+ uint8_t i_buffer[1024];
+ uint8_t p_state[sizeof(s->block_state)];
+ uint8_t i_state[sizeof(s->block_state)];
+ RangeCoder pc, ic;
+ uint8_t *pbbak= s->c.bytestream;
+ uint8_t *pbbak_start= s->c.bytestream_start;
+ int score, score2, iscore, i_len, p_len, block_s, sum, base_bits;
+ const int w= s->b_width << s->block_max_depth;
+ const int h= s->b_height << s->block_max_depth;
+ const int rem_depth= s->block_max_depth - level;
+ const int index= (x + y*w) << rem_depth;
+ const int block_w= 1<<(LOG2_MB_SIZE - level);
+ int trx= (x+1)<<rem_depth;
+ int try= (y+1)<<rem_depth;
+ const BlockNode *left = x ? &s->block[index-1] : &null_block;
+ const BlockNode *top = y ? &s->block[index-w] : &null_block;
+ const BlockNode *right = trx<w ? &s->block[index+1] : &null_block;
+ const BlockNode *bottom= try<h ? &s->block[index+w] : &null_block;
+ const BlockNode *tl = y && x ? &s->block[index-w-1] : left;
+ const BlockNode *tr = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
+ int pl = left->color[0];
+ int pcb= left->color[1];
+ int pcr= left->color[2];
+ int pmx, pmy;
+ int mx=0, my=0;
+ int l,cr,cb;
+ const int stride= s->current_picture->linesize[0];
+ const int uvstride= s->current_picture->linesize[1];
+ uint8_t *current_data[3]= { s->input_picture->data[0] + (x + y* stride)*block_w,
+ s->input_picture->data[1] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift),
+ s->input_picture->data[2] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift)};
+ int P[10][2];
+ int16_t last_mv[3][2];
+ int qpel= !!(s->avctx->flags & CODEC_FLAG_QPEL); //unused
+ const int shift= 1+qpel;
+ MotionEstContext *c= &s->m.me;
+ int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
+ int mx_context= av_log2(2*FFABS(left->mx - top->mx));
+ int my_context= av_log2(2*FFABS(left->my - top->my));
+ int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
+ int ref, best_ref, ref_score, ref_mx, ref_my;
+
+ av_assert0(sizeof(s->block_state) >= 256);
+ if(s->keyframe){
+ set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
+ return 0;
+ }
+
+// clip predictors / edge ?
+
+ P_LEFT[0]= left->mx;
+ P_LEFT[1]= left->my;
+ P_TOP [0]= top->mx;
+ P_TOP [1]= top->my;
+ P_TOPRIGHT[0]= tr->mx;
+ P_TOPRIGHT[1]= tr->my;
+
+ last_mv[0][0]= s->block[index].mx;
+ last_mv[0][1]= s->block[index].my;
+ last_mv[1][0]= right->mx;
+ last_mv[1][1]= right->my;
+ last_mv[2][0]= bottom->mx;
+ last_mv[2][1]= bottom->my;
+
+ s->m.mb_stride=2;
+ s->m.mb_x=
+ s->m.mb_y= 0;
+ c->skip= 0;
+
+ av_assert1(c-> stride == stride);
+ av_assert1(c->uvstride == uvstride);
+
+ c->penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_cmp);
+ c->sub_penalty_factor= get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_sub_cmp);
+ c->mb_penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->mb_cmp);
+ c->current_mv_penalty= c->mv_penalty[s->m.f_code=1] + MAX_MV;
+
+ c->xmin = - x*block_w - 16+3;
+ c->ymin = - y*block_w - 16+3;
+ c->xmax = - (x+1)*block_w + (w<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;
+ c->ymax = - (y+1)*block_w + (h<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;
+
+ if(P_LEFT[0] > (c->xmax<<shift)) P_LEFT[0] = (c->xmax<<shift);
+ if(P_LEFT[1] > (c->ymax<<shift)) P_LEFT[1] = (c->ymax<<shift);
+ if(P_TOP[0] > (c->xmax<<shift)) P_TOP[0] = (c->xmax<<shift);
+ if(P_TOP[1] > (c->ymax<<shift)) P_TOP[1] = (c->ymax<<shift);
+ if(P_TOPRIGHT[0] < (c->xmin<<shift)) P_TOPRIGHT[0]= (c->xmin<<shift);
+ if(P_TOPRIGHT[0] > (c->xmax<<shift)) P_TOPRIGHT[0]= (c->xmax<<shift); //due to pmx no clip
+ if(P_TOPRIGHT[1] > (c->ymax<<shift)) P_TOPRIGHT[1]= (c->ymax<<shift);
+
+ P_MEDIAN[0]= mid_pred(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]);
+ P_MEDIAN[1]= mid_pred(P_LEFT[1], P_TOP[1], P_TOPRIGHT[1]);
+
+ if (!y) {
+ c->pred_x= P_LEFT[0];
+ c->pred_y= P_LEFT[1];
+ } else {
+ c->pred_x = P_MEDIAN[0];
+ c->pred_y = P_MEDIAN[1];
+ }
+
+ score= INT_MAX;
+ best_ref= 0;
+ for(ref=0; ref<s->ref_frames; ref++){
+ init_ref(c, current_data, s->last_picture[ref]->data, NULL, block_w*x, block_w*y, 0);
+
+ ref_score= ff_epzs_motion_search(&s->m, &ref_mx, &ref_my, P, 0, /*ref_index*/ 0, last_mv,
+ (1<<16)>>shift, level-LOG2_MB_SIZE+4, block_w);
+
+ av_assert2(ref_mx >= c->xmin);
+ av_assert2(ref_mx <= c->xmax);
+ av_assert2(ref_my >= c->ymin);
+ av_assert2(ref_my <= c->ymax);
+
+ ref_score= c->sub_motion_search(&s->m, &ref_mx, &ref_my, ref_score, 0, 0, level-LOG2_MB_SIZE+4, block_w);
+ ref_score= ff_get_mb_score(&s->m, ref_mx, ref_my, 0, 0, level-LOG2_MB_SIZE+4, block_w, 0);
+ ref_score+= 2*av_log2(2*ref)*c->penalty_factor;
+ if(s->ref_mvs[ref]){
+ s->ref_mvs[ref][index][0]= ref_mx;
+ s->ref_mvs[ref][index][1]= ref_my;
+ s->ref_scores[ref][index]= ref_score;
+ }
+ if(score > ref_score){
+ score= ref_score;
+ best_ref= ref;
+ mx= ref_mx;
+ my= ref_my;
+ }
+ }
+ //FIXME if mb_cmp != SSE then intra cannot be compared currently and mb_penalty vs. lambda2
+
+ // subpel search
+ base_bits= get_rac_count(&s->c) - 8*(s->c.bytestream - s->c.bytestream_start);
+ pc= s->c;
+ pc.bytestream_start=
+ pc.bytestream= p_buffer; //FIXME end/start? and at the other side too
+ memcpy(p_state, s->block_state, sizeof(s->block_state));
+
+ if(level!=s->block_max_depth)
+ put_rac(&pc, &p_state[4 + s_context], 1);
+ put_rac(&pc, &p_state[1 + left->type + top->type], 0);
+ if(s->ref_frames > 1)
+ put_symbol(&pc, &p_state[128 + 1024 + 32*ref_context], best_ref, 0);
+ pred_mv(s, &pmx, &pmy, best_ref, left, top, tr);
+ put_symbol(&pc, &p_state[128 + 32*(mx_context + 16*!!best_ref)], mx - pmx, 1);
+ put_symbol(&pc, &p_state[128 + 32*(my_context + 16*!!best_ref)], my - pmy, 1);
+ p_len= pc.bytestream - pc.bytestream_start;
+ score += (s->lambda2*(get_rac_count(&pc)-base_bits))>>FF_LAMBDA_SHIFT;
+
+ block_s= block_w*block_w;
+ sum = pix_sum(current_data[0], stride, block_w, block_w);
+ l= (sum + block_s/2)/block_s;
+ iscore = pix_norm1(current_data[0], stride, block_w) - 2*l*sum + l*l*block_s;
+
+ if (s->nb_planes > 2) {
+ block_s= block_w*block_w>>(s->chroma_h_shift + s->chroma_v_shift);
+ sum = pix_sum(current_data[1], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
+ cb= (sum + block_s/2)/block_s;
+ // iscore += pix_norm1(&current_mb[1][0], uvstride, block_w>>1) - 2*cb*sum + cb*cb*block_s;
+ sum = pix_sum(current_data[2], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
+ cr= (sum + block_s/2)/block_s;
+ // iscore += pix_norm1(&current_mb[2][0], uvstride, block_w>>1) - 2*cr*sum + cr*cr*block_s;
+ }else
+ cb = cr = 0;
+
+ ic= s->c;
+ ic.bytestream_start=
+ ic.bytestream= i_buffer; //FIXME end/start? and at the other side too
+ memcpy(i_state, s->block_state, sizeof(s->block_state));
+ if(level!=s->block_max_depth)
+ put_rac(&ic, &i_state[4 + s_context], 1);
+ put_rac(&ic, &i_state[1 + left->type + top->type], 1);
+ put_symbol(&ic, &i_state[32], l-pl , 1);
+ if (s->nb_planes > 2) {
+ put_symbol(&ic, &i_state[64], cb-pcb, 1);
+ put_symbol(&ic, &i_state[96], cr-pcr, 1);
+ }
+ i_len= ic.bytestream - ic.bytestream_start;
+ iscore += (s->lambda2*(get_rac_count(&ic)-base_bits))>>FF_LAMBDA_SHIFT;
+
+// assert(score==256*256*256*64-1);
+ av_assert1(iscore < 255*255*256 + s->lambda2*10);
+ av_assert1(iscore >= 0);
+ av_assert1(l>=0 && l<=255);
+ av_assert1(pl>=0 && pl<=255);
+
+ if(level==0){
+ int varc= iscore >> 8;
+ int vard= score >> 8;
+ if (vard <= 64 || vard < varc)
+ c->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc);
+ else
+ c->scene_change_score+= s->m.qscale;
+ }
+
+ if(level!=s->block_max_depth){
+ put_rac(&s->c, &s->block_state[4 + s_context], 0);
+ score2 = encode_q_branch(s, level+1, 2*x+0, 2*y+0);
+ score2+= encode_q_branch(s, level+1, 2*x+1, 2*y+0);
+ score2+= encode_q_branch(s, level+1, 2*x+0, 2*y+1);
+ score2+= encode_q_branch(s, level+1, 2*x+1, 2*y+1);
+ score2+= s->lambda2>>FF_LAMBDA_SHIFT; //FIXME exact split overhead
+
+ if(score2 < score && score2 < iscore)
+ return score2;
+ }
+
+ if(iscore < score){
+ pred_mv(s, &pmx, &pmy, 0, left, top, tr);
+ memcpy(pbbak, i_buffer, i_len);
+ s->c= ic;
+ s->c.bytestream_start= pbbak_start;
+ s->c.bytestream= pbbak + i_len;
+ set_blocks(s, level, x, y, l, cb, cr, pmx, pmy, 0, BLOCK_INTRA);
+ memcpy(s->block_state, i_state, sizeof(s->block_state));
+ return iscore;
+ }else{
+ memcpy(pbbak, p_buffer, p_len);
+ s->c= pc;
+ s->c.bytestream_start= pbbak_start;
+ s->c.bytestream= pbbak + p_len;
+ set_blocks(s, level, x, y, pl, pcb, pcr, mx, my, best_ref, 0);
+ memcpy(s->block_state, p_state, sizeof(s->block_state));
+ return score;
+ }
+}
+
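+// Second pass over an already-decided block tree: re-emit split flags, intra
+// colors and motion vectors to the range coder without any searching.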
+static void encode_q_branch2(SnowContext *s, int level, int x, int y){
+ const int w= s->b_width << s->block_max_depth;
+ const int rem_depth= s->block_max_depth - level;
+ const int index= (x + y*w) << rem_depth;
+ int trx= (x+1)<<rem_depth;
+ BlockNode *b= &s->block[index];
+ const BlockNode *left = x ? &s->block[index-1] : &null_block;
+ const BlockNode *top = y ? &s->block[index-w] : &null_block;
+ const BlockNode *tl = y && x ? &s->block[index-w-1] : left;
+ const BlockNode *tr = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
+ int pl = left->color[0];
+ int pcb= left->color[1];
+ int pcr= left->color[2];
+ int pmx, pmy;
+ int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
+ int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 16*!!b->ref;
+ int my_context= av_log2(2*FFABS(left->my - top->my)) + 16*!!b->ref;
+ int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
+
+ if(s->keyframe){
+ set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
+ return;
+ }
+
+ if(level!=s->block_max_depth){
+ if(same_block(b,b+1) && same_block(b,b+w) && same_block(b,b+w+1)){
+ put_rac(&s->c, &s->block_state[4 + s_context], 1);
+ }else{
+ put_rac(&s->c, &s->block_state[4 + s_context], 0);
+ encode_q_branch2(s, level+1, 2*x+0, 2*y+0);
+ encode_q_branch2(s, level+1, 2*x+1, 2*y+0);
+ encode_q_branch2(s, level+1, 2*x+0, 2*y+1);
+ encode_q_branch2(s, level+1, 2*x+1, 2*y+1);
+ return;
+ }
+ }
+ if(b->type & BLOCK_INTRA){
+ pred_mv(s, &pmx, &pmy, 0, left, top, tr);
+ put_rac(&s->c, &s->block_state[1 + (left->type&1) + (top->type&1)], 1);
+ put_symbol(&s->c, &s->block_state[32], b->color[0]-pl , 1);
+ if (s->nb_planes > 2) {
+ put_symbol(&s->c, &s->block_state[64], b->color[1]-pcb, 1);
+ put_symbol(&s->c, &s->block_state[96], b->color[2]-pcr, 1);
+ }
+ set_blocks(s, level, x, y, b->color[0], b->color[1], b->color[2], pmx, pmy, 0, BLOCK_INTRA);
+ }else{
+ pred_mv(s, &pmx, &pmy, b->ref, left, top, tr);
+ put_rac(&s->c, &s->block_state[1 + (left->type&1) + (top->type&1)], 0);
+ if(s->ref_frames > 1)
+ put_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], b->ref, 0);
+ put_symbol(&s->c, &s->block_state[128 + 32*mx_context], b->mx - pmx, 1);
+ put_symbol(&s->c, &s->block_state[128 + 32*my_context], b->my - pmy, 1);
+ set_blocks(s, level, x, y, pl, pcb, pcr, b->mx, b->my, b->ref, 0);
+ }
+}
+
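+// Compute the DC value an intra block would need to best match the source
+// once the OBMC contributions of the neighboring blocks are subtracted.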
+static int get_dc(SnowContext *s, int mb_x, int mb_y, int plane_index){
+ int i, x2, y2;
+ Plane *p= &s->plane[plane_index];
+ const int block_size = MB_SIZE >> s->block_max_depth;
+ const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
+ const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
+ const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
+ const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
+ const int ref_stride= s->current_picture->linesize[plane_index];
+ uint8_t *src= s-> input_picture->data[plane_index];
+ IDWTELEM *dst= (IDWTELEM*)s->m.obmc_scratchpad + plane_index*block_size*block_size*4; //FIXME change to unsigned
+ const int b_stride = s->b_width << s->block_max_depth;
+ const int w= p->width;
+ const int h= p->height;
+ int index= mb_x + mb_y*b_stride;
+ BlockNode *b= &s->block[index];
+ BlockNode backup= *b;
+ int ab=0;
+ int aa=0;
+
+ av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc stuff above
+
+ b->type|= BLOCK_INTRA;
+ b->color[plane_index]= 0;
+ memset(dst, 0, obmc_stride*obmc_stride*sizeof(IDWTELEM));
+
+ for(i=0; i<4; i++){
+ int mb_x2= mb_x + (i &1) - 1;
+ int mb_y2= mb_y + (i>>1) - 1;
+ int x= block_w*mb_x2 + block_w/2;
+ int y= block_h*mb_y2 + block_h/2;
+
+ add_yblock(s, 0, NULL, dst + (i&1)*block_w + (i>>1)*obmc_stride*block_h, NULL, obmc,
+ x, y, block_w, block_h, w, h, obmc_stride, ref_stride, obmc_stride, mb_x2, mb_y2, 0, 0, plane_index);
+
+ for(y2= FFMAX(y, 0); y2<FFMIN(h, y+block_h); y2++){
+ for(x2= FFMAX(x, 0); x2<FFMIN(w, x+block_w); x2++){
+ int index= x2-(block_w*mb_x - block_w/2) + (y2-(block_h*mb_y - block_h/2))*obmc_stride;
+ int obmc_v= obmc[index];
+ int d;
+ if(y<0) obmc_v += obmc[index + block_h*obmc_stride];
+ if(x<0) obmc_v += obmc[index + block_w];
+ if(y+block_h>h) obmc_v += obmc[index - block_h*obmc_stride];
+ if(x+block_w>w) obmc_v += obmc[index - block_w];
+ //FIXME precalculate this or simplify it somehow else
+
+ d = -dst[index] + (1<<(FRAC_BITS-1));
+ dst[index] = d;
+ ab += (src[x2 + y2*ref_stride] - (d>>FRAC_BITS)) * obmc_v;
+ aa += obmc_v * obmc_v; //FIXME precalculate this
+ }
+ }
+ }
+ *b= backup;
+
+ return av_clip( ROUNDED_DIV(ab<<LOG2_OBMC_MAX, aa), 0, 255); //FIXME we should not need clipping
+}
+
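+// Rough bit-cost estimate of a block's side info: intra color deltas, or the
+// mv residual plus reference index (see the prefix-code table in the body).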
+static inline int get_block_bits(SnowContext *s, int x, int y, int w){
+ const int b_stride = s->b_width << s->block_max_depth;
+ const int b_height = s->b_height<< s->block_max_depth;
+ int index= x + y*b_stride;
+ const BlockNode *b = &s->block[index];
+ const BlockNode *left = x ? &s->block[index-1] : &null_block;
+ const BlockNode *top = y ? &s->block[index-b_stride] : &null_block;
+ const BlockNode *tl = y && x ? &s->block[index-b_stride-1] : left;
+ const BlockNode *tr = y && x+w<b_stride ? &s->block[index-b_stride+w] : tl;
+ int dmx, dmy;
+// int mx_context= av_log2(2*FFABS(left->mx - top->mx));
+// int my_context= av_log2(2*FFABS(left->my - top->my));
+
+ if(x<0 || x>=b_stride || y>=b_height)
+ return 0;
+/*
+  code         value   extra bits
+  1            0       0
+  01X          1-2     1
+  001XX        3-6     2-3
+  0001XXX      7-14    4-7
+  00001XXXX    15-30   8-15
+*/
+//FIXME try accurate rate
+//FIXME intra and inter predictors if surrounding blocks are not the same type
+ if(b->type & BLOCK_INTRA){
+ return 3+2*( av_log2(2*FFABS(left->color[0] - b->color[0]))
+ + av_log2(2*FFABS(left->color[1] - b->color[1]))
+ + av_log2(2*FFABS(left->color[2] - b->color[2])));
+ }else{
+ pred_mv(s, &dmx, &dmy, b->ref, left, top, tr);
+ dmx-= b->mx;
+ dmy-= b->my;
+ return 2*(1 + av_log2(2*FFABS(dmx)) //FIXME kill the 2* can be merged in lambda
+ + av_log2(2*FFABS(dmy))
+ + av_log2(2*b->ref));
+ }
+}
+
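+// RD cost of one block: reconstruct it with OBMC into the current picture
+// and compare against the source using the configured me_cmp function.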
+static int get_block_rd(SnowContext *s, int mb_x, int mb_y, int plane_index, uint8_t (*obmc_edged)[MB_SIZE * 2]){
+ Plane *p= &s->plane[plane_index];
+ const int block_size = MB_SIZE >> s->block_max_depth;
+ const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
+ const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
+ const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
+ const int ref_stride= s->current_picture->linesize[plane_index];
+ uint8_t *dst= s->current_picture->data[plane_index];
+ uint8_t *src= s-> input_picture->data[plane_index];
+ IDWTELEM *pred= (IDWTELEM*)s->m.obmc_scratchpad + plane_index*block_size*block_size*4;
+ uint8_t *cur = s->scratchbuf;
+ uint8_t *tmp = s->emu_edge_buffer;
+ const int b_stride = s->b_width << s->block_max_depth;
+ const int b_height = s->b_height<< s->block_max_depth;
+ const int w= p->width;
+ const int h= p->height;
+ int distortion;
+ int rate= 0;
+ const int penalty_factor= get_penalty_factor(s->lambda, s->lambda2, s->avctx->me_cmp);
+ int sx= block_w*mb_x - block_w/2;
+ int sy= block_h*mb_y - block_h/2;
+ int x0= FFMAX(0,-sx);
+ int y0= FFMAX(0,-sy);
+ int x1= FFMIN(block_w*2, w-sx);
+ int y1= FFMIN(block_h*2, h-sy);
+ int i,x,y;
+
+ av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc and square assumptions below, checking only block_w
+
+ ff_snow_pred_block(s, cur, tmp, ref_stride, sx, sy, block_w*2, block_h*2, &s->block[mb_x + mb_y*b_stride], plane_index, w, h);
+
+ for(y=y0; y<y1; y++){
+ const uint8_t *obmc1= obmc_edged[y];
+ const IDWTELEM *pred1 = pred + y*obmc_stride;
+ uint8_t *cur1 = cur + y*ref_stride;
+ uint8_t *dst1 = dst + sx + (sy+y)*ref_stride;
+ for(x=x0; x<x1; x++){
+#if FRAC_BITS >= LOG2_OBMC_MAX
+ int v = (cur1[x] * obmc1[x]) << (FRAC_BITS - LOG2_OBMC_MAX);
+#else
+ int v = (cur1[x] * obmc1[x] + (1<<(LOG2_OBMC_MAX - FRAC_BITS-1))) >> (LOG2_OBMC_MAX - FRAC_BITS);
+#endif
+ v = (v + pred1[x]) >> FRAC_BITS;
+ if(v&(~255)) v= ~(v>>31);
+ dst1[x] = v;
+ }
+ }
+
+ /* copy the regions where obmc[] = (uint8_t)256 */
+ if(LOG2_OBMC_MAX == 8
+ && (mb_x == 0 || mb_x == b_stride-1)
+ && (mb_y == 0 || mb_y == b_height-1)){
+ if(mb_x == 0)
+ x1 = block_w;
+ else
+ x0 = block_w;
+ if(mb_y == 0)
+ y1 = block_h;
+ else
+ y0 = block_h;
+ for(y=y0; y<y1; y++)
+ memcpy(dst + sx+x0 + (sy+y)*ref_stride, cur + x0 + y*ref_stride, x1-x0);
+ }
+
+ if(block_w==16){
+ /* FIXME rearrange dsputil to fit 32x32 cmp functions */
+ /* FIXME check alignment of the cmp wavelet vs the encoding wavelet */
+ /* FIXME cmps overlap but do not cover the wavelet's whole support.
+ * So improving the score of one block is not strictly guaranteed
+ * to improve the score of the whole frame, thus iterative motion
+ * estimation does not always converge. */
+ if(s->avctx->me_cmp == FF_CMP_W97)
+ distortion = ff_w97_32_c(&s->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
+ else if(s->avctx->me_cmp == FF_CMP_W53)
+ distortion = ff_w53_32_c(&s->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
+ else{
+ distortion = 0;
+ for(i=0; i<4; i++){
+ int off = sx+16*(i&1) + (sy+16*(i>>1))*ref_stride;
+ distortion += s->dsp.me_cmp[0](&s->m, src + off, dst + off, ref_stride, 16);
+ }
+ }
+ }else{
+ av_assert2(block_w==8);
+ distortion = s->dsp.me_cmp[0](&s->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, block_w*2);
+ }
+
+ if(plane_index==0){
+ for(i=0; i<4; i++){
+/* ..RRr
+ * .RXx.
+ * rxx..
+ */
+ rate += get_block_bits(s, mb_x + (i&1) - (i>>1), mb_y + (i>>1), 1);
+ }
+ if(mb_x == b_stride-2)
+ rate += get_block_bits(s, mb_x + 1, mb_y + 1, 1);
+ }
+ return distortion + rate*penalty_factor;
+}
+
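+// RD cost over the 3x3 neighborhood affected by a 2x2 block group; used for
+// the merge decision in the 4MV pass of iterative_me().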
+static int get_4block_rd(SnowContext *s, int mb_x, int mb_y, int plane_index){
+ int i, y2;
+ Plane *p= &s->plane[plane_index];
+ const int block_size = MB_SIZE >> s->block_max_depth;
+ const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
+ const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
+ const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
+ const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
+ const int ref_stride= s->current_picture->linesize[plane_index];
+ uint8_t *dst= s->current_picture->data[plane_index];
+ uint8_t *src= s-> input_picture->data[plane_index];
+ //FIXME zero_dst is const but add_yblock changes dst if add is 0 (this is never the case for dst=zero_dst)
+ // const has only been removed from zero_dst to suppress a warning
+ static IDWTELEM zero_dst[4096]; //FIXME
+ const int b_stride = s->b_width << s->block_max_depth;
+ const int w= p->width;
+ const int h= p->height;
+ int distortion= 0;
+ int rate= 0;
+ const int penalty_factor= get_penalty_factor(s->lambda, s->lambda2, s->avctx->me_cmp);
+
+ av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc and square assumptions below
+
+ for(i=0; i<9; i++){
+ int mb_x2= mb_x + (i%3) - 1;
+ int mb_y2= mb_y + (i/3) - 1;
+ int x= block_w*mb_x2 + block_w/2;
+ int y= block_h*mb_y2 + block_h/2;
+
+ add_yblock(s, 0, NULL, zero_dst, dst, obmc,
+ x, y, block_w, block_h, w, h, /*dst_stride*/0, ref_stride, obmc_stride, mb_x2, mb_y2, 1, 1, plane_index);
+
+ //FIXME find a cleaner/simpler way to skip the outside stuff
+ for(y2= y; y2<0; y2++)
+ memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
+ for(y2= h; y2<y+block_h; y2++)
+ memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
+ if(x<0){
+ for(y2= y; y2<y+block_h; y2++)
+ memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, -x);
+ }
+ if(x+block_w > w){
+ for(y2= y; y2<y+block_h; y2++)
+ memcpy(dst + w + y2*ref_stride, src + w + y2*ref_stride, x+block_w - w);
+ }
+
+ av_assert1(block_w== 8 || block_w==16);
+ distortion += s->dsp.me_cmp[block_w==8](&s->m, src + x + y*ref_stride, dst + x + y*ref_stride, ref_stride, block_h);
+ }
+
+ if(plane_index==0){
+ BlockNode *b= &s->block[mb_x+mb_y*b_stride];
+ int merged= same_block(b,b+1) && same_block(b,b+b_stride) && same_block(b,b+b_stride+1);
+
+/* ..RRRr
+ * .RXXx.
+ * .RXXx.
+ * rxxx.
+ */
+ if(merged)
+ rate = get_block_bits(s, mb_x, mb_y, 2);
+ for(i=merged?4:0; i<9; i++){
+ static const int dxy[9][2] = {{0,0},{1,0},{0,1},{1,1},{2,0},{2,1},{-1,2},{0,2},{1,2}};
+ rate += get_block_bits(s, mb_x + dxy[i][0], mb_y + dxy[i][1], 1);
+ }
+ }
+ return distortion + rate*penalty_factor;
+}
+
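+// Entropy-code one subband: coefficient significance is run-length coded
+// where all causal neighbors and the parent are zero, context-coded otherwise;
+// magnitudes and signs follow for nonzero coefficients.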
+static int encode_subband_c0run(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation){
+ const int w= b->width;
+ const int h= b->height;
+ int x, y;
+
+ if(1){
+ int run=0;
+ int *runs = s->run_buffer;
+ int run_index=0;
+ int max_index;
+
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ int v, p=0;
+ int /*ll=0, */l=0, lt=0, t=0, rt=0;
+ v= src[x + y*stride];
+
+ if(y){
+ t= src[x + (y-1)*stride];
+ if(x){
+ lt= src[x - 1 + (y-1)*stride];
+ }
+ if(x + 1 < w){
+ rt= src[x + 1 + (y-1)*stride];
+ }
+ }
+ if(x){
+ l= src[x - 1 + y*stride];
+ /*if(x > 1){
+ if(orientation==1) ll= src[y + (x-2)*stride];
+ else ll= src[x - 2 + y*stride];
+ }*/
+ }
+ if(parent){
+ int px= x>>1;
+ int py= y>>1;
+ if(px<b->parent->width && py<b->parent->height)
+ p= parent[px + py*2*stride];
+ }
+ if(!(/*ll|*/l|lt|t|rt|p)){
+ if(v){
+ runs[run_index++]= run;
+ run=0;
+ }else{
+ run++;
+ }
+ }
+ }
+ }
+ max_index= run_index;
+ runs[run_index++]= run;
+ run_index=0;
+ run= runs[run_index++];
+
+ put_symbol2(&s->c, b->state[30], max_index, 0);
+ if(run_index <= max_index)
+ put_symbol2(&s->c, b->state[1], run, 3);
+
+ for(y=0; y<h; y++){
+ if(s->c.bytestream_end - s->c.bytestream < w*40){
+ av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+ return -1;
+ }
+ for(x=0; x<w; x++){
+ int v, p=0;
+ int /*ll=0, */l=0, lt=0, t=0, rt=0;
+ v= src[x + y*stride];
+
+ if(y){
+ t= src[x + (y-1)*stride];
+ if(x){
+ lt= src[x - 1 + (y-1)*stride];
+ }
+ if(x + 1 < w){
+ rt= src[x + 1 + (y-1)*stride];
+ }
+ }
+ if(x){
+ l= src[x - 1 + y*stride];
+ /*if(x > 1){
+ if(orientation==1) ll= src[y + (x-2)*stride];
+ else ll= src[x - 2 + y*stride];
+ }*/
+ }
+ if(parent){
+ int px= x>>1;
+ int py= y>>1;
+ if(px<b->parent->width && py<b->parent->height)
+ p= parent[px + py*2*stride];
+ }
+ if(/*ll|*/l|lt|t|rt|p){
+ int context= av_log2(/*FFABS(ll) + */3*FFABS(l) + FFABS(lt) + 2*FFABS(t) + FFABS(rt) + FFABS(p));
+
+ put_rac(&s->c, &b->state[0][context], !!v);
+ }else{
+ if(!run){
+ run= runs[run_index++];
+
+ if(run_index <= max_index)
+ put_symbol2(&s->c, b->state[1], run, 3);
+ av_assert2(v);
+ }else{
+ run--;
+ av_assert2(!v);
+ }
+ }
+ if(v){
+ int context= av_log2(/*FFABS(ll) + */3*FFABS(l) + FFABS(lt) + 2*FFABS(t) + FFABS(rt) + FFABS(p));
+ int l2= 2*FFABS(l) + (l<0);
+ int t2= 2*FFABS(t) + (t<0);
+
+ put_symbol2(&s->c, b->state[context + 2], FFABS(v)-1, context-4);
+ put_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l2&0xFF] + 3*ff_quant3bA[t2&0xFF]], v<0);
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+static int encode_subband(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation){
+// encode_subband_qtree(s, b, src, parent, stride, orientation);
+// encode_subband_z0run(s, b, src, parent, stride, orientation);
+ return encode_subband_c0run(s, b, src, parent, stride, orientation);
+// encode_subband_dzr(s, b, src, parent, stride, orientation);
+}
+
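+// Try one candidate (intra color or mv) for a block and keep it if it lowers
+// *best_rd, else restore the backup; a small hash cache skips mv candidates
+// already evaluated in the current generation.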
+static av_always_inline int check_block(SnowContext *s, int mb_x, int mb_y, int p[3], int intra, uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd){
+ const int b_stride= s->b_width << s->block_max_depth;
+ BlockNode *block= &s->block[mb_x + mb_y * b_stride];
+ BlockNode backup= *block;
+ unsigned value;
+ int rd, index;
+
+ av_assert2(mb_x>=0 && mb_y>=0);
+ av_assert2(mb_x<b_stride);
+
+ if(intra){
+ block->color[0] = p[0];
+ block->color[1] = p[1];
+ block->color[2] = p[2];
+ block->type |= BLOCK_INTRA;
+ }else{
+ index= (p[0] + 31*p[1]) & (ME_CACHE_SIZE-1);
+ value= s->me_cache_generation + (p[0]>>10) + (p[1]<<6) + (block->ref<<12);
+ if(s->me_cache[index] == value)
+ return 0;
+ s->me_cache[index]= value;
+
+ block->mx= p[0];
+ block->my= p[1];
+ block->type &= ~BLOCK_INTRA;
+ }
+
+ rd= get_block_rd(s, mb_x, mb_y, 0, obmc_edged);
+
+//FIXME chroma
+ if(rd < *best_rd){
+ *best_rd= rd;
+ return 1;
+ }else{
+ *block= backup;
+ return 0;
+ }
+}
+
+/* special case for int[2] args we discard afterwards,
+ * fixes compilation problem with gcc 2.95 */
+static av_always_inline int check_block_inter(SnowContext *s, int mb_x, int mb_y, int p0, int p1, uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd){
+ int p[2] = {p0, p1};
+ return check_block(s, mb_x, mb_y, p, 0, obmc_edged, best_rd);
+}
+
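+// Like check_block_inter(), but applies one mv/ref pair to a whole 2x2 block
+// group and scores it with get_4block_rd().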
+static av_always_inline int check_4block_inter(SnowContext *s, int mb_x, int mb_y, int p0, int p1, int ref, int *best_rd){
+ const int b_stride= s->b_width << s->block_max_depth;
+ BlockNode *block= &s->block[mb_x + mb_y * b_stride];
+ BlockNode backup[4];
+ unsigned value;
+ int rd, index;
+
+ /* We don't initialize backup[] during variable declaration, because
+ * that fails to compile on MSVC: "cannot convert from 'BlockNode' to
+ * 'int16_t'". */
+ backup[0] = block[0];
+ backup[1] = block[1];
+ backup[2] = block[b_stride];
+ backup[3] = block[b_stride + 1];
+
+ av_assert2(mb_x>=0 && mb_y>=0);
+ av_assert2(mb_x<b_stride);
+ av_assert2(((mb_x|mb_y)&1) == 0);
+
+ index= (p0 + 31*p1) & (ME_CACHE_SIZE-1);
+ value= s->me_cache_generation + (p0>>10) + (p1<<6) + (block->ref<<12);
+ if(s->me_cache[index] == value)
+ return 0;
+ s->me_cache[index]= value;
+
+ block->mx= p0;
+ block->my= p1;
+ block->ref= ref;
+ block->type &= ~BLOCK_INTRA;
+ block[1]= block[b_stride]= block[b_stride+1]= *block;
+
+ rd= get_4block_rd(s, mb_x, mb_y, 0);
+
+//FIXME chroma
+ if(rd < *best_rd){
+ *best_rd= rd;
+ return 1;
+ }else{
+ block[0]= backup[0];
+ block[1]= backup[1];
+ block[b_stride]= backup[2];
+ block[b_stride+1]= backup[3];
+ return 0;
+ }
+}
+
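+// Iterative OBMC-aware motion estimation: seed with encode_q_branch(), then
+// repeatedly refine each block (reference choice, fullpel diamond + subpel
+// square search, intra test) until a pass changes nothing; with 4MV, finally
+// try merging 2x2 block groups.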
+static void iterative_me(SnowContext *s){
+ int pass, mb_x, mb_y;
+ const int b_width = s->b_width << s->block_max_depth;
+ const int b_height= s->b_height << s->block_max_depth;
+ const int b_stride= b_width;
+ int color[3];
+
+ {
+ RangeCoder r = s->c;
+ uint8_t state[sizeof(s->block_state)];
+ memcpy(state, s->block_state, sizeof(s->block_state));
+ for(mb_y= 0; mb_y<s->b_height; mb_y++)
+ for(mb_x= 0; mb_x<s->b_width; mb_x++)
+ encode_q_branch(s, 0, mb_x, mb_y);
+ s->c = r;
+ memcpy(s->block_state, state, sizeof(s->block_state));
+ }
+
+ for(pass=0; pass<25; pass++){
+ int change= 0;
+
+ for(mb_y= 0; mb_y<b_height; mb_y++){
+ for(mb_x= 0; mb_x<b_width; mb_x++){
+ int dia_change, i, j, ref;
+ int best_rd= INT_MAX, ref_rd;
+ BlockNode backup, ref_b;
+ const int index= mb_x + mb_y * b_stride;
+ BlockNode *block= &s->block[index];
+ BlockNode *tb = mb_y ? &s->block[index-b_stride ] : NULL;
+ BlockNode *lb = mb_x ? &s->block[index -1] : NULL;
+ BlockNode *rb = mb_x+1<b_width ? &s->block[index +1] : NULL;
+ BlockNode *bb = mb_y+1<b_height ? &s->block[index+b_stride ] : NULL;
+ BlockNode *tlb= mb_x && mb_y ? &s->block[index-b_stride-1] : NULL;
+ BlockNode *trb= mb_x+1<b_width && mb_y ? &s->block[index-b_stride+1] : NULL;
+ BlockNode *blb= mb_x && mb_y+1<b_height ? &s->block[index+b_stride-1] : NULL;
+ BlockNode *brb= mb_x+1<b_width && mb_y+1<b_height ? &s->block[index+b_stride+1] : NULL;
+ const int b_w= (MB_SIZE >> s->block_max_depth);
+ uint8_t obmc_edged[MB_SIZE * 2][MB_SIZE * 2];
+
+ if(pass && (block->type & BLOCK_OPT))
+ continue;
+ block->type |= BLOCK_OPT;
+
+ backup= *block;
+
+ if(!s->me_cache_generation)
+ memset(s->me_cache, 0, sizeof(s->me_cache));
+ s->me_cache_generation += 1<<22;
+
+ //FIXME precalculate
+ {
+ int x, y;
+ for (y = 0; y < b_w * 2; y++)
+ memcpy(obmc_edged[y], ff_obmc_tab[s->block_max_depth] + y * b_w * 2, b_w * 2);
+ if(mb_x==0)
+ for(y=0; y<b_w*2; y++)
+ memset(obmc_edged[y], obmc_edged[y][0] + obmc_edged[y][b_w-1], b_w);
+ if(mb_x==b_stride-1)
+ for(y=0; y<b_w*2; y++)
+ memset(obmc_edged[y]+b_w, obmc_edged[y][b_w] + obmc_edged[y][b_w*2-1], b_w);
+ if(mb_y==0){
+ for(x=0; x<b_w*2; x++)
+ obmc_edged[0][x] += obmc_edged[b_w-1][x];
+ for(y=1; y<b_w; y++)
+ memcpy(obmc_edged[y], obmc_edged[0], b_w*2);
+ }
+ if(mb_y==b_height-1){
+ for(x=0; x<b_w*2; x++)
+ obmc_edged[b_w*2-1][x] += obmc_edged[b_w][x];
+ for(y=b_w; y<b_w*2-1; y++)
+ memcpy(obmc_edged[y], obmc_edged[b_w*2-1], b_w*2);
+ }
+ }
+
+ //skip stuff outside the picture
+ if(mb_x==0 || mb_y==0 || mb_x==b_width-1 || mb_y==b_height-1){
+ uint8_t *src= s-> input_picture->data[0];
+ uint8_t *dst= s->current_picture->data[0];
+ const int stride= s->current_picture->linesize[0];
+ const int block_w= MB_SIZE >> s->block_max_depth;
+ const int block_h= MB_SIZE >> s->block_max_depth;
+ const int sx= block_w*mb_x - block_w/2;
+ const int sy= block_h*mb_y - block_h/2;
+ const int w= s->plane[0].width;
+ const int h= s->plane[0].height;
+ int y;
+
+ for(y=sy; y<0; y++)
+ memcpy(dst + sx + y*stride, src + sx + y*stride, block_w*2);
+ for(y=h; y<sy+block_h*2; y++)
+ memcpy(dst + sx + y*stride, src + sx + y*stride, block_w*2);
+ if(sx<0){
+ for(y=sy; y<sy+block_h*2; y++)
+ memcpy(dst + sx + y*stride, src + sx + y*stride, -sx);
+ }
+ if(sx+block_w*2 > w){
+ for(y=sy; y<sy+block_h*2; y++)
+ memcpy(dst + w + y*stride, src + w + y*stride, sx+block_w*2 - w);
+ }
+ }
+
+ // intra(black) = neighbors' contribution to the current block
+ for(i=0; i < s->nb_planes; i++)
+ color[i]= get_dc(s, mb_x, mb_y, i);
+
+ // get previous score (cannot be cached due to OBMC)
+ if(pass > 0 && (block->type&BLOCK_INTRA)){
+ int color0[3]= {block->color[0], block->color[1], block->color[2]};
+ check_block(s, mb_x, mb_y, color0, 1, obmc_edged, &best_rd);
+ }else
+ check_block_inter(s, mb_x, mb_y, block->mx, block->my, obmc_edged, &best_rd);
+
+ ref_b= *block;
+ ref_rd= best_rd;
+ for(ref=0; ref < s->ref_frames; ref++){
+ int16_t (*mvr)[2]= &s->ref_mvs[ref][index];
+ if(s->ref_scores[ref][index] > s->ref_scores[ref_b.ref][index]*3/2) //FIXME tune threshold
+ continue;
+ block->ref= ref;
+ best_rd= INT_MAX;
+
+ check_block_inter(s, mb_x, mb_y, mvr[0][0], mvr[0][1], obmc_edged, &best_rd);
+ check_block_inter(s, mb_x, mb_y, 0, 0, obmc_edged, &best_rd);
+ if(tb)
+ check_block_inter(s, mb_x, mb_y, mvr[-b_stride][0], mvr[-b_stride][1], obmc_edged, &best_rd);
+ if(lb)
+ check_block_inter(s, mb_x, mb_y, mvr[-1][0], mvr[-1][1], obmc_edged, &best_rd);
+ if(rb)
+ check_block_inter(s, mb_x, mb_y, mvr[1][0], mvr[1][1], obmc_edged, &best_rd);
+ if(bb)
+ check_block_inter(s, mb_x, mb_y, mvr[b_stride][0], mvr[b_stride][1], obmc_edged, &best_rd);
+
+ /* fullpel ME */
+ //FIXME avoid subpel interpolation / round to nearest integer
+ do{
+ dia_change=0;
+ for(i=0; i<FFMAX(s->avctx->dia_size, 1); i++){
+ for(j=0; j<i; j++){
+ dia_change |= check_block_inter(s, mb_x, mb_y, block->mx+4*(i-j), block->my+(4*j), obmc_edged, &best_rd);
+ dia_change |= check_block_inter(s, mb_x, mb_y, block->mx-4*(i-j), block->my-(4*j), obmc_edged, &best_rd);
+ dia_change |= check_block_inter(s, mb_x, mb_y, block->mx+4*(i-j), block->my-(4*j), obmc_edged, &best_rd);
+ dia_change |= check_block_inter(s, mb_x, mb_y, block->mx-4*(i-j), block->my+(4*j), obmc_edged, &best_rd);
+ }
+ }
+ }while(dia_change);
+ /* subpel ME */
+ do{
+ static const int square[8][2]= {{+1, 0},{-1, 0},{ 0,+1},{ 0,-1},{+1,+1},{-1,-1},{+1,-1},{-1,+1},};
+ dia_change=0;
+ for(i=0; i<8; i++)
+ dia_change |= check_block_inter(s, mb_x, mb_y, block->mx+square[i][0], block->my+square[i][1], obmc_edged, &best_rd);
+ }while(dia_change);
+ //FIXME or try the standard 2 pass qpel or similar
+
+ mvr[0][0]= block->mx;
+ mvr[0][1]= block->my;
+ if(ref_rd > best_rd){
+ ref_rd= best_rd;
+ ref_b= *block;
+ }
+ }
+ best_rd= ref_rd;
+ *block= ref_b;
+ check_block(s, mb_x, mb_y, color, 1, obmc_edged, &best_rd);
+ //FIXME RD style color selection
+ if(!same_block(block, &backup)){
+ if(tb ) tb ->type &= ~BLOCK_OPT;
+ if(lb ) lb ->type &= ~BLOCK_OPT;
+ if(rb ) rb ->type &= ~BLOCK_OPT;
+ if(bb ) bb ->type &= ~BLOCK_OPT;
+ if(tlb) tlb->type &= ~BLOCK_OPT;
+ if(trb) trb->type &= ~BLOCK_OPT;
+ if(blb) blb->type &= ~BLOCK_OPT;
+ if(brb) brb->type &= ~BLOCK_OPT;
+ change ++;
+ }
+ }
+ }
+ av_log(s->avctx, AV_LOG_ERROR, "pass:%d changed:%d\n", pass, change);
+ if(!change)
+ break;
+ }
+
+ if(s->block_max_depth == 1){
+ int change= 0;
+ for(mb_y= 0; mb_y<b_height; mb_y+=2){
+ for(mb_x= 0; mb_x<b_width; mb_x+=2){
+ int i;
+ int best_rd, init_rd;
+ const int index= mb_x + mb_y * b_stride;
+ BlockNode *b[4];
+
+ b[0]= &s->block[index];
+ b[1]= b[0]+1;
+ b[2]= b[0]+b_stride;
+ b[3]= b[2]+1;
+ if(same_block(b[0], b[1]) &&
+ same_block(b[0], b[2]) &&
+ same_block(b[0], b[3]))
+ continue;
+
+ if(!s->me_cache_generation)
+ memset(s->me_cache, 0, sizeof(s->me_cache));
+ s->me_cache_generation += 1<<22;
+
+ init_rd= best_rd= get_4block_rd(s, mb_x, mb_y, 0);
+
+ //FIXME more multiref search?
+ check_4block_inter(s, mb_x, mb_y,
+ (b[0]->mx + b[1]->mx + b[2]->mx + b[3]->mx + 2) >> 2,
+ (b[0]->my + b[1]->my + b[2]->my + b[3]->my + 2) >> 2, 0, &best_rd);
+
+ for(i=0; i<4; i++)
+ if(!(b[i]->type&BLOCK_INTRA))
+ check_4block_inter(s, mb_x, mb_y, b[i]->mx, b[i]->my, b[i]->ref, &best_rd);
+
+ if(init_rd != best_rd)
+ change++;
+ }
+ }
+ av_log(s->avctx, AV_LOG_ERROR, "pass:4mv changed:%d\n", change*4);
+ }
+}
+
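+// Encode all top-level blocks, optionally running iterative_me() first.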
+static void encode_blocks(SnowContext *s, int search){
+ int x, y;
+ int w= s->b_width;
+ int h= s->b_height;
+
+ if(s->avctx->me_method == ME_ITER && !s->keyframe && search)
+ iterative_me(s);
+
+ for(y=0; y<h; y++){
+ if(s->c.bytestream_end - s->c.bytestream < w*MB_SIZE*MB_SIZE*3){ //FIXME nicer limit
+ av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+ return;
+ }
+ for(x=0; x<w; x++){
+ if(s->avctx->me_method == ME_ITER || !search)
+ encode_q_branch2(s, 0, x, y);
+ else
+ encode_q_branch (s, 0, x, y);
+ }
+ }
+}
+
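+// Dead-zone quantizer: coefficients inside the dead zone become zero, the
+// rest are divided by qmul (with an optional bias); lossless mode copies.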
+static void quantize(SnowContext *s, SubBand *b, IDWTELEM *dst, DWTELEM *src, int stride, int bias){
+ const int w= b->width;
+ const int h= b->height;
+ const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
+ const int qmul= ff_qexp[qlog&(QROOT-1)]<<((qlog>>QSHIFT) + ENCODER_EXTRA_BITS);
+ int x,y, thres1, thres2;
+
+ if(s->qlog == LOSSLESS_QLOG){
+ for(y=0; y<h; y++)
+ for(x=0; x<w; x++)
+ dst[x + y*stride]= src[x + y*stride];
+ return;
+ }
+
+ bias= bias ? 0 : (3*qmul)>>3;
+ thres1= ((qmul - bias)>>QEXPSHIFT) - 1;
+ thres2= 2*thres1;
+
+ if(!bias){
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ int i= src[x + y*stride];
+
+ if((unsigned)(i+thres1) > thres2){
+ if(i>=0){
+ i<<= QEXPSHIFT;
+ i/= qmul; //FIXME optimize
+ dst[x + y*stride]= i;
+ }else{
+ i= -i;
+ i<<= QEXPSHIFT;
+ i/= qmul; //FIXME optimize
+ dst[x + y*stride]= -i;
+ }
+ }else
+ dst[x + y*stride]= 0;
+ }
+ }
+ }else{
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ int i= src[x + y*stride];
+
+ if((unsigned)(i+thres1) > thres2){
+ if(i>=0){
+ i<<= QEXPSHIFT;
+ i= (i + bias) / qmul; //FIXME optimize
+ dst[x + y*stride]= i;
+ }else{
+ i= -i;
+ i<<= QEXPSHIFT;
+ i= (i + bias) / qmul; //FIXME optimize
+ dst[x + y*stride]= -i;
+ }
+ }else
+ dst[x + y*stride]= 0;
+ }
+ }
+ }
+}
+
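+// Inverse of quantize(): scale coefficients back by qmul with a qbias-derived
+// rounding offset; no-op in lossless mode.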
+static void dequantize(SnowContext *s, SubBand *b, IDWTELEM *src, int stride){
+ const int w= b->width;
+ const int h= b->height;
+ const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
+ const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
+ const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
+ int x,y;
+
+ if(s->qlog == LOSSLESS_QLOG) return;
+
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ int i= src[x + y*stride];
+ if(i<0){
+ src[x + y*stride]= -((-i*qmul + qadd)>>(QEXPSHIFT)); //FIXME try different bias
+ }else if(i>0){
+ src[x + y*stride]= (( i*qmul + qadd)>>(QEXPSHIFT));
+ }
+ }
+ }
+}
+
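+// DPCM the DC (LL) subband: predict each coefficient from its causal
+// neighbors (median or gradient predictor) and keep only the residual.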
+static void decorrelate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median){
+ const int w= b->width;
+ const int h= b->height;
+ int x,y;
+
+ for(y=h-1; y>=0; y--){
+ for(x=w-1; x>=0; x--){
+ int i= x + y*stride;
+
+ if(x){
+ if(use_median){
+ if(y && x+1<w) src[i] -= mid_pred(src[i - 1], src[i - stride], src[i - stride + 1]);
+ else src[i] -= src[i - 1];
+ }else{
+ if(y) src[i] -= mid_pred(src[i - 1], src[i - stride], src[i - 1] + src[i - stride] - src[i - 1 - stride]);
+ else src[i] -= src[i - 1];
+ }
+ }else{
+ if(y) src[i] -= src[i - stride];
+ }
+ }
+ }
+}
+
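+// Inverse of decorrelate(): re-add the prediction to restore the DC subband.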
+static void correlate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median){
+ const int w= b->width;
+ const int h= b->height;
+ int x,y;
+
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ int i= x + y*stride;
+
+ if(x){
+ if(use_median){
+ if(y && x+1<w) src[i] += mid_pred(src[i - 1], src[i - stride], src[i - stride + 1]);
+ else src[i] += src[i - 1];
+ }else{
+ if(y) src[i] += mid_pred(src[i - 1], src[i - stride], src[i - 1] + src[i - stride] - src[i - 1 - stride]);
+ else src[i] += src[i - 1];
+ }
+ }else{
+ if(y) src[i] += src[i - stride];
+ }
+ }
+ }
+}
+
+static void encode_qlogs(SnowContext *s){
+ int plane_index, level, orientation;
+
+ for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
+ for(level=0; level<s->spatial_decomposition_count; level++){
+ for(orientation=level ? 1:0; orientation<4; orientation++){
+ if(orientation==2) continue;
+ put_symbol(&s->c, s->header_state, s->plane[plane_index].band[level][orientation].qlog, 1);
+ }
+ }
+ }
+}
+
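+// Write the frame header: keyframe flag, global stream parameters on
+// keyframes, MC filter updates, and deltas of the per-frame coding parameters.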
+static void encode_header(SnowContext *s){
+ int plane_index, i;
+ uint8_t kstate[32];
+
+ memset(kstate, MID_STATE, sizeof(kstate));
+
+ put_rac(&s->c, kstate, s->keyframe);
+ if(s->keyframe || s->always_reset){
+ ff_snow_reset_contexts(s);
+ s->last_spatial_decomposition_type=
+ s->last_qlog=
+ s->last_qbias=
+ s->last_mv_scale=
+ s->last_block_max_depth= 0;
+ for(plane_index=0; plane_index<2; plane_index++){
+ Plane *p= &s->plane[plane_index];
+ p->last_htaps=0;
+ p->last_diag_mc=0;
+ memset(p->last_hcoeff, 0, sizeof(p->last_hcoeff));
+ }
+ }
+ if(s->keyframe){
+ put_symbol(&s->c, s->header_state, s->version, 0);
+ put_rac(&s->c, s->header_state, s->always_reset);
+ put_symbol(&s->c, s->header_state, s->temporal_decomposition_type, 0);
+ put_symbol(&s->c, s->header_state, s->temporal_decomposition_count, 0);
+ put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
+ put_symbol(&s->c, s->header_state, s->colorspace_type, 0);
+ if (s->nb_planes > 2) {
+ put_symbol(&s->c, s->header_state, s->chroma_h_shift, 0);
+ put_symbol(&s->c, s->header_state, s->chroma_v_shift, 0);
+ }
+ put_rac(&s->c, s->header_state, s->spatial_scalability);
+// put_rac(&s->c, s->header_state, s->rate_scalability);
+ put_symbol(&s->c, s->header_state, s->max_ref_frames-1, 0);
+
+ encode_qlogs(s);
+ }
+
+ if(!s->keyframe){
+ int update_mc=0;
+ for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
+ Plane *p= &s->plane[plane_index];
+ update_mc |= p->last_htaps != p->htaps;
+ update_mc |= p->last_diag_mc != p->diag_mc;
+ update_mc |= !!memcmp(p->last_hcoeff, p->hcoeff, sizeof(p->hcoeff));
+ }
+ put_rac(&s->c, s->header_state, update_mc);
+ if(update_mc){
+ for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
+ Plane *p= &s->plane[plane_index];
+ put_rac(&s->c, s->header_state, p->diag_mc);
+ put_symbol(&s->c, s->header_state, p->htaps/2-1, 0);
+ for(i= p->htaps/2; i; i--)
+ put_symbol(&s->c, s->header_state, FFABS(p->hcoeff[i]), 0);
+ }
+ }
+ if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
+ put_rac(&s->c, s->header_state, 1);
+ put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
+ encode_qlogs(s);
+ }else
+ put_rac(&s->c, s->header_state, 0);
+ }
+
+ put_symbol(&s->c, s->header_state, s->spatial_decomposition_type - s->last_spatial_decomposition_type, 1);
+ put_symbol(&s->c, s->header_state, s->qlog - s->last_qlog , 1);
+ put_symbol(&s->c, s->header_state, s->mv_scale - s->last_mv_scale, 1);
+ put_symbol(&s->c, s->header_state, s->qbias - s->last_qbias , 1);
+ put_symbol(&s->c, s->header_state, s->block_max_depth - s->last_block_max_depth, 1);
+
+}
+
+static void update_last_header_values(SnowContext *s){
+ int plane_index;
+
+ if(!s->keyframe){
+ for(plane_index=0; plane_index<2; plane_index++){
+ Plane *p= &s->plane[plane_index];
+ p->last_diag_mc= p->diag_mc;
+ p->last_htaps = p->htaps;
+ memcpy(p->last_hcoeff, p->hcoeff, sizeof(p->hcoeff));
+ }
+ }
+
+ s->last_spatial_decomposition_type = s->spatial_decomposition_type;
+ s->last_qlog = s->qlog;
+ s->last_qbias = s->qbias;
+ s->last_mv_scale = s->mv_scale;
+ s->last_block_max_depth = s->block_max_depth;
+ s->last_spatial_decomposition_count = s->spatial_decomposition_count;
+}
+
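+// Map an MPEG-style qscale to snow's logarithmic quantizer scale, QROOT
+// steps per factor of two.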
+static int qscale2qlog(int qscale){
+ return rint(QROOT*log2(qscale / (float)FF_QP2LAMBDA))
+ + 61*QROOT/8; ///< 64 > 60
+}
+
+static int ratecontrol_1pass(SnowContext *s, AVFrame *pict)
+{
+ /* Estimate the frame's complexity as a sum of weighted dwt coefficients.
+ * FIXME we know exact mv bits at this point,
+ * but ratecontrol isn't set up to include them. */
+ uint32_t coef_sum= 0;
+ int level, orientation, delta_qlog;
+
+ for(level=0; level<s->spatial_decomposition_count; level++){
+ for(orientation=level ? 1 : 0; orientation<4; orientation++){
+ SubBand *b= &s->plane[0].band[level][orientation];
+ IDWTELEM *buf= b->ibuf;
+ const int w= b->width;
+ const int h= b->height;
+ const int stride= b->stride;
+ const int qlog= av_clip(2*QROOT + b->qlog, 0, QROOT*16);
+ const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
+ const int qdiv= (1<<16)/qmul;
+ int x, y;
+ //FIXME this is ugly
+ for(y=0; y<h; y++)
+ for(x=0; x<w; x++)
+ buf[x+y*stride]= b->buf[x+y*stride];
+ if(orientation==0)
+ decorrelate(s, b, buf, stride, 1, 0);
+ for(y=0; y<h; y++)
+ for(x=0; x<w; x++)
+ coef_sum+= abs(buf[x+y*stride]) * qdiv >> 16;
+ }
+ }
+
+ /* ugly, ratecontrol just takes a sqrt again */
+ av_assert0(coef_sum < INT_MAX);
+ coef_sum = (uint64_t)coef_sum * coef_sum >> 16;
+
+ if(pict->pict_type == AV_PICTURE_TYPE_I){
+ s->m.current_picture.mb_var_sum= coef_sum;
+ s->m.current_picture.mc_mb_var_sum= 0;
+ }else{
+ s->m.current_picture.mc_mb_var_sum= coef_sum;
+ s->m.current_picture.mb_var_sum= 0;
+ }
+
+ pict->quality= ff_rate_estimate_qscale(&s->m, 1);
+ if (pict->quality < 0)
+ return INT_MIN;
+ s->lambda= pict->quality * 3/2;
+ delta_qlog= qscale2qlog(pict->quality) - s->qlog;
+ s->qlog+= delta_qlog;
+ return delta_qlog;
+}
+
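+// Derive each subband's qlog offset from the energy of its synthesis basis
+// function, so quantization noise is weighted evenly across bands.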
+static void calculate_visual_weight(SnowContext *s, Plane *p){
+ int width = p->width;
+ int height= p->height;
+ int level, orientation, x, y;
+
+ for(level=0; level<s->spatial_decomposition_count; level++){
+ for(orientation=level ? 1 : 0; orientation<4; orientation++){
+ SubBand *b= &p->band[level][orientation];
+ IDWTELEM *ibuf= b->ibuf;
+ int64_t error=0;
+
+ memset(s->spatial_idwt_buffer, 0, sizeof(*s->spatial_idwt_buffer)*width*height);
+ ibuf[b->width/2 + b->height/2*b->stride]= 256*16;
+ ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, width, height, width, s->spatial_decomposition_type, s->spatial_decomposition_count);
+ for(y=0; y<height; y++){
+ for(x=0; x<width; x++){
+ int64_t d= s->spatial_idwt_buffer[x + y*width]*16;
+ error += d*d;
+ }
+ }
+
+ b->qlog= (int)(log(352256.0/sqrt(error)) / log(pow(2.0, 1.0/QROOT))+0.5);
+ }
+ }
+}
+
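+// Main encoding entry point: copy and pad the input, decide frame type and
+// quantizer, run ME and block coding, then per plane: DWT, quantize,
+// entropy-code, dequantize and inverse DWT to build the reference frame.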
+static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+ AVFrame *pict, int *got_packet)
+{
+ SnowContext *s = avctx->priv_data;
+ RangeCoder * const c= &s->c;
+ AVFrame *pic = pict;
+ const int width= s->avctx->width;
+ const int height= s->avctx->height;
+ int level, orientation, plane_index, i, y, ret;
+ uint8_t rc_header_bak[sizeof(s->header_state)];
+ uint8_t rc_block_bak[sizeof(s->block_state)];
+
+ if ((ret = ff_alloc_packet2(avctx, pkt, s->b_width*s->b_height*MB_SIZE*MB_SIZE*3 + FF_MIN_BUFFER_SIZE)) < 0)
+ return ret;
+
+ ff_init_range_encoder(c, pkt->data, pkt->size);
+ ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
+
+ for(i=0; i < s->nb_planes; i++){
+ int hshift= i ? s->chroma_h_shift : 0;
+ int vshift= i ? s->chroma_v_shift : 0;
+ for(y=0; y<(height>>vshift); y++)
+ memcpy(&s->input_picture->data[i][y * s->input_picture->linesize[i]],
+ &pict->data[i][y * pict->linesize[i]],
+ width>>hshift);
+ s->dsp.draw_edges(s->input_picture->data[i], s->input_picture->linesize[i],
+ width >> hshift, height >> vshift,
+ EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
+ EDGE_TOP | EDGE_BOTTOM);
+
+ }
+ emms_c();
+ s->new_picture = pict;
+
+ s->m.picture_number= avctx->frame_number;
+ if(avctx->flags&CODEC_FLAG_PASS2){
+ s->m.pict_type = pic->pict_type = s->m.rc_context.entry[avctx->frame_number].new_pict_type;
+ s->keyframe = pic->pict_type == AV_PICTURE_TYPE_I;
+ if(!(avctx->flags&CODEC_FLAG_QSCALE)) {
+ pic->quality = ff_rate_estimate_qscale(&s->m, 0);
+ if (pic->quality < 0)
+ return -1;
+ }
+ }else{
+ s->keyframe= avctx->gop_size==0 || avctx->frame_number % avctx->gop_size == 0;
+ s->m.pict_type = pic->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
+ }
+
+ if(s->pass1_rc && avctx->frame_number == 0)
+ pic->quality = 2*FF_QP2LAMBDA;
+ if (pic->quality) {
+ s->qlog = qscale2qlog(pic->quality);
+ s->lambda = pic->quality * 3/2;
+ }
+ if (s->qlog < 0 || (!pic->quality && (avctx->flags & CODEC_FLAG_QSCALE))) {
+ s->qlog= LOSSLESS_QLOG;
+ s->lambda = 0;
+ }//else keep previous frame's qlog until after motion estimation
+
+ ff_snow_frame_start(s);
+ avctx->coded_frame= s->current_picture;
+
+ s->m.current_picture_ptr= &s->m.current_picture;
- s->m.current_picture.f.data[0] = s->current_picture->data[0];
- s->m. last_picture.f.data[0] = s->last_picture[0]->data[0];
- s->m. new_picture.f.data[0] = s-> input_picture->data[0];
++ s->m.current_picture.f = s->current_picture;
++ s->m.current_picture.f->pts = pict->pts;
+ if(pic->pict_type == AV_PICTURE_TYPE_P){
+ int block_width = (width +15)>>4;
+ int block_height= (height+15)>>4;
+ int stride= s->current_picture->linesize[0];
+
+ av_assert0(s->current_picture->data[0]);
+ av_assert0(s->last_picture[0]->data[0]);
+
+ s->m.avctx= s->avctx;
- s->m. last_picture.f.linesize[0] =
- s->m. new_picture.f.linesize[0] =
- s->m.current_picture.f.linesize[0] = stride;
++ s->m. last_picture.f = s->last_picture[0];
++ s->m. new_picture.f = s->input_picture;
++ s->m.current_picture.f->data[0] = s->current_picture->data[0];
++ s->m. last_picture.f->data[0] = s->last_picture[0]->data[0];
++ s->m. new_picture.f->data[0] = s-> input_picture->data[0];
+ s->m. last_picture_ptr= &s->m. last_picture;
+ s->m.linesize=
- s->m.current_picture.f.display_picture_number =
- s->m.current_picture.f.coded_picture_number = avctx->frame_number;
- s->m.current_picture.f.quality = pic->quality;
++ s->m. last_picture.f->linesize[0] =
++ s->m. new_picture.f->linesize[0] =
++ s->m.current_picture.f->linesize[0] = stride;
+ s->m.uvlinesize= s->current_picture->linesize[1];
+ s->m.width = width;
+ s->m.height= height;
+ s->m.mb_width = block_width;
+ s->m.mb_height= block_height;
+ s->m.mb_stride= s->m.mb_width+1;
+ s->m.b8_stride= 2*s->m.mb_width+1;
+ s->m.f_code=1;
+ s->m.pict_type = pic->pict_type;
+ s->m.me_method= s->avctx->me_method;
+ s->m.me.scene_change_score=0;
+ s->m.flags= s->avctx->flags;
+ s->m.quarter_sample= (s->avctx->flags & CODEC_FLAG_QPEL)!=0;
+ s->m.out_format= FMT_H263;
+ s->m.unrestricted_mv= 1;
+
+ s->m.lambda = s->lambda;
+ s->m.qscale= (s->m.lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
+ s->lambda2= s->m.lambda2= (s->m.lambda*s->m.lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
+
+ s->m.dsp= s->dsp; //move
+ s->m.hdsp = s->hdsp;
+ ff_init_me(&s->m);
+ s->hdsp = s->m.hdsp;
+ s->dsp= s->m.dsp;
+ }
+
+ if(s->pass1_rc){
+ memcpy(rc_header_bak, s->header_state, sizeof(s->header_state));
+ memcpy(rc_block_bak, s->block_state, sizeof(s->block_state));
+ }
+
+redo_frame:
+
+ s->spatial_decomposition_count= 5;
+
+ while( !(width >>(s->chroma_h_shift + s->spatial_decomposition_count))
+ || !(height>>(s->chroma_v_shift + s->spatial_decomposition_count)))
+ s->spatial_decomposition_count--;
+
+ if (s->spatial_decomposition_count <= 0) {
+ av_log(avctx, AV_LOG_ERROR, "Resolution too low\n");
+ return AVERROR(EINVAL);
+ }
+
+ s->m.pict_type = pic->pict_type;
+ s->qbias = pic->pict_type == AV_PICTURE_TYPE_P ? 2 : 0;
+
+ ff_snow_common_init_after_header(avctx);
+
+ if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
+ for(plane_index=0; plane_index < s->nb_planes; plane_index++){
+ calculate_visual_weight(s, &s->plane[plane_index]);
+ }
+ }
+
+ encode_header(s);
+ s->m.misc_bits = 8*(s->c.bytestream - s->c.bytestream_start);
+ encode_blocks(s, 1);
+ s->m.mv_bits = 8*(s->c.bytestream - s->c.bytestream_start) - s->m.misc_bits;
+
+ for(plane_index=0; plane_index < s->nb_planes; plane_index++){
+ Plane *p= &s->plane[plane_index];
+ int w= p->width;
+ int h= p->height;
+ int x, y;
+// int bits= put_bits_count(&s->c.pb);
+
+ if (!s->memc_only) {
+ //FIXME optimize
+ if(pict->data[plane_index]) //FIXME gray hack
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ s->spatial_idwt_buffer[y*w + x]= pict->data[plane_index][y*pict->linesize[plane_index] + x]<<FRAC_BITS;
+ }
+ }
+ predict_plane(s, s->spatial_idwt_buffer, plane_index, 0);
+
+ if( plane_index==0
+ && pic->pict_type == AV_PICTURE_TYPE_P
+ && !(avctx->flags&CODEC_FLAG_PASS2)
+ && s->m.me.scene_change_score > s->avctx->scenechange_threshold){
+ ff_init_range_encoder(c, pkt->data, pkt->size);
+ ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
+ pic->pict_type= AV_PICTURE_TYPE_I;
+ s->keyframe=1;
+ s->current_picture->key_frame=1;
+ goto redo_frame;
+ }
+
+ if(s->qlog == LOSSLESS_QLOG){
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ s->spatial_dwt_buffer[y*w + x]= (s->spatial_idwt_buffer[y*w + x] + (1<<(FRAC_BITS-1))-1)>>FRAC_BITS;
+ }
+ }
+ }else{
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ s->spatial_dwt_buffer[y*w + x]=s->spatial_idwt_buffer[y*w + x]<<ENCODER_EXTRA_BITS;
+ }
+ }
+ }
+
+ ff_spatial_dwt(s->spatial_dwt_buffer, s->temp_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
+
+ if(s->pass1_rc && plane_index==0){
+ int delta_qlog = ratecontrol_1pass(s, pic);
+ if (delta_qlog <= INT_MIN)
+ return -1;
+ if(delta_qlog){
+ //reordering qlog in the bitstream would eliminate this reset
+ ff_init_range_encoder(c, pkt->data, pkt->size);
+ memcpy(s->header_state, rc_header_bak, sizeof(s->header_state));
+ memcpy(s->block_state, rc_block_bak, sizeof(s->block_state));
+ encode_header(s);
+ encode_blocks(s, 0);
+ }
+ }
+
+ for(level=0; level<s->spatial_decomposition_count; level++){
+ for(orientation=level ? 1 : 0; orientation<4; orientation++){
+ SubBand *b= &p->band[level][orientation];
+
+ quantize(s, b, b->ibuf, b->buf, b->stride, s->qbias);
+ if(orientation==0)
+ decorrelate(s, b, b->ibuf, b->stride, pic->pict_type == AV_PICTURE_TYPE_P, 0);
+ if (!s->no_bitstream)
+ encode_subband(s, b, b->ibuf, b->parent ? b->parent->ibuf : NULL, b->stride, orientation);
+ av_assert0(b->parent==NULL || b->parent->stride == b->stride*2);
+ if(orientation==0)
+ correlate(s, b, b->ibuf, b->stride, 1, 0);
+ }
+ }
+
+ for(level=0; level<s->spatial_decomposition_count; level++){
+ for(orientation=level ? 1 : 0; orientation<4; orientation++){
+ SubBand *b= &p->band[level][orientation];
+
+ dequantize(s, b, b->ibuf, b->stride);
+ }
+ }
+
+ ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
+ if(s->qlog == LOSSLESS_QLOG){
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ s->spatial_idwt_buffer[y*w + x]<<=FRAC_BITS;
+ }
+ }
+ }
+ predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
+ }else{
+ //ME/MC only
+ if(pic->pict_type == AV_PICTURE_TYPE_I){
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x]=
+ pict->data[plane_index][y*pict->linesize[plane_index] + x];
+ }
+ }
+ }else{
+ memset(s->spatial_idwt_buffer, 0, sizeof(IDWTELEM)*w*h);
+ predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
+ }
+ }
+ if(s->avctx->flags&CODEC_FLAG_PSNR){
+ int64_t error= 0;
+
+ if(pict->data[plane_index]) //FIXME gray hack
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ int d= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x] - pict->data[plane_index][y*pict->linesize[plane_index] + x];
+ error += d*d;
+ }
+ }
+ s->avctx->error[plane_index] += error;
+ s->current_picture->error[plane_index] = error;
+ }
+
+ }
+
+ update_last_header_values(s);
+
+ ff_snow_release_buffer(avctx);
+
+ s->current_picture->coded_picture_number = avctx->frame_number;
+ s->current_picture->pict_type = pict->pict_type;
+ s->current_picture->quality = pict->quality;
+ s->m.frame_bits = 8*(s->c.bytestream - s->c.bytestream_start);
+ s->m.p_tex_bits = s->m.frame_bits - s->m.misc_bits - s->m.mv_bits;
++ s->m.current_picture.f->display_picture_number =
++ s->m.current_picture.f->coded_picture_number = avctx->frame_number;
++ s->m.current_picture.f->quality = pic->quality;
+ s->m.total_bits += 8*(s->c.bytestream - s->c.bytestream_start);
+ if(s->pass1_rc)
+ if (ff_rate_estimate_qscale(&s->m, 0) < 0)
+ return -1;
+ if(avctx->flags&CODEC_FLAG_PASS1)
+ ff_write_pass1_stats(&s->m);
+ s->m.last_pict_type = s->m.pict_type;
+ avctx->frame_bits = s->m.frame_bits;
+ avctx->mv_bits = s->m.mv_bits;
+ avctx->misc_bits = s->m.misc_bits;
+ avctx->p_tex_bits = s->m.p_tex_bits;
+
+ emms_c();
+
+ pkt->size = ff_rac_terminate(c);
+ if (avctx->coded_frame->key_frame)
+ pkt->flags |= AV_PKT_FLAG_KEY;
+ *got_packet = 1;
+
+ return 0;
+}
+
+static av_cold int encode_end(AVCodecContext *avctx)
+{
+ SnowContext *s = avctx->priv_data;
+
+ ff_snow_common_end(s);
+ ff_rate_control_uninit(&s->m);
+ av_frame_free(&s->input_picture);
+ av_free(avctx->stats_out);
+
+ return 0;
+}
+
+#define OFFSET(x) offsetof(SnowContext, x)
+#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
+static const AVOption options[] = {
+ { "memc_only", "Only do ME/MC (I frames -> ref, P frame -> ME+MC).", OFFSET(memc_only), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
+ { "no_bitstream", "Skip final bitstream writeout.", OFFSET(no_bitstream), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
+ { NULL },
+};
+
+static const AVClass snowenc_class = {
+ .class_name = "snow encoder",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_snow_encoder = {
+ .name = "snow",
+ .long_name = NULL_IF_CONFIG_SMALL("Snow"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_SNOW,
+ .priv_data_size = sizeof(SnowContext),
+ .init = encode_init,
+ .encode2 = encode_frame,
+ .close = encode_end,
+ .pix_fmts = (const enum AVPixelFormat[]){
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ },
+ .priv_class = &snowenc_class,
+};
+
+
+#ifdef TEST
+#undef malloc
+#undef free
+#undef printf
+
+#include "libavutil/lfg.h"
+#include "libavutil/mathematics.h"
+
+int main(void){
+#define width 256
+#define height 256
+ int buffer[2][width*height];
+ SnowContext s;
+ int i;
+ AVLFG prng;
+ s.spatial_decomposition_count=6;
+ s.spatial_decomposition_type=1;
+
+ s.temp_dwt_buffer = av_mallocz(width * sizeof(DWTELEM));
+ s.temp_idwt_buffer = av_mallocz(width * sizeof(IDWTELEM));
+
+ av_lfg_init(&prng, 1);
+
+ printf("testing 5/3 DWT\n");
+ for(i=0; i<width*height; i++)
+ buffer[0][i] = buffer[1][i] = av_lfg_get(&prng) % 54321 - 12345;
+
+ ff_spatial_dwt(buffer[0], s.temp_dwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
+ ff_spatial_idwt((IDWTELEM*)buffer[0], s.temp_idwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
+
+ for(i=0; i<width*height; i++)
+ if(buffer[0][i]!= buffer[1][i]) printf("fsck: %6d %12d %7d\n",i, buffer[0][i], buffer[1][i]);
+
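+ /* the integer approximation of the 9/7 wavelet is slightly lossy, so only require the round-trip error to stay small */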
+ printf("testing 9/7 DWT\n");
+ s.spatial_decomposition_type=0;
+ for(i=0; i<width*height; i++)
+ buffer[0][i] = buffer[1][i] = av_lfg_get(&prng) % 54321 - 12345;
+
+ ff_spatial_dwt(buffer[0], s.temp_dwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
+ ff_spatial_idwt((IDWTELEM*)buffer[0], s.temp_idwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
+
+ for(i=0; i<width*height; i++)
+ if(FFABS(buffer[0][i] - buffer[1][i])>20) printf("fsck: %6d %12d %7d\n",i, buffer[0][i], buffer[1][i]);
+
+ {
+ int level, orientation, x, y;
+ int64_t errors[8][4];
+ int64_t g=0;
+
+ memset(errors, 0, sizeof(errors));
+ s.spatial_decomposition_count=3;
+ s.spatial_decomposition_type=0;
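+ /* reconstruct a single impulse per subband and measure the L2 norm of the result; the norms, divided by their gcd, form the visual_weight table printed below */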
+ for(level=0; level<s.spatial_decomposition_count; level++){
+ for(orientation=level ? 1 : 0; orientation<4; orientation++){
+ int w= width >> (s.spatial_decomposition_count-level);
+ int h= height >> (s.spatial_decomposition_count-level);
+ int stride= width << (s.spatial_decomposition_count-level);
+ DWTELEM *buf= buffer[0];
+ int64_t error=0;
+
+ if(orientation&1) buf+=w;
+ if(orientation>1) buf+=stride>>1;
+
+ memset(buffer[0], 0, sizeof(int)*width*height);
+ buf[w/2 + h/2*stride]= 256*256;
+ ff_spatial_idwt((IDWTELEM*)buffer[0], s.temp_idwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
+ for(y=0; y<height; y++){
+ for(x=0; x<width; x++){
+ int64_t d= buffer[0][x + y*width];
+ error += d*d;
+ if(FFABS(width/2-x)<9 && FFABS(height/2-y)<9 && level==2) printf("%8"PRId64" ", d);
+ }
+ if(FFABS(height/2-y)<9 && level==2) printf("\n");
+ }
+ error= (int)(sqrt(error)+0.5);
+ errors[level][orientation]= error;
+ if(g) g=av_gcd(g, error);
+ else g= error;
+ }
+ }
+ printf("static int const visual_weight[][4]={\n");
+ for(level=0; level<s.spatial_decomposition_count; level++){
+ printf(" {");
+ for(orientation=0; orientation<4; orientation++){
+ printf("%8"PRId64",", errors[level][orientation]/g);
+ }
+ printf("},\n");
+ }
+ printf("};\n");
+ {
+ int level=2;
+ int w= width >> (s.spatial_decomposition_count-level);
+ //int h= height >> (s.spatial_decomposition_count-level);
+ int stride= width << (s.spatial_decomposition_count-level);
+ DWTELEM *buf= buffer[0];
+ int64_t error=0;
+
+ buf+=w;
+ buf+=stride>>1;
+
+ memset(buffer[0], 0, sizeof(int)*width*height);
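+ /* fill with a pattern that alternates every pixel, so its energy should concentrate in the finest subbands after the forward DWT */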
+ for(y=0; y<height; y++){
+ for(x=0; x<width; x++){
+ int tab[4]={0,2,3,1};
+ buffer[0][x+width*y]= 256*256*tab[(x&1) + 2*(y&1)];
+ }
+ }
+ ff_spatial_dwt(buffer[0], s.temp_dwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
+ for(y=0; y<height; y++){
+ for(x=0; x<width; x++){
+ int64_t d= buffer[0][x + y*width];
+ error += d*d;
+ if(FFABS(width/2-x)<9 && FFABS(height/2-y)<9) printf("%8"PRId64" ", d);
+ }
+ if(FFABS(height/2-y)<9) printf("\n");
+ }
+ }
+
+ }
+ return 0;
+}
+#endif /* TEST */
--- /dev/null
- ff_vaapi_get_surface_id(&s->current_picture_ptr->f));
+/*
+ * Video Acceleration API (video decoding)
+ * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1
+ *
+ * Copyright (C) 2013 Anton Khirnov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "vaapi_internal.h"
+
+int ff_vaapi_mpeg_end_frame(AVCodecContext *avctx)
+{
+ struct vaapi_context * const vactx = avctx->hwaccel_context;
+ MpegEncContext *s = avctx->priv_data;
+ int ret;
+
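+ /* submit all queued slice buffers, then render the picture to the surface backing the current frame */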
+ ret = ff_vaapi_commit_slices(vactx);
+ if (ret < 0)
+ goto finish;
+
+ ret = ff_vaapi_render_picture(vactx,
++ ff_vaapi_get_surface_id(s->current_picture_ptr->f));
+ if (ret < 0)
+ goto finish;
+
+ ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
+
+finish:
+ ff_vaapi_common_end_frame(avctx);
+ return ret;
+}
+
}
if (!dir) {
if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
- srcY = s->current_picture.f.data[0];
- srcU = s->current_picture.f.data[1];
- srcV = s->current_picture.f.data[2];
+ srcY = s->current_picture.f->data[0];
+ srcU = s->current_picture.f->data[1];
+ srcV = s->current_picture.f->data[2];
luty = v->curr_luty;
lutuv = v->curr_lutuv;
- use_ic = v->curr_use_ic;
+ use_ic = *v->curr_use_ic;
} else {
- srcY = s->last_picture.f.data[0];
- srcU = s->last_picture.f.data[1];
- srcV = s->last_picture.f.data[2];
+ srcY = s->last_picture.f->data[0];
+ srcU = s->last_picture.f->data[1];
+ srcV = s->last_picture.f->data[2];
luty = v->last_luty;
lutuv = v->last_lutuv;
use_ic = v->last_use_ic;
if (!dir) {
if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
- srcY = s->current_picture.f.data[0];
+ srcY = s->current_picture.f->data[0];
luty = v->curr_luty;
- use_ic = v->curr_use_ic;
+ use_ic = *v->curr_use_ic;
} else {
- srcY = s->last_picture.f.data[0];
+ srcY = s->last_picture.f->data[0];
luty = v->last_luty;
use_ic = v->last_use_ic;
}
if (!dir) {
if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
- srcU = s->current_picture.f.data[1];
- srcV = s->current_picture.f.data[2];
+ srcU = s->current_picture.f->data[1];
+ srcV = s->current_picture.f->data[2];
lutuv = v->curr_lutuv;
- use_ic = v->curr_use_ic;
+ use_ic = *v->curr_use_ic;
} else {
- srcU = s->last_picture.f.data[1];
- srcV = s->last_picture.f.data[2];
+ srcU = s->last_picture.f->data[1];
+ srcV = s->last_picture.f->data[2];
lutuv = v->last_lutuv;
use_ic = v->last_use_ic;
}
uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
if (i < 2 ? dir : dir2) {
- srcU = s->next_picture.f.data[1];
- srcV = s->next_picture.f.data[2];
- srcU = s->next_picture.f->data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
- srcV = s->next_picture.f->data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
++ srcU = s->next_picture.f->data[1];
++ srcV = s->next_picture.f->data[2];
lutuv = v->next_lutuv;
use_ic = v->next_use_ic;
} else {
- srcU = s->last_picture.f.data[1];
- srcV = s->last_picture.f.data[2];
- srcU = s->last_picture.f->data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
- srcV = s->last_picture.f->data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
++ srcU = s->last_picture.f->data[1];
++ srcV = s->last_picture.f->data[2];
lutuv = v->last_lutuv;
use_ic = v->last_use_ic;
}
AVCodecContext *avctx = s->avctx;
SpriteData sd;
- vc1_parse_sprites(v, gb, &sd);
+ memset(&sd, 0, sizeof(sd));
+
+ ret = vc1_parse_sprites(v, gb, &sd);
+ if (ret < 0)
+ return ret;
- if (!s->current_picture.f.data[0]) {
+ if (!s->current_picture.f->data[0]) {
av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
return -1;
}
goto err;
}
+ if ((s->mb_height >> v->field_mode) == 0) {
+ av_log(v->s.avctx, AV_LOG_ERROR, "image too short\n");
+ goto err;
+ }
+
// for skipping the frame
- s->current_picture.f.pict_type = s->pict_type;
- s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
+ s->current_picture.f->pict_type = s->pict_type;
+ s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
/* skip B-frames if we don't have reference frames */
if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
goto err;
}
- v->s.current_picture_ptr->f.interlaced_frame = (v->fcm != PROGRESSIVE);
- v->s.current_picture_ptr->f.top_field_first = v->tff;
+ v->s.current_picture_ptr->field_picture = v->field_mode;
++ v->s.current_picture_ptr->f->interlaced_frame = (v->fcm != PROGRESSIVE);
++ v->s.current_picture_ptr->f->top_field_first = v->tff;
+
// process pulldown flags
- s->current_picture_ptr->f.repeat_pict = 0;
+ s->current_picture_ptr->f->repeat_pict = 0;
// Pulldown flags are only valid when 'broadcast' has been set.
// So ticks_per_frame will be 2
if (v->rff) {
s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab;
- if (avctx->hwaccel) {
- if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
- goto err;
- if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
- goto err;
- if (avctx->hwaccel->end_frame(avctx) < 0)
- goto err;
+ if (CONFIG_VC1_VDPAU_DECODER
+ && s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) {
+ if (v->field_mode && buf_start_second_field) {
+ ff_vdpau_vc1_decode_picture(s, buf_start, buf_start_second_field - buf_start);
+ ff_vdpau_vc1_decode_picture(s, buf_start_second_field, (buf + buf_size) - buf_start_second_field);
+ } else {
+ ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
+ }
+ } else if (avctx->hwaccel) {
+ if (v->field_mode && buf_start_second_field) {
+ // decode first field
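+ // with tff set, the first field is the top one: PICT_BOTTOM_FIELD - 1 == PICT_TOP_FIELD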
+ s->picture_structure = PICT_BOTTOM_FIELD - v->tff;
+ if (avctx->hwaccel->start_frame(avctx, buf_start, buf_start_second_field - buf_start) < 0)
+ goto err;
+ if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_start_second_field - buf_start) < 0)
+ goto err;
+ if (avctx->hwaccel->end_frame(avctx) < 0)
+ goto err;
+
+ // decode second field
+ s->gb = slices[n_slices1 + 1].gb;
+ s->picture_structure = PICT_TOP_FIELD + v->tff;
+ v->second_field = 1;
+ v->pic_header_flag = 0;
+ if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "parsing header for second field failed");
+ goto err;
+ }
- v->s.current_picture_ptr->f.pict_type = v->s.pict_type;
++ v->s.current_picture_ptr->f->pict_type = v->s.pict_type;
+
+ if (avctx->hwaccel->start_frame(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
+ goto err;
+ if (avctx->hwaccel->decode_slice(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
+ goto err;
+ if (avctx->hwaccel->end_frame(avctx) < 0)
+ goto err;
+ } else {
+ s->picture_structure = PICT_FRAME;
+ if (avctx->hwaccel->start_frame(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
+ goto err;
+ if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
+ goto err;
+ if (avctx->hwaccel->end_frame(avctx) < 0)
+ goto err;
+ }
} else {
int header_ret = 0;
*got_frame = 1;
} else {
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
- if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+ if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
goto err;
- ff_print_debug_info(s, s->current_picture_ptr);
+ ff_print_debug_info(s, s->current_picture_ptr, pict);
*got_frame = 1;
} else if (s->last_picture_ptr != NULL) {
- if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
+ if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
goto err;
- ff_print_debug_info(s, s->last_picture_ptr);
+ ff_print_debug_info(s, s->last_picture_ptr, pict);
*got_frame = 1;
}
}
MpegEncContext *s = avctx->priv_data;
Picture *pic = s->current_picture_ptr;
struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
- VdpVideoSurface surf = ff_vdpau_get_surface_id(&pic->f);
+ VdpVideoSurface surf = ff_vdpau_get_surface_id(pic->f);
- res = hwctx->render2(avctx, &pic->f, (void *)&pic_ctx->info,
+#if FF_API_BUFS_VDPAU
+FF_DISABLE_DEPRECATION_WARNINGS
+ hwctx->info = pic_ctx->info;
+ hwctx->bitstream_buffers = pic_ctx->bitstream_buffers;
+ hwctx->bitstream_buffers_used = pic_ctx->bitstream_buffers_used;
+ hwctx->bitstream_buffers_allocated = pic_ctx->bitstream_buffers_allocated;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
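+ /* the extended render2 callback (which also receives the frame) is used only when the legacy render callback is unset */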
+ if (!hwctx->render) {
++ res = hwctx->render2(avctx, pic->f, (void *)&pic_ctx->info,
+ pic_ctx->bitstream_buffers_used, pic_ctx->bitstream_buffers);
+ } else
hwctx->render(hwctx->decoder, surf, (void *)&pic_ctx->info,
pic_ctx->bitstream_buffers_used, pic_ctx->bitstream_buffers);
return 0;
}
- render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0];
+/* Obsolete non-hwaccel VDPAU support below... */
+
+void ff_vdpau_h264_set_reference_frames(H264Context *h)
+{
+ struct vdpau_render_state *render, *render_ref;
+ VdpReferenceFrameH264 *rf, *rf2;
+ H264Picture *pic;
+ int i, list, pic_frame_idx;
+
+ render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0];
+ assert(render);
+
+ rf = &render->info.h264.referenceFrames[0];
+#define H264_RF_COUNT FF_ARRAY_ELEMS(render->info.h264.referenceFrames)
+
+ for (list = 0; list < 2; ++list) {
+ H264Picture **lp = list ? h->long_ref : h->short_ref;
+ int ls = list ? 16 : h->short_ref_count;
+
+ for (i = 0; i < ls; ++i) {
+ pic = lp[i];
+ if (!pic || !pic->reference)
+ continue;
+ pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;
+
+ render_ref = (struct vdpau_render_state *)pic->f.data[0];
+ assert(render_ref);
+
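+ /* both fields of one frame share a surface; if that surface is already listed, just merge the per-field reference flags */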
+ rf2 = &render->info.h264.referenceFrames[0];
+ while (rf2 != rf) {
+ if (
+ (rf2->surface == render_ref->surface)
+ && (rf2->is_long_term == pic->long_ref)
+ && (rf2->frame_idx == pic_frame_idx)
+ )
+ break;
+ ++rf2;
+ }
+ if (rf2 != rf) {
+ rf2->top_is_reference |= (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
+ rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
+ continue;
+ }
+
+ if (rf >= &render->info.h264.referenceFrames[H264_RF_COUNT])
+ continue;
+
+ rf->surface = render_ref->surface;
+ rf->is_long_term = pic->long_ref;
+ rf->top_is_reference = (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
+ rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
+ rf->field_order_cnt[0] = pic->field_poc[0];
+ rf->field_order_cnt[1] = pic->field_poc[1];
+ rf->frame_idx = pic_frame_idx;
+
+ ++rf;
+ }
+ }
+
+ for (; rf < &render->info.h264.referenceFrames[H264_RF_COUNT]; ++rf) {
+ rf->surface = VDP_INVALID_HANDLE;
+ rf->is_long_term = 0;
+ rf->top_is_reference = 0;
+ rf->bottom_is_reference = 0;
+ rf->field_order_cnt[0] = 0;
+ rf->field_order_cnt[1] = 0;
+ rf->frame_idx = 0;
+ }
+}
+
+void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size)
+{
+ struct vdpau_render_state *render = (struct vdpau_render_state*)data;
+ assert(render);
+
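+ /* grow the chunk list by one entry; chunks accumulate per picture and the count is reset after rendering */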
+ render->bitstream_buffers= av_fast_realloc(
+ render->bitstream_buffers,
+ &render->bitstream_buffers_allocated,
+ sizeof(*render->bitstream_buffers)*(render->bitstream_buffers_used + 1)
+ );
+
+ render->bitstream_buffers[render->bitstream_buffers_used].struct_version = VDP_BITSTREAM_BUFFER_VERSION;
+ render->bitstream_buffers[render->bitstream_buffers_used].bitstream = buf;
+ render->bitstream_buffers[render->bitstream_buffers_used].bitstream_bytes = buf_size;
+ render->bitstream_buffers_used++;
+}
+
+#if CONFIG_H264_VDPAU_DECODER
+void ff_vdpau_h264_picture_start(H264Context *h)
+{
+ struct vdpau_render_state *render;
+ int i;
+
+ render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0];
+ assert(render);
+
+ for (i = 0; i < 2; ++i) {
+ int foc = h->cur_pic_ptr->field_poc[i];
+ if (foc == INT_MAX)
+ foc = 0;
+ render->info.h264.field_order_cnt[i] = foc;
+ }
+
+ render->info.h264.frame_num = h->frame_num;
+}
+
+void ff_vdpau_h264_picture_complete(H264Context *h)
+{
+ struct vdpau_render_state *render;
+
+ render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0];
+ assert(render);
+
+ render->info.h264.slice_count = h->slice_num;
+ if (render->info.h264.slice_count < 1)
+ return;
+
+ render->info.h264.is_reference = (h->cur_pic_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE;
+ render->info.h264.field_pic_flag = h->picture_structure != PICT_FRAME;
+ render->info.h264.bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD;
+ render->info.h264.num_ref_frames = h->sps.ref_frame_count;
+ render->info.h264.mb_adaptive_frame_field_flag = h->sps.mb_aff && !render->info.h264.field_pic_flag;
+ render->info.h264.constrained_intra_pred_flag = h->pps.constrained_intra_pred;
+ render->info.h264.weighted_pred_flag = h->pps.weighted_pred;
+ render->info.h264.weighted_bipred_idc = h->pps.weighted_bipred_idc;
+ render->info.h264.frame_mbs_only_flag = h->sps.frame_mbs_only_flag;
+ render->info.h264.transform_8x8_mode_flag = h->pps.transform_8x8_mode;
+ render->info.h264.chroma_qp_index_offset = h->pps.chroma_qp_index_offset[0];
+ render->info.h264.second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1];
+ render->info.h264.pic_init_qp_minus26 = h->pps.init_qp - 26;
+ render->info.h264.num_ref_idx_l0_active_minus1 = h->pps.ref_count[0] - 1;
+ render->info.h264.num_ref_idx_l1_active_minus1 = h->pps.ref_count[1] - 1;
+ render->info.h264.log2_max_frame_num_minus4 = h->sps.log2_max_frame_num - 4;
+ render->info.h264.pic_order_cnt_type = h->sps.poc_type;
+ render->info.h264.log2_max_pic_order_cnt_lsb_minus4 = h->sps.poc_type ? 0 : h->sps.log2_max_poc_lsb - 4;
+ render->info.h264.delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag;
+ render->info.h264.direct_8x8_inference_flag = h->sps.direct_8x8_inference_flag;
+ render->info.h264.entropy_coding_mode_flag = h->pps.cabac;
+ render->info.h264.pic_order_present_flag = h->pps.pic_order_present;
+ render->info.h264.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
+ render->info.h264.redundant_pic_cnt_present_flag = h->pps.redundant_pic_cnt_present;
+ memcpy(render->info.h264.scaling_lists_4x4, h->pps.scaling_matrix4, sizeof(render->info.h264.scaling_lists_4x4));
+ memcpy(render->info.h264.scaling_lists_8x8[0], h->pps.scaling_matrix8[0], sizeof(render->info.h264.scaling_lists_8x8[0]));
+ memcpy(render->info.h264.scaling_lists_8x8[1], h->pps.scaling_matrix8[3], sizeof(render->info.h264.scaling_lists_8x8[0]));
+
+ ff_h264_draw_horiz_band(h, 0, h->avctx->height);
+ render->bitstream_buffers_used = 0;
+}
+#endif /* CONFIG_H264_VDPAU_DECODER */
+
+#if CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER
+void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
+ int buf_size, int slice_count)
+{
+ struct vdpau_render_state *render, *last, *next;
+ int i;
+
+ if (!s->current_picture_ptr) return;
+
- next = (struct vdpau_render_state *)s->next_picture.f.data[0];
++ render = (struct vdpau_render_state *)s->current_picture_ptr->f->data[0];
+ assert(render);
+
+ /* fill VdpPictureInfoMPEG1Or2 struct */
+ render->info.mpeg.picture_structure = s->picture_structure;
+ render->info.mpeg.picture_coding_type = s->pict_type;
+ render->info.mpeg.intra_dc_precision = s->intra_dc_precision;
+ render->info.mpeg.frame_pred_frame_dct = s->frame_pred_frame_dct;
+ render->info.mpeg.concealment_motion_vectors = s->concealment_motion_vectors;
+ render->info.mpeg.intra_vlc_format = s->intra_vlc_format;
+ render->info.mpeg.alternate_scan = s->alternate_scan;
+ render->info.mpeg.q_scale_type = s->q_scale_type;
+ render->info.mpeg.top_field_first = s->top_field_first;
+ render->info.mpeg.full_pel_forward_vector = s->full_pel[0]; // MPEG-1 only. Set 0 for MPEG-2
+ render->info.mpeg.full_pel_backward_vector = s->full_pel[1]; // MPEG-1 only. Set 0 for MPEG-2
+ render->info.mpeg.f_code[0][0] = s->mpeg_f_code[0][0]; // For MPEG-1 fill both horiz. & vert.
+ render->info.mpeg.f_code[0][1] = s->mpeg_f_code[0][1];
+ render->info.mpeg.f_code[1][0] = s->mpeg_f_code[1][0];
+ render->info.mpeg.f_code[1][1] = s->mpeg_f_code[1][1];
+ for (i = 0; i < 64; ++i) {
+ render->info.mpeg.intra_quantizer_matrix[i] = s->intra_matrix[i];
+ render->info.mpeg.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
+ }
+
+ render->info.mpeg.forward_reference = VDP_INVALID_HANDLE;
+ render->info.mpeg.backward_reference = VDP_INVALID_HANDLE;
+
+ switch(s->pict_type){
+ case AV_PICTURE_TYPE_B:
- last = (struct vdpau_render_state *)s->last_picture.f.data[0];
++ next = (struct vdpau_render_state *)s->next_picture.f->data[0];
+ assert(next);
+ render->info.mpeg.backward_reference = next->surface;
+ // no break here, going to set forward prediction
+ case AV_PICTURE_TYPE_P:
- ff_vdpau_add_data_chunk(s->current_picture_ptr->f.data[0], buf, buf_size);
++ last = (struct vdpau_render_state *)s->last_picture.f->data[0];
+ if (!last) // FIXME: Does this test make sense?
+ last = render; // predict second field from the first
+ render->info.mpeg.forward_reference = last->surface;
+ }
+
- render = (struct vdpau_render_state *)s->current_picture.f.data[0];
++ ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
+
+ render->info.mpeg.slice_count = slice_count;
+
+ if (slice_count)
+ ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
+ render->bitstream_buffers_used = 0;
+}
+#endif /* CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER */
+
+#if CONFIG_VC1_VDPAU_DECODER
+void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
+ int buf_size)
+{
+ VC1Context *v = s->avctx->priv_data;
+ struct vdpau_render_state *render, *last, *next;
+
- next = (struct vdpau_render_state *)s->next_picture.f.data[0];
++ render = (struct vdpau_render_state *)s->current_picture.f->data[0];
+ assert(render);
+
+ /* fill VdpPictureInfoVC1 struct */
+ render->info.vc1.frame_coding_mode = v->fcm ? v->fcm + 1 : 0;
+ render->info.vc1.postprocflag = v->postprocflag;
+ render->info.vc1.pulldown = v->broadcast;
+ render->info.vc1.interlace = v->interlace;
+ render->info.vc1.tfcntrflag = v->tfcntrflag;
+ render->info.vc1.finterpflag = v->finterpflag;
+ render->info.vc1.psf = v->psf;
+ render->info.vc1.dquant = v->dquant;
+ render->info.vc1.panscan_flag = v->panscanflag;
+ render->info.vc1.refdist_flag = v->refdist_flag;
+ render->info.vc1.quantizer = v->quantizer_mode;
+ render->info.vc1.extended_mv = v->extended_mv;
+ render->info.vc1.extended_dmv = v->extended_dmv;
+ render->info.vc1.overlap = v->overlap;
+ render->info.vc1.vstransform = v->vstransform;
+ render->info.vc1.loopfilter = v->s.loop_filter;
+ render->info.vc1.fastuvmc = v->fastuvmc;
+ render->info.vc1.range_mapy_flag = v->range_mapy_flag;
+ render->info.vc1.range_mapy = v->range_mapy;
+ render->info.vc1.range_mapuv_flag = v->range_mapuv_flag;
+ render->info.vc1.range_mapuv = v->range_mapuv;
+ /* Specific to simple/main profile only */
+ render->info.vc1.multires = v->multires;
+ render->info.vc1.syncmarker = v->resync_marker;
+ render->info.vc1.rangered = v->rangered | (v->rangeredfrm << 1);
+ render->info.vc1.maxbframes = v->s.max_b_frames;
+
+ render->info.vc1.deblockEnable = v->postprocflag & 1;
+ render->info.vc1.pquant = v->pq;
+
+ render->info.vc1.forward_reference = VDP_INVALID_HANDLE;
+ render->info.vc1.backward_reference = VDP_INVALID_HANDLE;
+
+ if (v->bi_type)
+ render->info.vc1.picture_type = 4;
+ else
+ render->info.vc1.picture_type = s->pict_type - 1 + s->pict_type / 3;
+
+ switch(s->pict_type){
+ case AV_PICTURE_TYPE_B:
- last = (struct vdpau_render_state *)s->last_picture.f.data[0];
++ next = (struct vdpau_render_state *)s->next_picture.f->data[0];
+ assert(next);
+ render->info.vc1.backward_reference = next->surface;
+ // no break here, going to set forward prediction
+ case AV_PICTURE_TYPE_P:
- ff_vdpau_add_data_chunk(s->current_picture_ptr->f.data[0], buf, buf_size);
++ last = (struct vdpau_render_state *)s->last_picture.f->data[0];
+ if (!last) // FIXME: Does this test make sense?
+ last = render; // predict second field from the first
+ render->info.vc1.forward_reference = last->surface;
+ }
+
- render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0];
++ ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
+
+ render->info.vc1.slice_count = 1;
+
+ ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
+ render->bitstream_buffers_used = 0;
+}
+#endif /* CONFIG_VC1_VDPAU_DECODER */
+
+#if CONFIG_MPEG4_VDPAU_DECODER
+void ff_vdpau_mpeg4_decode_picture(Mpeg4DecContext *ctx, const uint8_t *buf,
+ int buf_size)
+{
+ MpegEncContext *s = &ctx->m;
+ struct vdpau_render_state *render, *last, *next;
+ int i;
+
+ if (!s->current_picture_ptr) return;
+
- next = (struct vdpau_render_state *)s->next_picture.f.data[0];
++ render = (struct vdpau_render_state *)s->current_picture_ptr->f->data[0];
+ assert(render);
+
+ /* fill VdpPictureInfoMPEG4Part2 struct */
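+ /* trd/trb are the temporal distances used to scale direct-mode motion vectors for B-frames (frame and field cases) */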
+ render->info.mpeg4.trd[0] = s->pp_time;
+ render->info.mpeg4.trb[0] = s->pb_time;
+ render->info.mpeg4.trd[1] = s->pp_field_time >> 1;
+ render->info.mpeg4.trb[1] = s->pb_field_time >> 1;
+ render->info.mpeg4.vop_time_increment_resolution = s->avctx->time_base.den;
+ render->info.mpeg4.vop_coding_type = 0;
+ render->info.mpeg4.vop_fcode_forward = s->f_code;
+ render->info.mpeg4.vop_fcode_backward = s->b_code;
+ render->info.mpeg4.resync_marker_disable = !ctx->resync_marker;
+ render->info.mpeg4.interlaced = !s->progressive_sequence;
+ render->info.mpeg4.quant_type = s->mpeg_quant;
+ render->info.mpeg4.quarter_sample = s->quarter_sample;
+ render->info.mpeg4.short_video_header = s->avctx->codec->id == AV_CODEC_ID_H263;
+ render->info.mpeg4.rounding_control = s->no_rounding;
+ render->info.mpeg4.alternate_vertical_scan_flag = s->alternate_scan;
+ render->info.mpeg4.top_field_first = s->top_field_first;
+ for (i = 0; i < 64; ++i) {
+ render->info.mpeg4.intra_quantizer_matrix[i] = s->intra_matrix[i];
+ render->info.mpeg4.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
+ }
+ render->info.mpeg4.forward_reference = VDP_INVALID_HANDLE;
+ render->info.mpeg4.backward_reference = VDP_INVALID_HANDLE;
+
+ switch (s->pict_type) {
+ case AV_PICTURE_TYPE_B:
- last = (struct vdpau_render_state *)s->last_picture.f.data[0];
++ next = (struct vdpau_render_state *)s->next_picture.f->data[0];
+ assert(next);
+ render->info.mpeg4.backward_reference = next->surface;
+ render->info.mpeg4.vop_coding_type = 2;
+ // no break here, going to set forward prediction
+ case AV_PICTURE_TYPE_P:
- ff_vdpau_add_data_chunk(s->current_picture_ptr->f.data[0], buf, buf_size);
++ last = (struct vdpau_render_state *)s->last_picture.f->data[0];
+ assert(last);
+ render->info.mpeg4.forward_reference = last->surface;
+ }
+
++ ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
+
+ ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
+ render->bitstream_buffers_used = 0;
+}
+#endif /* CONFIG_MPEG4_VDPAU_DECODER */
+
int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile)
{
#define PROFILE(prof) \
switch (s->pict_type) {
case AV_PICTURE_TYPE_B:
- ref = ff_vdpau_get_surface_id(&s->next_picture.f);
+ if (s->next_picture_ptr) {
+ ref = ff_vdpau_get_surface_id(s->next_picture.f);
assert(ref != VDP_INVALID_HANDLE);
info->backward_reference = ref;
+ }
/* fall-through */
case AV_PICTURE_TYPE_P:
- ref = ff_vdpau_get_surface_id(&s->last_picture.f);
+ if (s->last_picture_ptr) {
+ ref = ff_vdpau_get_surface_id(s->last_picture.f);
assert(ref != VDP_INVALID_HANDLE);
info->forward_reference = ref;
+ }
}
info->slice_count = 0;