/* 248DCT setup */
s->fdct[1] = dsp.fdct248;
s->idct_put[1] = ff_simple_idct248_put; // FIXME: need to add it to DSP
- memcpy(s->dv_zigzag[1], ff_zigzag248_direct, 64);
+ if (avctx->lowres){
+     /* compose the 2-4-8 zigzag with the DSP's IDCT coefficient permutation:
+      * bits 0-2 of each scan entry (the column) stay, bit 3 moves up to
+      * bit 5 and bits 4-5 move down to bits 3-4 before the lookup */
+     for (i = 0; i < 64; i++){
+         int j = ff_zigzag248_direct[i];
+         s->dv_zigzag[1][i] = dsp.idct_permutation[(j & 7) + (j & 8) * 4 + (j & 48) / 2];
+     }
+ }else
+     memcpy(s->dv_zigzag[1], ff_zigzag248_direct, 64);
+ avcodec_get_frame_defaults(&s->picture);
avctx->coded_frame = &s->picture;
s->avctx = avctx;
avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
}
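For reference, a minimal standalone sketch of the index arithmetic used in the lowres branch above; the identity tables scan and perm are assumptions standing in for ff_zigzag248_direct and dsp.idct_permutation, only the remapping itself comes from the patch.

#include <stdint.h>
#include <stdio.h>

/* sketch only: same bit shuffling as the dv_zigzag[1] setup above */
static int remap248(int j)
{
    /* bits 0-2 (column) stay, bit 3 moves up to bit 5, bits 4-5 move down to bits 3-4 */
    return (j & 7) + (j & 8) * 4 + (j & 48) / 2;
}

int main(void)
{
    uint8_t scan[64], perm[64], zigzag[64];
    int i;

    for (i = 0; i < 64; i++)
        scan[i] = perm[i] = i;           /* hypothetical identity tables */

    for (i = 0; i < 64; i++)             /* same composition as dv_zigzag[1] */
        zigzag[i] = perm[remap248(scan[i])];

    for (i = 0; i < 64; i++)
        printf("%2d -> %2d\n", i, zigzag[i]);
    return 0;
}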
avcodec_get_frame_defaults(&s->frame);
- s->frame.data[0] = NULL;
+ /* seed the palette from extradata: one 32-bit little-endian entry per
+  * colour, with the alpha byte forced to fully opaque */
+ if (avctx->extradata_size >= 4)
+     for (i = 0; i < FFMIN(avctx->extradata_size, AVPALETTE_SIZE)/4; i++)
+         s->pal[i] = 0xFFU<<24 | AV_RL32(avctx->extradata+4*i);
+
return 0;
}
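A self-contained sketch of the palette load above; rl32() is a stand-in for FFmpeg's AV_RL32 and the 256-entry cap stands in for AVPALETTE_SIZE/4, so the names here are assumptions while the logic mirrors the hunk.

#include <stdint.h>
#include <stddef.h>

/* little-endian 32-bit read, byte by byte, as AV_RL32 does */
static uint32_t rl32(const uint8_t *p)
{
    return (uint32_t)p[0]       | (uint32_t)p[1] << 8 |
           (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static void load_palette(uint32_t pal[256], const uint8_t *extradata, size_t size)
{
    size_t i, n = size / 4;

    if (size < 4)
        return;                          /* nothing usable in extradata */
    if (n > 256)
        n = 256;                         /* at most 256 palette entries */
    for (i = 0; i < n; i++)              /* force the alpha byte to opaque */
        pal[i] = 0xFFu << 24 | rl32(extradata + 4 * i);
}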
static av_cold int decode_init(AVCodecContext *avctx){
QpegContext * const a = avctx->priv_data;
+ avcodec_get_frame_defaults(&a->pic);
+ avcodec_get_frame_defaults(&a->ref);
a->avctx = avctx;
avctx->pix_fmt= AV_PIX_FMT_PAL8;
- a->refdata = av_malloc(avctx->width * avctx->height);
+
+ decode_flush(avctx);
+
return 0;
}
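The qpeg init above delegates its reset to the codec's flush callback; a minimal sketch of that pattern follows, with a hypothetical context and flush body since the real decode_flush is not part of this hunk.

#include <string.h>
#include <stdint.h>

/* hypothetical context; only the init/flush wiring is the point */
typedef struct {
    uint32_t pal[256];
} DemoContext;

/* flush: put per-stream state back into a known condition (e.g. after a seek) */
static void demo_flush(DemoContext *c)
{
    memset(c->pal, 0, sizeof(c->pal));   /* hypothetical reset */
}

/* init: reuse the same reset so both code paths start from identical state */
static int demo_init(DemoContext *c)
{
    demo_flush(c);
    return 0;
}

int main(void)
{
    DemoContext c;
    return demo_init(&c);
}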
s->blocks = (s->width / 8) * (s->height / 8);
avctx->pix_fmt = AV_PIX_FMT_YUV410P;
- avctx->coded_frame = &s->frame;
+ avctx->coded_frame = (AVFrame*) &s->frame;
s->ulti_codebook = ulti_codebook;
+ avcodec_get_frame_defaults(&s->frame);
return 0;
}