--- /dev/null
- avctx->coded_frame = avcodec_alloc_frame();
- if (!avctx->coded_frame)
- return AVERROR(ENOMEM);
-
+/*
+ * 012v decoder
+ *
+ * Copyright (C) 2012 Carl Eugen Hoyos
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "internal.h"
+#include "libavutil/intreadwrite.h"
+
+static av_cold int zero12v_decode_init(AVCodecContext *avctx)
+{
+ avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
+ avctx->bits_per_raw_sample = 10;
+
- avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
- avctx->coded_frame->key_frame = 1;
+ if (avctx->codec_tag == MKTAG('a', '1', '2', 'v'))
+ av_log_ask_for_sample(avctx, "Samples with actual transparency needed\n");
+
- AVFrame *pic = avctx->coded_frame;
+ return 0;
+}
+
+static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame, AVPacket *avpkt)
+{
+ int line = 0, ret;
+ const int width = avctx->width;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
-
++ AVFrame *pic = data;
+ uint16_t *y, *u, *v;
+ const uint8_t *line_end, *src = avpkt->data;
+ int stride = avctx->width * 8 / 3;
+
- pic->reference = 0;
- if ((ret = ff_get_buffer(avctx, pic)) < 0)
+ if (width == 1) {
+ av_log(avctx, AV_LOG_ERROR, "Width 1 not supported.\n");
+ return AVERROR_INVALIDDATA;
+ }
+ if (avpkt->size < avctx->height * stride) {
+ av_log(avctx, AV_LOG_ERROR, "Packet too small: %d instead of %d\n",
+ avpkt->size, avctx->height * stride);
+ return AVERROR_INVALIDDATA;
+ }
+
- *(AVFrame*)data= *avctx->coded_frame;
++ if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
+ return ret;
+
++ pic->pict_type = AV_PICTURE_TYPE_I;
++ pic->key_frame = 1;
++
+ y = (uint16_t *)pic->data[0];
+ u = (uint16_t *)pic->data[1];
+ v = (uint16_t *)pic->data[2];
+ line_end = avpkt->data + stride;
+
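+ /* Each 32-bit little-endian word packs three 10-bit components
+  * (bits 0-9, 10-19 and 20-29); the shifts below move each component
+  * into the top 10 bits of a 16-bit sample (hence the 0xFFC0 mask).
+  * Four such words carry 6 Y, 3 U and 3 V samples, i.e. 6 pixels of
+  * 4:2:2 per 16 bytes, which is where stride = width * 8 / 3 above
+  * comes from. */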
+ while (line++ < avctx->height) {
+ while (1) {
+ uint32_t t = AV_RL32(src);
+ src += 4;
+ *u++ = t << 6 & 0xFFC0;
+ *y++ = t >> 4 & 0xFFC0;
+ *v++ = t >> 14 & 0xFFC0;
+
+ if (src >= line_end - 1) {
+ *y = 0x80;
+ src++;
+ line_end += stride;
+ y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
+ u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
+ v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
+ break;
+ }
+
+ t = AV_RL32(src);
+ src += 4;
+ *y++ = t << 6 & 0xFFC0;
+ *u++ = t >> 4 & 0xFFC0;
+ *y++ = t >> 14 & 0xFFC0;
+ if (src >= line_end - 2) {
+ if (!(width & 1)) {
+ *y = 0x80;
+ src += 2;
+ }
+ line_end += stride;
+ y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
+ u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
+ v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
+ break;
+ }
+
+ t = AV_RL32(src);
+ src += 4;
+ *v++ = t << 6 & 0xFFC0;
+ *y++ = t >> 4 & 0xFFC0;
+ *u++ = t >> 14 & 0xFFC0;
+
+ if (src >= line_end - 1) {
+ *y = 0x80;
+ src++;
+ line_end += stride;
+ y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
+ u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
+ v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
+ break;
+ }
+
+ t = AV_RL32(src);
+ src += 4;
+ *y++ = t << 6 & 0xFFC0;
+ *v++ = t >> 4 & 0xFFC0;
+ *y++ = t >> 14 & 0xFFC0;
+
+ if (src >= line_end - 2) {
+ if (width & 1) {
+ *y = 0x80;
+ src += 2;
+ }
+ line_end += stride;
+ y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
+ u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
+ v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
+ break;
+ }
+ }
+ }
+
+ *got_frame = 1;
- static av_cold int zero12v_decode_close(AVCodecContext *avctx)
- {
- AVFrame *pic = avctx->coded_frame;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
- av_freep(&avctx->coded_frame);
-
- return 0;
- }
-
+
+ return avpkt->size;
+}
+
- .close = zero12v_decode_close,
+AVCodec ff_zero12v_decoder = {
+ .name = "012v",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_012V,
+ .init = zero12v_decode_init,
+ .decode = zero12v_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
+ .long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
+};
const int width = f->avctx->width;
const int height = f->avctx->height;
const int mbs = (FFALIGN(width, 16) >> 4) * (FFALIGN(height, 16) >> 4);
- uint16_t *dst = (uint16_t*)f->current_picture->data[0];
- const int stride = f->current_picture->linesize[0]>>1;
+ uint16_t *dst = (uint16_t*)frame->data[0];
+ const int stride = frame->linesize[0]>>1;
+ const uint8_t *buf_end = buf + length;
GetByteContext g3;
if (length < mbs * 8) {
int buf_size = avpkt->size;
FourXContext *const f = avctx->priv_data;
AVFrame *picture = data;
- AVFrame *p;
int i, frame_4cc, frame_size, ret;
+ if (buf_size < 12)
+ return AVERROR_INVALIDDATA;
frame_4cc = AV_RL32(buf);
if (buf_size != AV_RL32(buf + 4) + 8 || buf_size < 20)
av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d\n",
frame_size = buf_size - 12;
}
- p = f->current_picture;
- avctx->coded_frame = p;
-
+ FFSWAP(AVFrame*, f->current_picture, f->last_picture);
+
// alternatively we would have to use our own buffer management
avctx->flags |= CODEC_FLAG_EMU_EDGE;
- p->reference= 3;
- if ((ret = avctx->reget_buffer(avctx, p)) < 0) {
- if ((ret = ff_get_buffer(avctx, picture, AV_GET_BUFFER_FLAG_REF)) < 0) {
- av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
++ if ((ret = ff_reget_buffer(avctx, f->current_picture)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
if (frame_4cc == AV_RL32("ifr2")) {
- p->pict_type= AV_PICTURE_TYPE_I;
- if ((ret = decode_i2_frame(f, buf - 4, frame_size + 4)) < 0) {
- picture->pict_type = AV_PICTURE_TYPE_I;
- if ((ret = decode_i2_frame(f, picture, buf - 4, frame_size + 4)) < 0)
++ f->current_picture->pict_type = AV_PICTURE_TYPE_I;
++ if ((ret = decode_i2_frame(f, f->current_picture, buf - 4, frame_size + 4)) < 0) {
+ av_log(f->avctx, AV_LOG_ERROR, "decode i2 frame failed\n");
return ret;
+ }
} else if (frame_4cc == AV_RL32("ifrm")) {
- p->pict_type= AV_PICTURE_TYPE_I;
- if ((ret = decode_i_frame(f, buf, frame_size)) < 0) {
- picture->pict_type = AV_PICTURE_TYPE_I;
- if ((ret = decode_i_frame(f, picture, buf, frame_size)) < 0)
++ f->current_picture->pict_type = AV_PICTURE_TYPE_I;
++ if ((ret = decode_i_frame(f, f->current_picture, buf, frame_size)) < 0) {
+ av_log(f->avctx, AV_LOG_ERROR, "decode i frame failed\n");
return ret;
+ }
} else if (frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")) {
if (!f->last_picture->data[0]) {
- f->last_picture->reference = 3;
- if ((ret = ff_get_buffer(avctx, f->last_picture)) < 0) {
+ if ((ret = ff_get_buffer(avctx, f->last_picture,
+ AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- memset(f->last_picture->data[0], 0, avctx->height * FFABS(f->last_picture->linesize[0]));
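+ /* Clear only the visible 2*width bytes of each line: the allocated
+  * linesize may be padded or even negative, so a single memset over
+  * height * linesize bytes would not be safe. */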
+ for (i=0; i<avctx->height; i++)
+ memset(f->last_picture->data[0] + i*f->last_picture->linesize[0], 0, 2*avctx->width);
}
- p->pict_type = AV_PICTURE_TYPE_P;
- if ((ret = decode_p_frame(f, buf, frame_size)) < 0) {
- picture->pict_type = AV_PICTURE_TYPE_P;
- if ((ret = decode_p_frame(f, picture, buf, frame_size)) < 0)
++ f->current_picture->pict_type = AV_PICTURE_TYPE_P;
++ if ((ret = decode_p_frame(f, f->current_picture, buf, frame_size)) < 0) {
+ av_log(f->avctx, AV_LOG_ERROR, "decode p frame failed\n");
return ret;
+ }
} else if (frame_4cc == AV_RL32("snd_")) {
av_log(avctx, AV_LOG_ERROR, "ignoring snd_ chunk length:%d\n",
buf_size);
buf_size);
}
- p->key_frame = p->pict_type == AV_PICTURE_TYPE_I;
- picture->key_frame = picture->pict_type == AV_PICTURE_TYPE_I;
++ f->current_picture->key_frame = f->current_picture->pict_type == AV_PICTURE_TYPE_I;
- *picture = *p;
- av_frame_unref(f->last_picture);
- if ((ret = av_frame_ref(f->last_picture, picture)) < 0)
++ if ((ret = av_frame_ref(picture, f->current_picture)) < 0)
+ return ret;
*got_frame = 1;
emms_c();
else
avctx->pix_fmt = AV_PIX_FMT_BGR555;
- f->current_picture = avcodec_alloc_frame();
- f->last_picture = avcodec_alloc_frame();
- if (!f->current_picture || !f->last_picture) {
- avcodec_free_frame(&f->current_picture);
- avcodec_free_frame(&f->last_picture);
- f->last_picture = av_frame_alloc();
- if (!f->last_picture)
++ f->current_picture = av_frame_alloc();
++ f->last_picture = av_frame_alloc();
++ if (!f->current_picture || !f->last_picture)
return AVERROR(ENOMEM);
- }
return 0;
}
f->cfrm[i].allocated_size = 0;
}
ff_free_vlc(&f->pre_vlc);
- if (f->current_picture->data[0])
- avctx->release_buffer(avctx, f->current_picture);
- if (f->last_picture->data[0])
- avctx->release_buffer(avctx, f->last_picture);
- avcodec_free_frame(&f->current_picture);
- avcodec_free_frame(&f->last_picture);
++ av_frame_free(&f->current_picture);
+ av_frame_free(&f->last_picture);
return 0;
}
/* Decode a plane */
for (row = 0; row < height; row++) {
- pixptr = c->pic.data[0] + row * c->pic.linesize[0] + planemap[p];
- pixptr_end = pixptr + c->pic.linesize[0];
+ pixptr = frame->data[0] + row * frame->linesize[0] + planemap[p];
+ pixptr_end = pixptr + frame->linesize[0];
+ if(lp - encoded + row*2 + 1 >= buf_size)
+ return -1;
dlen = av_be2ne16(*(const unsigned short *)(lp + row * 2));
/* Decode a row of this plane */
while (dlen > 0) {
}
/* get output buffer */
- frame->nb_samples = buf_size * (is_compr + 1);
+ frame->nb_samples = buf_size * 2;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
typedef struct AascContext {
AVCodecContext *avctx;
GetByteContext gb;
- AVFrame frame;
+ AVFrame *frame;
+
+ uint32_t palette[AVPALETTE_COUNT];
+ int palette_size;
} AascContext;
static av_cold int aasc_decode_init(AVCodecContext *avctx)
{
AascContext *s = avctx->priv_data;
+ uint8_t *ptr;
+ int i;
s->avctx = avctx;
-
- avctx->pix_fmt = AV_PIX_FMT_BGR24;
+ switch (avctx->bits_per_coded_sample) {
+ case 8:
+ avctx->pix_fmt = AV_PIX_FMT_PAL8;
+
+ ptr = avctx->extradata;
+ s->palette_size = FFMIN(avctx->extradata_size, AVPALETTE_SIZE);
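+ /* extradata carries one 4-byte palette entry per colour; OR in an
+  * opaque alpha byte since the stored entries leave it unset */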
+ for (i = 0; i < s->palette_size / 4; i++) {
+ s->palette[i] = 0xFFU << 24 | AV_RL32(ptr);
+ ptr += 4;
+ }
+ break;
+ case 16:
+ avctx->pix_fmt = AV_PIX_FMT_RGB555LE;
+ break;
+ case 24:
+ avctx->pix_fmt = AV_PIX_FMT_BGR24;
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "Unsupported bit depth: %d\n", avctx->bits_per_coded_sample);
+ return -1;
+ }
- avcodec_get_frame_defaults(&s->frame);
+
+ s->frame = av_frame_alloc();
+ if (!s->frame)
+ return AVERROR(ENOMEM);
return 0;
}
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
AascContext *s = avctx->priv_data;
- int compr, i, stride, ret;
+ int compr, i, stride, psize, ret;
+
+ if (buf_size < 4) {
+ av_log(avctx, AV_LOG_ERROR, "frame too short\n");
+ return AVERROR_INVALIDDATA;
+ }
- s->frame.reference = 3;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
compr = AV_RL32(buf);
buf += 4;
buf_size -= 4;
+ psize = avctx->bits_per_coded_sample / 8;
+ switch (avctx->codec_tag) {
+ case MKTAG('A', 'A', 'S', '4'):
+ bytestream2_init(&s->gb, buf - 4, buf_size + 4);
+ ff_msrle_decode(avctx, (AVPicture*)s->frame, 8, &s->gb);
+ break;
+ case MKTAG('A', 'A', 'S', 'C'):
switch (compr) {
case 0:
- stride = (avctx->width * 3 + 3) & ~3;
+ stride = (avctx->width * psize + psize) & ~psize;
for (i = avctx->height - 1; i >= 0; i--) {
- memcpy(s->frame->data[0] + i * s->frame->linesize[0], buf, avctx->width * 3);
+ if (avctx->width * psize > buf_size) {
+ av_log(avctx, AV_LOG_ERROR, "Next line is beyond buffer bounds\n");
+ break;
+ }
- memcpy(s->frame.data[0] + i*s->frame.linesize[0], buf, avctx->width * psize);
++ memcpy(s->frame->data[0] + i * s->frame->linesize[0], buf, avctx->width * psize);
buf += stride;
+ buf_size -= stride;
}
break;
case 1:
av_log(avctx, AV_LOG_ERROR, "Unknown compression type %d\n", compr);
return AVERROR_INVALIDDATA;
}
- memcpy(s->frame.data[1], s->palette, s->palette_size);
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "Unknown FourCC: %X\n", avctx->codec_tag);
+ return -1;
+ }
+
+ if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
++ memcpy(s->frame->data[1], s->palette, s->palette_size);
*got_frame = 1;
- *(AVFrame*)data = s->frame;
+ if ((ret = av_frame_ref(data, s->frame)) < 0)
+ return ret;
/* report that the buffer was completely consumed */
return buf_size;
/* get output buffer */
frame->nb_samples = ctx->cur_frame_length;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
- av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed.\n");
return ret;
}
*/
#include "libavutil/common.h"
+ #include "libavutil/frame.h"
#include "libavutil/lfg.h"
+#include "libavutil/xga_font_data.h"
#include "avcodec.h"
#include "cga_data.h"
#include "internal.h"
AnsiContext *s = avctx->priv_data;
avctx->pix_fmt = AV_PIX_FMT_PAL8;
+ s->frame = av_frame_alloc();
+ if (!s->frame)
+ return AVERROR(ENOMEM);
+
/* defaults */
- s->font = ff_vga16_font;
+ s->font = avpriv_vga16_font;
s->font_height = 16;
s->fg = DEFAULT_FG_COLOR;
s->bg = DEFAULT_BG_COLOR;
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- s->frame.pict_type = AV_PICTURE_TYPE_I;
- s->frame.palette_has_changed = 1;
- set_palette((uint32_t *)s->frame.data[1]);
+ s->frame->pict_type = AV_PICTURE_TYPE_I;
+ s->frame->palette_has_changed = 1;
- memcpy(s->frame->data[1], ff_cga_palette, 16 * 4);
++ set_palette((uint32_t *)s->frame->data[1]);
erase_screen(avctx);
} else if (c == 'l') {
erase_screen(avctx);
return ret;
}
if (!avctx->frame_number) {
- memset(s->frame->data[0], 0, avctx->height * FFABS(s->frame->linesize[0]));
+ for (i=0; i<avctx->height; i++)
- memset(s->frame.data[0]+ i*s->frame.linesize[0], 0, avctx->width);
- memset(s->frame.data[1], 0, AVPALETTE_SIZE);
++ memset(s->frame->data[0]+ i*s->frame->linesize[0], 0, avctx->width);
+ memset(s->frame->data[1], 0, AVPALETTE_SIZE);
}
- s->frame.pict_type = AV_PICTURE_TYPE_I;
- s->frame.palette_has_changed = 1;
- set_palette((uint32_t *)s->frame.data[1]);
+ s->frame->pict_type = AV_PICTURE_TYPE_I;
+ s->frame->palette_has_changed = 1;
- memcpy(s->frame->data[1], ff_cga_palette, 16 * 4);
++ set_palette((uint32_t *)s->frame->data[1]);
+ if (!s->first_frame) {
+ erase_screen(avctx);
+ s->first_frame = 1;
+ }
while(buf < buf_end) {
switch(s->state) {
--- /dev/null
- AVFrame frame;
+/*
+ * AVRn decoder
+ * Copyright (c) 2012 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "internal.h"
+#include "mjpeg.h"
+#include "mjpegdec.h"
+#include "libavutil/imgutils.h"
+
+typedef struct {
+ MJpegDecodeContext mjpeg_ctx;
- avcodec_get_frame_defaults(&a->frame);
+ int is_mjpeg;
+ int interlace; //FIXME use frame.interlaced_frame
+ int tff;
+} AVRnContext;
+
+static av_cold int init(AVCodecContext *avctx)
+{
+ AVRnContext *a = avctx->priv_data;
+ int ret;
+
+ // Support "Resolution 1:1" for Avid AVI Codec
+ a->is_mjpeg = avctx->extradata_size < 31 || memcmp(&avctx->extradata[28], "1:1", 3);
+
+ if(!a->is_mjpeg && avctx->lowres) {
+ av_log(avctx, AV_LOG_ERROR, "lowres is not possible with rawvideo\n");
+ return AVERROR(EINVAL);
+ }
+
+ if(a->is_mjpeg)
+ return ff_mjpeg_decode_init(avctx);
+
+ if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
+ return ret;
+
- AVFrame *p = &a->frame;
-
- if(p->data[0])
- avctx->release_buffer(avctx, p);
+ avctx->pix_fmt = AV_PIX_FMT_UYVY422;
+
+ if(avctx->extradata_size >= 9 && avctx->extradata[4]+28 < avctx->extradata_size) {
+ int ndx = avctx->extradata[4] + 4;
+ a->interlace = !memcmp(avctx->extradata + ndx, "1:1(", 4);
+ if(a->interlace) {
+ a->tff = avctx->extradata[ndx + 24] == 1;
+ }
+ }
+
+ return 0;
+}
+
+static av_cold int end(AVCodecContext *avctx)
+{
+ AVRnContext *a = avctx->priv_data;
- AVFrame *p = &a->frame;
+
+ if(a->is_mjpeg)
+ ff_mjpeg_decode_end(avctx);
+
+ return 0;
+}
+
+static int decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame, AVPacket *avpkt)
+{
+ AVRnContext *a = avctx->priv_data;
- if(p->data[0])
- avctx->release_buffer(avctx, p);
++ AVFrame *p = data;
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ int y, ret, true_height;
+
+ if(a->is_mjpeg)
+ return ff_mjpeg_decode_frame(avctx, data, got_frame, avpkt);
+
+ true_height = buf_size / (2*avctx->width);
- if((ret = ff_get_buffer(avctx, p)) < 0){
+
+ if(buf_size < 2*avctx->width * avctx->height) {
+ av_log(avctx, AV_LOG_ERROR, "packet too small\n");
+ return AVERROR_INVALIDDATA;
+ }
+
- *(AVFrame*)data = a->frame;
++ if((ret = ff_get_buffer(avctx, p, 0)) < 0){
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
+ }
+ p->pict_type= AV_PICTURE_TYPE_I;
+ p->key_frame= 1;
+
+ if(a->interlace) {
+ buf += (true_height - avctx->height)*avctx->width;
+ for(y = 0; y < avctx->height-1; y+=2) {
+ memcpy(p->data[0] + (y+ a->tff)*p->linesize[0], buf , 2*avctx->width);
+ memcpy(p->data[0] + (y+!a->tff)*p->linesize[0], buf + avctx->width*true_height+4, 2*avctx->width);
+ buf += 2*avctx->width;
+ }
+ } else {
+ buf += (true_height - avctx->height)*avctx->width*2;
+ for(y = 0; y < avctx->height; y++) {
+ memcpy(p->data[0] + y*p->linesize[0], buf, 2*avctx->width);
+ buf += 2*avctx->width;
+ }
+ }
+
+ *got_frame = 1;
+ return buf_size;
+}
+
+AVCodec ff_avrn_decoder = {
+ .name = "avrn",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_AVRN,
+ .priv_data_size = sizeof(AVRnContext),
+ .init = init,
+ .close = end,
+ .decode = decode_frame,
+ .long_name = NULL_IF_CONFIG_SMALL("Avid AVI Codec"),
+ .capabilities = CODEC_CAP_DR1,
+ .max_lowres = 3,
+};
+
int i, j, x, y, stride, ret, vect_w = 3, vect_h = 3;
AvsVideoSubType sub_type;
AvsBlockType type;
- GetBitContext change_map;
+ GetBitContext change_map = {0}; //init to silence warning
- if ((ret = avctx->reget_buffer(avctx, p)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, p)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
--- /dev/null
-
- avctx->coded_frame = avcodec_alloc_frame();
-
- if (!avctx->coded_frame) {
- av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
- return AVERROR(ENOMEM);
- }
-
+/*
+ * AVID Meridien decoder
+ *
+ * Copyright (c) 2012 Carl Eugen Hoyos
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "internal.h"
+#include "libavutil/intreadwrite.h"
+
+static av_cold int avui_decode_init(AVCodecContext *avctx)
+{
+ avctx->pix_fmt = AV_PIX_FMT_YUVA422P;
- AVFrame *pic = avctx->coded_frame;
+ return 0;
+}
+
+static int avui_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame, AVPacket *avpkt)
+{
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
-
++ AVFrame *pic = data;
+ const uint8_t *src = avpkt->data, *extradata = avctx->extradata;
+ const uint8_t *srca;
+ uint8_t *y, *u, *v, *a;
+ int transparent, interlaced = 1, skip, opaque_length, i, j, k;
+ uint32_t extradata_size = avctx->extradata_size;
+
- pic->reference = 0;
-
- if (ff_get_buffer(avctx, pic) < 0) {
+ while (extradata_size >= 24) {
+ uint32_t atom_size = AV_RB32(extradata);
+ if (!memcmp(&extradata[4], "APRGAPRG0001", 12)) {
+ interlaced = extradata[19] != 1;
+ break;
+ }
+ if (atom_size && atom_size <= extradata_size) {
+ extradata += atom_size;
+ extradata_size -= atom_size;
+ } else {
+ break;
+ }
+ }
+ if (avctx->height == 486) {
+ skip = 10;
+ } else {
+ skip = 16;
+ }
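+ /* Expected packet layout, as reconstructed from the copy loop below:
+  * each field is preceded by 'skip' lines of blanking, every pixel
+  * takes two bytes (UYVY), and interlaced packets carry a 4-byte
+  * marker between the two fields - hence this opaque_length. */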
+ opaque_length = 2 * avctx->width * (avctx->height + skip) + 4 * interlaced;
+ if (avpkt->size < opaque_length) {
+ av_log(avctx, AV_LOG_ERROR, "Insufficient input data.\n");
+ return AVERROR(EINVAL);
+ }
+ transparent = avctx->bits_per_coded_sample == 32 &&
+ avpkt->size >= opaque_length * 2 + 4;
+ srca = src + opaque_length + 5;
+
- *(AVFrame *)data = *pic;
++ if (ff_get_buffer(avctx, pic, 0) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer.\n");
+ return AVERROR(ENOMEM);
+ }
+
+ pic->key_frame = 1;
+ pic->pict_type = AV_PICTURE_TYPE_I;
+
+ if (!interlaced) {
+ src += avctx->width * skip;
+ srca += avctx->width * skip;
+ }
+
+ for (i = 0; i < interlaced + 1; i++) {
+ src += avctx->width * skip;
+ srca += avctx->width * skip;
+ if (interlaced && avctx->height == 486) {
+ y = pic->data[0] + (1 - i) * pic->linesize[0];
+ u = pic->data[1] + (1 - i) * pic->linesize[1];
+ v = pic->data[2] + (1 - i) * pic->linesize[2];
+ a = pic->data[3] + (1 - i) * pic->linesize[3];
+ } else {
+ y = pic->data[0] + i * pic->linesize[0];
+ u = pic->data[1] + i * pic->linesize[1];
+ v = pic->data[2] + i * pic->linesize[2];
+ a = pic->data[3] + i * pic->linesize[3];
+ }
+
+ for (j = 0; j < avctx->height >> interlaced; j++) {
+ for (k = 0; k < avctx->width >> 1; k++) {
+ u[ k ] = *src++;
+ y[2 * k ] = *src++;
+ a[2 * k ] = 0xFF - (transparent ? *srca++ : 0);
+ srca++;
+ v[ k ] = *src++;
+ y[2 * k + 1] = *src++;
+ a[2 * k + 1] = 0xFF - (transparent ? *srca++ : 0);
+ srca++;
+ }
+
+ y += (interlaced + 1) * pic->linesize[0];
+ u += (interlaced + 1) * pic->linesize[1];
+ v += (interlaced + 1) * pic->linesize[2];
+ a += (interlaced + 1) * pic->linesize[3];
+ }
+ src += 4;
+ srca += 4;
+ }
+ *got_frame = 1;
- static av_cold int avui_decode_close(AVCodecContext *avctx)
- {
- if (avctx->coded_frame->data[0])
- avctx->release_buffer(avctx, avctx->coded_frame);
-
- av_freep(&avctx->coded_frame);
-
- return 0;
- }
-
+
+ return avpkt->size;
+}
+
- .close = avui_decode_close,
+AVCodec ff_avui_decoder = {
+ .name = "avui",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_AVUI,
+ .init = avui_decode_init,
+ .decode = avui_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
+ .long_name = NULL_IF_CONFIG_SMALL("Avid Meridien Uncompressed"),
+};
static av_cold int bethsoftvid_decode_init(AVCodecContext *avctx)
{
BethsoftvidContext *vid = avctx->priv_data;
- vid->frame.reference = 3;
- vid->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
+ avcodec_get_frame_defaults(&vid->frame);
avctx->pix_fmt = AV_PIX_FMT_PAL8;
return 0;
}
typedef struct BFIContext {
AVCodecContext *avctx;
- AVFrame frame;
uint8_t *dst;
+ uint32_t pal[256];
} BFIContext;
static av_cold int bfi_decode_init(AVCodecContext *avctx)
av_log(NULL, AV_LOG_ERROR, "Palette is too large.\n");
return AVERROR_INVALIDDATA;
}
- pal = (uint32_t *)bfi->frame.data[1];
+ pal = (uint32_t *)frame->data[1];
for (i = 0; i < avctx->extradata_size / 3; i++) {
int shift = 16;
- *pal = 0;
+ *pal = 0xFFU << 24;
for (j = 0; j < 3; j++, shift -= 8)
*pal += ((avctx->extradata[i * 3 + j] << 2) |
(avctx->extradata[i * 3 + j] >> 4)) << shift;
pal++;
}
- memcpy(bfi->pal, bfi->frame.data[1], sizeof(bfi->pal));
- bfi->frame.palette_has_changed = 1;
++ memcpy(bfi->pal, frame->data[1], sizeof(bfi->pal));
+ frame->palette_has_changed = 1;
} else {
- bfi->frame.pict_type = AV_PICTURE_TYPE_P;
- bfi->frame.key_frame = 0;
- bfi->frame.palette_has_changed = 0;
- memcpy(bfi->frame.data[1], bfi->pal, sizeof(bfi->pal));
+ frame->pict_type = AV_PICTURE_TYPE_P;
+ frame->key_frame = 0;
++ frame->palette_has_changed = 0;
++ memcpy(frame->data[1], bfi->pal, sizeof(bfi->pal));
}
bytestream2_skip(&g, 4); // Unpacked size, not required.
--- /dev/null
- AVFrame frame;
+/*
+ * Binary text decoder
+ * eXtended BINary text (XBIN) decoder
+ * iCEDraw File decoder
+ * Copyright (c) 2010 Peter Ross (pross@xvid.org)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Binary text decoder
+ * eXtended BINary text (XBIN) decoder
+ * iCEDraw File decoder
+ */
+
+#include "libavutil/intreadwrite.h"
+#include "libavutil/xga_font_data.h"
+#include "avcodec.h"
+#include "cga_data.h"
+#include "bintext.h"
++#include "internal.h"
+
+typedef struct XbinContext {
- memmove(s->frame.data[0], s->frame.data[0] + s->font_height*s->frame.linesize[0],
- (avctx->height - s->font_height)*s->frame.linesize[0]);
- memset(s->frame.data[0] + (avctx->height - s->font_height)*s->frame.linesize[0],
- DEFAULT_BG_COLOR, s->font_height * s->frame.linesize[0]);
++ AVFrame *frame;
+ int palette[16];
+ int flags;
+ int font_height;
+ const uint8_t *font;
+ int x, y;
+} XbinContext;
+
+static av_cold int decode_init(AVCodecContext *avctx)
+{
+ XbinContext *s = avctx->priv_data;
+ uint8_t *p;
+ int i;
+
+ avctx->pix_fmt = AV_PIX_FMT_PAL8;
+ p = avctx->extradata;
+ if (p) {
+ s->font_height = p[0];
+ s->flags = p[1];
+ p += 2;
+ if(avctx->extradata_size < 2 + (!!(s->flags & BINTEXT_PALETTE))*3*16
+ + (!!(s->flags & BINTEXT_FONT))*s->font_height*256) {
+ av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
+ return AVERROR_INVALIDDATA;
+ }
+ } else {
+ s->font_height = 8;
+ s->flags = 0;
+ }
+
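+ /* Palette components are 6-bit VGA DAC values: (x << 2) | (x >> 4)
+  * expands each to 8 bits, with the & 0x30303 keeping just the two
+  * recovered low bits of each of the three packed components. */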
+ if ((s->flags & BINTEXT_PALETTE)) {
+ for (i = 0; i < 16; i++) {
+ s->palette[i] = 0xFF000000 | (AV_RB24(p) << 2) | ((AV_RB24(p) >> 4) & 0x30303);
+ p += 3;
+ }
+ } else {
+ for (i = 0; i < 16; i++)
+ s->palette[i] = 0xFF000000 | ff_cga_palette[i];
+ }
+
+ if ((s->flags & BINTEXT_FONT)) {
+ s->font = p;
+ } else {
+ switch(s->font_height) {
+ default:
+ av_log(avctx, AV_LOG_WARNING, "font height %i not supported\n", s->font_height);
+ s->font_height = 8;
+ case 8:
+ s->font = avpriv_cga_font;
+ break;
+ case 16:
+ s->font = avpriv_vga16_font;
+ break;
+ }
+ }
+
++ s->frame = av_frame_alloc();
++ if (!s->frame)
++ return AVERROR(ENOMEM);
++
+ return 0;
+}
+
+#define DEFAULT_BG_COLOR 0
+av_unused static void hscroll(AVCodecContext *avctx)
+{
+ XbinContext *s = avctx->priv_data;
+ if (s->y < avctx->height - s->font_height) {
+ s->y += s->font_height;
+ } else {
- ff_draw_pc_font(s->frame.data[0] + s->y * s->frame.linesize[0] + s->x,
- s->frame.linesize[0], s->font, s->font_height, c,
++ memmove(s->frame->data[0], s->frame->data[0] + s->font_height*s->frame->linesize[0],
++ (avctx->height - s->font_height)*s->frame->linesize[0]);
++ memset(s->frame->data[0] + (avctx->height - s->font_height)*s->frame->linesize[0],
++ DEFAULT_BG_COLOR, s->font_height * s->frame->linesize[0]);
+ }
+}
+
+#define FONT_WIDTH 8
+
+/**
+ * Draw character to screen
+ */
+static void draw_char(AVCodecContext *avctx, int c, int a)
+{
+ XbinContext *s = avctx->priv_data;
+ if (s->y > avctx->height - s->font_height)
+ return;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
- if (avctx->reget_buffer(avctx, &s->frame)) {
++ ff_draw_pc_font(s->frame->data[0] + s->y * s->frame->linesize[0] + s->x,
++ s->frame->linesize[0], s->font, s->font_height, c,
+ a & 0x0F, a >> 4);
+ s->x += FONT_WIDTH;
+ if (s->x > avctx->width - FONT_WIDTH) {
+ s->x = 0;
+ s->y += s->font_height;
+ }
+}
+
+static int decode_frame(AVCodecContext *avctx,
+ void *data, int *got_frame,
+ AVPacket *avpkt)
+{
+ XbinContext *s = avctx->priv_data;
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ const uint8_t *buf_end = buf+buf_size;
++ int ret;
+
+ s->x = s->y = 0;
- s->frame.pict_type = AV_PICTURE_TYPE_I;
- s->frame.palette_has_changed = 1;
- memcpy(s->frame.data[1], s->palette, 16 * 4);
++ if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
+ return ret;
+ }
- *(AVFrame*)data = s->frame;
++ s->frame->pict_type = AV_PICTURE_TYPE_I;
++ s->frame->palette_has_changed = 1;
++ memcpy(s->frame->data[1], s->palette, 16 * 4);
+
+ if (avctx->codec_id == AV_CODEC_ID_XBIN) {
+ while (buf + 2 < buf_end) {
+ int i,c,a;
+ int type = *buf >> 6;
+ int count = (*buf & 0x3F) + 1;
+ buf++;
+ switch (type) {
+ case 0: //no compression
+ for (i = 0; i < count && buf + 1 < buf_end; i++) {
+ draw_char(avctx, buf[0], buf[1]);
+ buf += 2;
+ }
+ break;
+ case 1: //character compression
+ c = *buf++;
+ for (i = 0; i < count && buf < buf_end; i++)
+ draw_char(avctx, c, *buf++);
+ break;
+ case 2: //attribute compression
+ a = *buf++;
+ for (i = 0; i < count && buf < buf_end; i++)
+ draw_char(avctx, *buf++, a);
+ break;
+ case 3: //character/attribute compression
+ c = *buf++;
+ a = *buf++;
+ for (i = 0; i < count && buf < buf_end; i++)
+ draw_char(avctx, c, a);
+ break;
+ }
+ }
+ } else if (avctx->codec_id == AV_CODEC_ID_IDF) {
+ while (buf + 2 < buf_end) {
+ if (AV_RL16(buf) == 1) {
+ int i;
+ if (buf + 6 > buf_end)
+ break;
+ for (i = 0; i < buf[2]; i++)
+ draw_char(avctx, buf[4], buf[5]);
+ buf += 6;
+ } else {
+ draw_char(avctx, buf[0], buf[1]);
+ buf += 2;
+ }
+ }
+ } else {
+ while (buf + 1 < buf_end) {
+ draw_char(avctx, buf[0], buf[1]);
+ buf += 2;
+ }
+ }
+
++ if ((ret = av_frame_ref(data, s->frame)) < 0)
++ return ret;
+ *got_frame = 1;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ return buf_size;
+}
+
+static av_cold int decode_end(AVCodecContext *avctx)
+{
+ XbinContext *s = avctx->priv_data;
+
++ av_frame_free(&s->frame);
+
+ return 0;
+}
+
+#if CONFIG_BINTEXT_DECODER
+AVCodec ff_bintext_decoder = {
+ .name = "bintext",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_BINTEXT,
+ .priv_data_size = sizeof(XbinContext),
+ .init = decode_init,
+ .close = decode_end,
+ .decode = decode_frame,
+ .capabilities = CODEC_CAP_DR1,
+ .long_name = NULL_IF_CONFIG_SMALL("Binary text"),
+};
+#endif
+#if CONFIG_XBIN_DECODER
+AVCodec ff_xbin_decoder = {
+ .name = "xbin",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_XBIN,
+ .priv_data_size = sizeof(XbinContext),
+ .init = decode_init,
+ .close = decode_end,
+ .decode = decode_frame,
+ .capabilities = CODEC_CAP_DR1,
+ .long_name = NULL_IF_CONFIG_SMALL("eXtended BINary text"),
+};
+#endif
+#if CONFIG_IDF_DECODER
+AVCodec ff_idf_decoder = {
+ .name = "idf",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_IDF,
+ .priv_data_size = sizeof(XbinContext),
+ .init = decode_init,
+ .close = decode_end,
+ .decode = decode_frame,
+ .capabilities = CODEC_CAP_DR1,
+ .long_name = NULL_IF_CONFIG_SMALL("iCEDraw text"),
+};
+#endif
--- /dev/null
- typedef struct BRPixContext {
- AVFrame frame;
- } BRPixContext;
-
+/*
+ * BRender PIX (.pix) image decoder
+ * Copyright (c) 2012 Aleksi Nurmi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * Tested against samples from I-War / Independence War and Defiance.
+ * If the PIX file does not contain a palette, the
+ * palette_has_changed property of the AVFrame is set to 0.
+ */
+
+#include "libavutil/imgutils.h"
+#include "avcodec.h"
+#include "bytestream.h"
+#include "internal.h"
+
- static av_cold int brpix_init(AVCodecContext *avctx)
- {
- BRPixContext *s = avctx->priv_data;
-
- avcodec_get_frame_defaults(&s->frame);
- avctx->coded_frame = &s->frame;
-
- return 0;
- }
-
+typedef struct BRPixHeader {
+ int format;
+ unsigned int width, height;
+} BRPixHeader;
+
- BRPixContext *s = avctx->priv_data;
- AVFrame *frame_out = data;
+static int brpix_decode_header(BRPixHeader *out, GetByteContext *pgb)
+{
+ unsigned int header_len = bytestream2_get_be32(pgb);
+
+ out->format = bytestream2_get_byte(pgb);
+ bytestream2_skip(pgb, 2);
+ out->width = bytestream2_get_be16(pgb);
+ out->height = bytestream2_get_be16(pgb);
+
+ // the header is at least 11 bytes long; we read the first 7
+ if (header_len < 11) {
+ return 0;
+ }
+
+ // skip the rest of the header
+ bytestream2_skip(pgb, header_len-7);
+
+ return 1;
+}
+
+static int brpix_decode_frame(AVCodecContext *avctx,
+ void *data, int *got_frame,
+ AVPacket *avpkt)
+{
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
-
++ AVFrame *frame = data;
+
+ int ret;
+ GetByteContext gb;
+
+ unsigned int bytes_pp;
+
+ unsigned int magic[4];
+ unsigned int chunk_type;
+ unsigned int data_len;
+ BRPixHeader hdr;
+
+ bytestream2_init(&gb, avpkt->data, avpkt->size);
+
+ magic[0] = bytestream2_get_be32(&gb);
+ magic[1] = bytestream2_get_be32(&gb);
+ magic[2] = bytestream2_get_be32(&gb);
+ magic[3] = bytestream2_get_be32(&gb);
+
+ if (magic[0] != 0x12 ||
+ magic[1] != 0x8 ||
+ magic[2] != 0x2 ||
+ magic[3] != 0x2) {
+ av_log(avctx, AV_LOG_ERROR, "Not a BRender PIX file\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ chunk_type = bytestream2_get_be32(&gb);
+ if (chunk_type != 0x3 && chunk_type != 0x3d) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid chunk type %d\n", chunk_type);
+ return AVERROR_INVALIDDATA;
+ }
+
+ ret = brpix_decode_header(&hdr, &gb);
+ if (!ret) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid header length\n");
+ return AVERROR_INVALIDDATA;
+ }
+ switch (hdr.format) {
+ case 3:
+ avctx->pix_fmt = AV_PIX_FMT_PAL8;
+ bytes_pp = 1;
+ break;
+ case 4:
+ avctx->pix_fmt = AV_PIX_FMT_RGB555BE;
+ bytes_pp = 2;
+ break;
+ case 5:
+ avctx->pix_fmt = AV_PIX_FMT_RGB565BE;
+ bytes_pp = 2;
+ break;
+ case 6:
+ avctx->pix_fmt = AV_PIX_FMT_RGB24;
+ bytes_pp = 3;
+ break;
+ case 7:
+ avctx->pix_fmt = AV_PIX_FMT_0RGB;
+ bytes_pp = 4;
+ break;
+ case 18:
+ avctx->pix_fmt = AV_PIX_FMT_GRAY8A;
+ bytes_pp = 2;
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "Format %d is not supported\n",
+ hdr.format);
+ return AVERROR_PATCHWELCOME;
+ }
+
- if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+ if (av_image_check_size(hdr.width, hdr.height, 0, avctx) < 0)
+ return AVERROR_INVALIDDATA;
+
+ if (hdr.width != avctx->width || hdr.height != avctx->height)
+ avcodec_set_dimensions(avctx, hdr.width, hdr.height);
+
- uint32_t *pal_out = (uint32_t *)s->frame.data[1];
++ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
+ }
+
+ chunk_type = bytestream2_get_be32(&gb);
+
+ if (avctx->pix_fmt == AV_PIX_FMT_PAL8 &&
+ (chunk_type == 0x3 || chunk_type == 0x3d)) {
+ BRPixHeader palhdr;
- s->frame.palette_has_changed = 1;
++ uint32_t *pal_out = (uint32_t *)frame->data[1];
+ int i;
+
+ ret = brpix_decode_header(&palhdr, &gb);
+ if (!ret) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid palette header length\n");
+ return AVERROR_INVALIDDATA;
+ }
+ if (palhdr.format != 7) {
+ av_log(avctx, AV_LOG_ERROR, "Palette is not in 0RGB format\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ chunk_type = bytestream2_get_be32(&gb);
+ data_len = bytestream2_get_be32(&gb);
+ bytestream2_skip(&gb, 8);
+ if (chunk_type != 0x21 || data_len != 1032 ||
+ bytestream2_get_bytes_left(&gb) < 1032) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid palette data\n");
+ return AVERROR_INVALIDDATA;
+ }
+ // convert 0RGB to machine endian format (ARGB32)
+ for (i = 0; i < 256; ++i) {
+ bytestream2_skipu(&gb, 1);
+ *pal_out++ = (0xFFU << 24) | bytestream2_get_be24u(&gb);
+ }
+ bytestream2_skip(&gb, 8);
+
- uint32_t *pal_out = (uint32_t *)s->frame.data[1];
++ frame->palette_has_changed = 1;
+
+ chunk_type = bytestream2_get_be32(&gb);
+ } else if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
- s->frame.palette_has_changed = 1;
++ uint32_t *pal_out = (uint32_t *)frame->data[1];
+ int i;
+
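+ /* no palette chunk in the file: synthesize an opaque greyscale
+  * ramp (i * 0x010101 replicates i into the R, G and B bytes) */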
+ for (i = 0; i < 256; ++i) {
+ *pal_out++ = (0xFFU << 24) | (i * 0x010101);
+ }
- av_image_copy_plane(s->frame.data[0], s->frame.linesize[0],
++ frame->palette_has_changed = 1;
+ }
+
+ data_len = bytestream2_get_be32(&gb);
+ bytestream2_skip(&gb, 8);
+
+ // read the image data to the buffer
+ {
+ unsigned int bytes_per_scanline = bytes_pp * hdr.width;
+ unsigned int bytes_left = bytestream2_get_bytes_left(&gb);
+
+ if (chunk_type != 0x21 || data_len != bytes_left ||
+ bytes_left / bytes_per_scanline < hdr.height)
+ {
+ av_log(avctx, AV_LOG_ERROR, "Invalid image data\n");
+ return AVERROR_INVALIDDATA;
+ }
+
- *frame_out = s->frame;
++ av_image_copy_plane(frame->data[0], frame->linesize[0],
+ avpkt->data + bytestream2_tell(&gb),
+ bytes_per_scanline,
+ bytes_per_scanline, hdr.height);
+ }
+
- static av_cold int brpix_end(AVCodecContext *avctx)
- {
- BRPixContext *s = avctx->priv_data;
-
- if(s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
-
- return 0;
- }
-
+ *got_frame = 1;
+
+ return avpkt->size;
+}
+
- .priv_data_size = sizeof(BRPixContext),
- .init = brpix_init,
- .close = brpix_end,
+AVCodec ff_brender_pix_decoder = {
+ .name = "brender_pix",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_BRENDER_PIX,
+ .decode = brpix_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
+ .long_name = NULL_IF_CONFIG_SMALL("BRender PIX image"),
+};
static int decode_pic(AVSContext *h)
{
int skip_count = -1;
+ int ret;
enum cavs_mb mb_type;
+ av_frame_unref(h->cur.f);
+
skip_bits(&h->gb, 16);//bbv_dwlay
if (h->stc == PIC_PB_START_CODE) {
h->cur.f->pict_type = get_bits(&h->gb, 2) + AV_PICTURE_TYPE_I;
if (h->stream_revision > 0)
skip_bits(&h->gb, 1); //marker_bit
}
- /* release last B frame */
- if (h->cur.f->data[0])
- h->avctx->release_buffer(h->avctx, h->cur.f);
- if ((ret = ff_get_buffer(h->avctx, h->cur.f)) < 0)
- ff_get_buffer(h->avctx, h->cur.f, h->cur.f->pict_type == AV_PICTURE_TYPE_B ?
- 0 : AV_GET_BUFFER_FLAG_REF);
++ if ((ret = ff_get_buffer(h->avctx, h->cur.f,
++ h->cur.f->pict_type == AV_PICTURE_TYPE_B ?
++ 0 : AV_GET_BUFFER_FLAG_REF)) < 0)
+ return ret;
if (!h->edge_emu_buffer) {
int alloc_size = FFALIGN(FFABS(h->cur.f->linesize[0]) + 32, 32);
r = ((color >> 8) & 0x000F) * 17;
g = ((color >> 4) & 0x000F) * 17;
b = ((color ) & 0x000F) * 17;
- palette[i + array_offset] = r << 16 | g << 8 | b;
+ palette[i + array_offset] = 0xFFU << 24 | r << 16 | g << 8 | b;
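+ /* the * 17 above expands each 4-bit component to the full 8-bit
+  * range (0xF * 17 == 0xFF); 0xFFU << 24 makes the entry opaque */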
}
- cc->frame.palette_has_changed = 1;
+ cc->frame->palette_has_changed = 1;
}
static int cdg_tile_block(CDGraphicsContext *cc, uint8_t *data, int b)
av_log(avctx, AV_LOG_ERROR, "buffer too small for decoder\n");
return AVERROR(EINVAL);
}
+ if (buf_size > CDG_HEADER_SIZE + CDG_DATA_SIZE) {
+ av_log(avctx, AV_LOG_ERROR, "buffer too big for decoder\n");
+ return AVERROR(EINVAL);
+ }
- ret = avctx->reget_buffer(avctx, &cc->frame);
+ ret = ff_reget_buffer(avctx, cc->frame);
if (ret) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
- if (!avctx->frame_number)
+ if (!avctx->frame_number) {
- memset(cc->frame.data[0], 0, cc->frame.linesize[0] * avctx->height);
- memset(cc->frame.data[1], 0, AVPALETTE_SIZE);
+ memset(cc->frame->data[0], 0, cc->frame->linesize[0] * avctx->height);
++ memset(cc->frame->data[1], 0, AVPALETTE_SIZE);
+ }
command = bytestream_get_byte(&buf);
inst = bytestream_get_byte(&buf);
typedef struct {
AVCodecContext *avctx;
-- AVFrame frame;
int bpp;
int format;
int padded_bits;
}
}
- static void cdxl_decode_rgb(CDXLVideoContext *c)
+ static void cdxl_decode_rgb(CDXLVideoContext *c, AVFrame *frame)
{
- uint32_t *new_palette = (uint32_t *)c->frame.data[1];
+ uint32_t *new_palette = (uint32_t *)frame->data[1];
- memset(c->frame.data[1], 0, AVPALETTE_SIZE);
++ memset(frame->data[1], 0, AVPALETTE_SIZE);
import_palette(c, new_palette);
- import_format(c, c->frame.linesize[0], c->frame.data[0]);
+ import_format(c, frame->linesize[0], frame->data[0]);
}
- static void cdxl_decode_ham6(CDXLVideoContext *c)
+ static void cdxl_decode_ham6(CDXLVideoContext *c, AVFrame *frame)
{
AVCodecContext *avctx = c->avctx;
uint32_t new_palette[16], r, g, b;
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
+ #include "internal.h"
-typedef struct {
- uint8_t y0, y1, y2, y3;
- uint8_t u, v;
-} cvid_codebook;
+typedef uint8_t cvid_codebook[12];
#define MAX_STRIPS 32
typedef struct CinepakContext {
AVCodecContext *avctx;
-- AVFrame frame;
++ AVFrame *frame;
const unsigned char *data;
int size;
for (y=strip->y1; y < strip->y2; y+=4) {
- iy[0] = strip->x1 + (y * s->frame.linesize[0]);
- iy[1] = iy[0] + s->frame.linesize[0];
- iy[2] = iy[1] + s->frame.linesize[0];
- iy[3] = iy[2] + s->frame.linesize[0];
- iu[0] = (strip->x1/2) + ((y/2) * s->frame.linesize[1]);
- iu[1] = iu[0] + s->frame.linesize[1];
- iv[0] = (strip->x1/2) + ((y/2) * s->frame.linesize[2]);
- iv[1] = iv[0] + s->frame.linesize[2];
+/* take care of y dimension not being multiple of 4, such streams exist */
- ip0 = ip1 = ip2 = ip3 = s->frame.data[0] +
- (s->palette_video?strip->x1:strip->x1*3) + (y * s->frame.linesize[0]);
++ ip0 = ip1 = ip2 = ip3 = s->frame->data[0] +
++ (s->palette_video?strip->x1:strip->x1*3) + (y * s->frame->linesize[0]);
+ if(s->avctx->height - y > 1) {
- ip1 = ip0 + s->frame.linesize[0];
++ ip1 = ip0 + s->frame->linesize[0];
+ if(s->avctx->height - y > 2) {
- ip2 = ip1 + s->frame.linesize[0];
++ ip2 = ip1 + s->frame->linesize[0];
+ if(s->avctx->height - y > 3) {
- ip3 = ip2 + s->frame.linesize[0];
++ ip3 = ip2 + s->frame->linesize[0];
+ }
+ }
+ }
+/* to get the correct picture for not-multiple-of-4 cases let us fill
+ * each block from the bottom up, thus possibly overwriting the top line
+ * more than once but ending with the correct data in place
+ * (instead of in-loop checking) */
for (x=strip->x1; x < strip->x2; x+=4) {
if ((chunk_id & 0x01) && !(mask >>= 1)) {
num_strips = FFMIN(num_strips, MAX_STRIPS);
- s->frame.key_frame = 0;
++ s->frame->key_frame = 0;
+
for (i=0; i < num_strips; i++) {
if ((s->data + 12) > eod)
return AVERROR_INVALIDDATA;
s->strips[i].id = s->data[0];
- s->strips[i].y1 = y0;
- s->strips[i].x1 = 0;
- s->strips[i].y2 = y0 + AV_RB16 (&s->data[8]);
- s->strips[i].x2 = s->avctx->width;
+/* zero y1 means "relative to the previous stripe" */
+ if (!(s->strips[i].y1 = AV_RB16 (&s->data[4])))
+ s->strips[i].y2 = (s->strips[i].y1 = y0) + AV_RB16 (&s->data[8]);
+ else
+ s->strips[i].y2 = AV_RB16 (&s->data[8]);
+ s->strips[i].x1 = AV_RB16 (&s->data[6]);
+ s->strips[i].x2 = AV_RB16 (&s->data[10]);
+
+ if (s->strips[i].id == 0x10)
- s->frame.key_frame = 1;
++ s->frame->key_frame = 1;
strip_size = AV_RB24 (&s->data[1]) - 12;
if (strip_size < 0)
avctx->pix_fmt = AV_PIX_FMT_PAL8;
}
- avcodec_get_frame_defaults(&s->frame);
-- s->frame.data[0] = NULL;
++ s->frame = av_frame_alloc();
++ if (!s->frame)
++ return AVERROR(ENOMEM);
return 0;
}
s->data = buf;
s->size = buf_size;
- s->frame.reference = 3;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &s->frame))) {
- if ((ret = ff_reget_buffer(avctx, &s->frame))) {
++ if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
if (s->palette_video) {
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
if (pal) {
-- s->frame.palette_has_changed = 1;
++ s->frame->palette_has_changed = 1;
memcpy(s->pal, pal, AVPALETTE_SIZE);
}
}
- cinepak_decode(s);
+ if ((ret = cinepak_decode(s)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "cinepak_decode failed\n");
+ }
if (s->palette_video)
-- memcpy (s->frame.data[1], s->pal, AVPALETTE_SIZE);
++ memcpy (s->frame->data[1], s->pal, AVPALETTE_SIZE);
+
- if ((ret = av_frame_ref(data, &s->frame)) < 0)
++ if ((ret = av_frame_ref(data, s->frame)) < 0)
+ return ret;
*got_frame = 1;
- *(AVFrame*)data = s->frame;
/* report that the buffer was completely consumed */
return buf_size;
{
CinepakContext *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
- av_frame_unref(&s->frame);
++ av_frame_free(&s->frame);
return 0;
}
init_get_bits(&gb, buf, buf_size * 8);
for (y = 0; y < avctx->height; y++) {
- uint8_t *luma = &a->picture.data[0][y * a->picture.linesize[0]];
- uint8_t *cb = &a->picture.data[1][y * a->picture.linesize[1]];
- uint8_t *cr = &a->picture.data[2][y * a->picture.linesize[2]];
+ uint8_t *luma = &p->data[0][y * p->linesize[0]];
+ uint8_t *cb = &p->data[1][y * p->linesize[1]];
+ uint8_t *cr = &p->data[2][y * p->linesize[2]];
for (x = 0; x < avctx->width; x += 4) {
- luma[3] = get_bits(&gb, 5) << 3;
- luma[2] = get_bits(&gb, 5) << 3;
- luma[1] = get_bits(&gb, 5) << 3;
- luma[0] = get_bits(&gb, 5) << 3;
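+ /* x * 33 >> 2 maps the 5-bit range 0..31 exactly onto 0..255
+  * (31 * 33 / 4 == 255); the old << 3 peaked at 248 */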
+ luma[3] = (get_bits(&gb, 5)*33) >> 2;
+ luma[2] = (get_bits(&gb, 5)*33) >> 2;
+ luma[1] = (get_bits(&gb, 5)*33) >> 2;
+ luma[0] = (get_bits(&gb, 5)*33) >> 2;
luma += 4;
*(cb++) = get_bits(&gb, 6) << 2;
*(cr++) = get_bits(&gb, 6) << 2;
#endif
#if CONFIG_CLJR_ENCODER
+ typedef struct CLJRContext {
++ AVClass *avclass;
+ AVFrame picture;
++ int dither_type;
+ } CLJRContext;
+
+ static av_cold int encode_init(AVCodecContext *avctx)
+ {
+ CLJRContext * const a = avctx->priv_data;
+
+ avctx->coded_frame = &a->picture;
+
+ return 0;
+ }
+
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *p, int *got_packet)
{
--- /dev/null
- AVFrame frame;
+/*
+ * CPiA video decoder.
+ * Copyright (c) 2010 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This decoder is based on the LGPL code available at
+ * https://v4l4j.googlecode.com/svn/v4l4j/trunk/libvideo/libv4lconvert/cpia1.c
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "get_bits.h"
++#include "internal.h"
+
+
+#define FRAME_HEADER_SIZE 64
+#define MAGIC_0 0x19 /**< First header byte */
+#define MAGIC_1 0x68 /**< Second header byte */
+#define SUBSAMPLE_420 0
+#define SUBSAMPLE_422 1
+#define YUVORDER_YUYV 0
+#define YUVORDER_UYVY 1
+#define NOT_COMPRESSED 0
+#define COMPRESSED 1
+#define NO_DECIMATION 0
+#define DECIMATION_ENAB 1
+#define EOL 0xfd /**< End Of Line marker */
+#define EOI 0xff /**< End Of Image marker */
+
+
+typedef struct {
- AVFrame* const frame = &cpia->frame;
++ AVFrame *frame;
+} CpiaContext;
+
+
+static int cpia_decode_frame(AVCodecContext *avctx,
+ void *data, int *got_frame, AVPacket* avpkt)
+{
+ CpiaContext* const cpia = avctx->priv_data;
+ int i,j,ret;
+
+ uint8_t* const header = avpkt->data;
+ uint8_t* src;
+ int src_size;
+ uint16_t linelength;
+ uint8_t skip;
+
- if ((ret = avctx->reget_buffer(avctx, frame)) < 0) {
++ AVFrame *frame = cpia->frame;
+ uint8_t *y, *u, *v, *y_end, *u_end, *v_end;
+
+ // Check header
+ if ( avpkt->size < FRAME_HEADER_SIZE
+ || header[0] != MAGIC_0 || header[1] != MAGIC_1
+ || (header[17] != SUBSAMPLE_420 && header[17] != SUBSAMPLE_422)
+ || (header[18] != YUVORDER_YUYV && header[18] != YUVORDER_UYVY)
+ || (header[28] != NOT_COMPRESSED && header[28] != COMPRESSED)
+ || (header[29] != NO_DECIMATION && header[29] != DECIMATION_ENAB)
+ ) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid header!\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ // currently unsupported properties
+ if (header[17] == SUBSAMPLE_422) {
+ av_log(avctx, AV_LOG_ERROR, "Unsupported subsample!\n");
+ return AVERROR_PATCHWELCOME;
+ }
+ if (header[18] == YUVORDER_UYVY) {
+ av_log(avctx, AV_LOG_ERROR, "Unsupported YUV byte order!\n");
+ return AVERROR_PATCHWELCOME;
+ }
+ if (header[29] == DECIMATION_ENAB) {
+ av_log(avctx, AV_LOG_ERROR, "Decimation unsupported!\n");
+ return AVERROR_PATCHWELCOME;
+ }
+
+ src = header + FRAME_HEADER_SIZE;
+ src_size = avpkt->size - FRAME_HEADER_SIZE;
+
+ if (header[28] == NOT_COMPRESSED) {
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ frame->key_frame = 1;
+ } else {
+ frame->pict_type = AV_PICTURE_TYPE_P;
+ frame->key_frame = 0;
+ }
+
+ // Get buffer filled with previous frame
- *(AVFrame*) data = *frame;
++ if ((ret = ff_reget_buffer(avctx, frame)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed!\n");
+ return ret;
+ }
+
+
+ for ( i = 0;
+ i < frame->height;
+ i++, src += linelength, src_size -= linelength
+ ) {
+ // Read line length, two byte little endian
+ linelength = AV_RL16(src);
+ src += 2;
+
+ if (src_size < linelength) {
+ av_frame_set_decode_error_flags(frame, FF_DECODE_ERROR_INVALID_BITSTREAM);
+ av_log(avctx, AV_LOG_WARNING, "Frame ended enexpectedly!\n");
+ break;
+ }
+ if (src[linelength - 1] != EOL) {
+ av_frame_set_decode_error_flags(frame, FF_DECODE_ERROR_INVALID_BITSTREAM);
+ av_log(avctx, AV_LOG_WARNING, "Wrong line length %d or line not terminated properly (found 0x%02x)!\n", linelength, src[linelength - 1]);
+ break;
+ }
+
+ /* Update the data pointers. Y data is on every line.
+ * U and V data on every second line
+ */
+ y = &frame->data[0][i * frame->linesize[0]];
+ u = &frame->data[1][(i >> 1) * frame->linesize[1]];
+ v = &frame->data[2][(i >> 1) * frame->linesize[2]];
+ y_end = y + frame->linesize[0] - 1;
+ u_end = u + frame->linesize[1] - 1;
+ v_end = v + frame->linesize[2] - 1;
+
+ if ((i & 1) && header[17] == SUBSAMPLE_420) {
+ /* We are on a odd line and 420 subsample is used.
+ * On this line only Y values are specified, one per pixel.
+ */
+ for (j = 0; j < linelength - 1; j++) {
+ if (y > y_end) {
+ av_frame_set_decode_error_flags(frame, FF_DECODE_ERROR_INVALID_BITSTREAM);
+ av_log(avctx, AV_LOG_WARNING, "Decoded data exceeded linesize!\n");
+ break;
+ }
+ if ((src[j] & 1) && header[28] == COMPRESSED) {
+ /* It seems that odd lines are always uncompressed, but
+ * we do it according to specification anyways.
+ */
+ skip = src[j] >> 1;
+ y += skip;
+ } else {
+ *(y++) = src[j];
+ }
+ }
+ } else if (header[17] == SUBSAMPLE_420) {
+ /* We are on an even line and 420 subsample is used.
+ * On this line each pair of pixels is described by four bytes.
+ */
+ for (j = 0; j < linelength - 4; ) {
+ if (y + 1 > y_end || u > u_end || v > v_end) {
+ av_frame_set_decode_error_flags(frame, FF_DECODE_ERROR_INVALID_BITSTREAM);
+ av_log(avctx, AV_LOG_WARNING, "Decoded data exceeded linesize!\n");
+ break;
+ }
+ if ((src[j] & 1) && header[28] == COMPRESSED) {
+ // Skip amount of pixels and move forward one byte
+ skip = src[j] >> 1;
+ y += skip;
+ u += skip >> 1;
+ v += skip >> 1;
+ j++;
+ } else {
+ // Set image data as specified and move forward 4 bytes
+ *(y++) = src[j];
+ *(u++) = src[j+1];
+ *(y++) = src[j+2];
+ *(v++) = src[j+3];
+ j += 4;
+ }
+ }
+ }
+ }
+
+ *got_frame = 1;
++ if ((ret = av_frame_ref(data, cpia->frame)) < 0)
++ return ret;
+
+ return avpkt->size;
+}
+
+static av_cold int cpia_decode_init(AVCodecContext *avctx)
+{
++ CpiaContext *s = avctx->priv_data;
++
+ // output pixel format
+ avctx->pix_fmt = AV_PIX_FMT_YUV420P;
+
+ /* The default timebase set by the v4l2 demuxer leads to probing which is buggy.
+ * Set some reasonable time_base to skip this.
+ */
+ if (avctx->time_base.num == 1 && avctx->time_base.den == 1000000) {
+ avctx->time_base.num = 1;
+ avctx->time_base.den = 60;
+ }
+
++ s->frame = av_frame_alloc();
++ if (!s->frame)
++ return AVERROR(ENOMEM);
++
+ return 0;
+}
+
++static av_cold int cpia_decode_end(AVCodecContext *avctx)
++{
++ CpiaContext *s = avctx->priv_data;
++
++ av_frame_free(&s->frame);
++
++ return 0;
++}
+
+AVCodec ff_cpia_decoder = {
+ .name = "cpia",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_CPIA,
+ .priv_data_size = sizeof(CpiaContext),
+ .init = cpia_decode_init,
++ .close = cpia_decode_end,
+ .decode = cpia_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
+ .long_name = NULL_IF_CONFIG_SMALL("CPiA video format"),
+};
--- /dev/null
- if (ff_get_buffer(avctx, &priv->pic) < 0) {
+/*
+ * - CrystalHD decoder module -
+ *
+ * Copyright(C) 2010,2011 Philip Langdale <ffmpeg.philipl@overt.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * - Principles of Operation -
+ *
+ * The CrystalHD decoder operates at the bitstream level - which is an even
+ * higher level than the decoding hardware you typically see in modern GPUs.
+ * This means it has a very simple interface, in principle. You feed demuxed
+ * packets in one end and get decoded picture (fields/frames) out the other.
+ *
+ * Of course, nothing is ever that simple. Due, at the very least, to b-frame
+ * dependencies in the supported formats, the hardware has a delay between
+ * when a packet goes in, and when a picture comes out. Furthermore, this delay
+ * is not just a function of time, but also one of the dependency on additional
+ * frames being fed into the decoder to satisfy the b-frame dependencies.
+ *
+ * As such, a pipeline will build up that is roughly equivalent to the required
+ * DPB for the file being played. If that was all it took, things would still
+ * be simple - so, of course, it isn't.
+ *
+ * The hardware has a way of indicating that a picture is ready to be copied out,
+ * but this is unreliable - and sometimes the attempt will still fail so, based
+ * on testing, the code will wait until 3 pictures are ready before starting
+ * to copy out - and this has the effect of extending the pipeline.
+ *
+ * Finally, while it is tempting to say that once the decoder starts outputting
+ * frames, the software should never fail to return a frame from a decode(),
+ * this is a hard assertion to make, because the stream may switch between
+ * differently encoded content (number of b-frames, interlacing, etc) which
+ * might require a longer pipeline than before. If that happened, you could
+ * deadlock trying to retrieve a frame that can't be decoded without feeding
+ * in additional packets.
+ *
+ * As such, the code will return in the event that a picture cannot be copied
+ * out, leading to an increase in the length of the pipeline. This in turn,
+ * means we have to be sensitive to the time it takes to decode a picture;
+ * We do not want to give up just because the hardware needed a little more
+ * time to prepare the picture! For this reason, there are delays included
+ * in the decode() path that ensure that, under normal conditions, the hardware
+ * will only fail to return a frame if it really needs additional packets to
+ * complete the decoding.
+ *
+ * Finally, to be explicit, we do not want the pipeline to grow without bound
+ * for two reasons: 1) The hardware can only buffer a finite number of packets,
+ * and 2) The client application may not be able to cope with arbitrarily long
+ * delays in the video path relative to the audio path. For example, MPlayer
+ * can only handle a 20 picture delay (although this is arbitrary, and needs
+ * to be extended to fully support the CrystalHD where the delay could be up
+ * to 32 pictures - consider PAFF H.264 content with 16 b-frames).
+ */
+
+/*****************************************************************************
+ * Includes
+ ****************************************************************************/
+
+#define _XOPEN_SOURCE 600
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <libcrystalhd/bc_dts_types.h>
+#include <libcrystalhd/bc_dts_defs.h>
+#include <libcrystalhd/libcrystalhd_if.h>
+
+#include "avcodec.h"
+#include "h264.h"
+#include "internal.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
+
+/** Timeout parameter passed to DtsProcOutput() in us */
+#define OUTPUT_PROC_TIMEOUT 50
+/** Step between fake timestamps passed to hardware in units of 100ns */
+#define TIMESTAMP_UNIT 100000
+/** Initial value in us of the wait in decode() */
+#define BASE_WAIT 10000
+/** Increment in us to adjust wait in decode() */
+#define WAIT_UNIT 1000
+
+
+/*****************************************************************************
+ * Module private data
+ ****************************************************************************/
+
+typedef enum {
+ RET_ERROR = -1,
+ RET_OK = 0,
+ RET_COPY_AGAIN = 1,
+ RET_SKIP_NEXT_COPY = 2,
+ RET_COPY_NEXT_FIELD = 3,
+} CopyRet;
+
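+/*
+ * Node in the list that maps the fake timestamps fed to the hardware back
+ * to the caller's reordered_opaque values and parsed picture types.
+ */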
+typedef struct OpaqueList {
+ struct OpaqueList *next;
+ uint64_t fake_timestamp;
+ uint64_t reordered_opaque;
+ uint8_t pic_type;
+} OpaqueList;
+
+typedef struct {
+ AVClass *av_class;
+ AVCodecContext *avctx;
+ AVFrame pic;
+ HANDLE dev;
+
+ uint8_t *orig_extradata;
+ uint32_t orig_extradata_size;
+
+ AVBitStreamFilterContext *bsfc;
+ AVCodecParserContext *parser;
+
+ uint8_t is_70012;
+ uint8_t *sps_pps_buf;
+ uint32_t sps_pps_size;
+ uint8_t is_nal;
+ uint8_t output_ready;
+ uint8_t need_second_field;
+ uint8_t skip_next_output;
+ uint64_t decode_wait;
+
+ uint64_t last_picture;
+
+ OpaqueList *head;
+ OpaqueList *tail;
+
+ /* Options */
+ uint32_t sWidth;
+ uint8_t bframe_bug;
+} CHDContext;
+
+static const AVOption options[] = {
+ { "crystalhd_downscale_width",
+ "Turn on downscaling to the specified width",
+ offsetof(CHDContext, sWidth),
+ AV_OPT_TYPE_INT, {.i64 = 0}, 0, UINT32_MAX,
+ AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM, },
+ { NULL, },
+};
+
+
+/*****************************************************************************
+ * Helper functions
+ ****************************************************************************/
+
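+/* Map an FFmpeg codec ID onto the corresponding CrystalHD bitstream subtype. */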
+static inline BC_MEDIA_SUBTYPE id2subtype(CHDContext *priv, enum AVCodecID id)
+{
+ switch (id) {
+ case AV_CODEC_ID_MPEG4:
+ return BC_MSUBTYPE_DIVX;
+ case AV_CODEC_ID_MSMPEG4V3:
+ return BC_MSUBTYPE_DIVX311;
+ case AV_CODEC_ID_MPEG2VIDEO:
+ return BC_MSUBTYPE_MPEG2VIDEO;
+ case AV_CODEC_ID_VC1:
+ return BC_MSUBTYPE_VC1;
+ case AV_CODEC_ID_WMV3:
+ return BC_MSUBTYPE_WMV3;
+ case AV_CODEC_ID_H264:
+ return priv->is_nal ? BC_MSUBTYPE_AVC1 : BC_MSUBTYPE_H264;
+ default:
+ return BC_MSUBTYPE_INVALID;
+ }
+}
+
+static inline void print_frame_info(CHDContext *priv, BC_DTS_PROC_OUT *output)
+{
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tYBuffSz: %u\n", output->YbuffSz);
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tYBuffDoneSz: %u\n",
+ output->YBuffDoneSz);
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tUVBuffDoneSz: %u\n",
+ output->UVBuffDoneSz);
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tTimestamp: %"PRIu64"\n",
+ output->PicInfo.timeStamp);
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tPicture Number: %u\n",
+ output->PicInfo.picture_number);
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tWidth: %u\n",
+ output->PicInfo.width);
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tHeight: %u\n",
+ output->PicInfo.height);
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tChroma: 0x%03x\n",
+ output->PicInfo.chroma_format);
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tPulldown: %u\n",
+ output->PicInfo.pulldown);
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tFlags: 0x%08x\n",
+ output->PicInfo.flags);
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tFrame Rate/Res: %u\n",
+ output->PicInfo.frame_rate);
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tAspect Ratio: %u\n",
+ output->PicInfo.aspect_ratio);
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tColor Primaries: %u\n",
+ output->PicInfo.colour_primaries);
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tMetaData: %u\n",
+ output->PicInfo.picture_meta_payload);
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tSession Number: %u\n",
+ output->PicInfo.sess_num);
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tycom: %u\n",
+ output->PicInfo.ycom);
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tCustom Aspect: %u\n",
+ output->PicInfo.custom_aspect_ratio_width_height);
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tFrames to Drop: %u\n",
+ output->PicInfo.n_drop);
+ av_log(priv->avctx, AV_LOG_VERBOSE, "\tH264 Valid Fields: 0x%08x\n",
+ output->PicInfo.other.h264.valid);
+}
+
+
+/*****************************************************************************
+ * OpaqueList functions
+ ****************************************************************************/
+
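+/*
+ * Append a new node to the list, assigning it the next fake timestamp in
+ * sequence. Returns the fake timestamp to pass to the hardware, or 0 if
+ * allocation failed.
+ */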
+static uint64_t opaque_list_push(CHDContext *priv, uint64_t reordered_opaque,
+ uint8_t pic_type)
+{
+ OpaqueList *newNode = av_mallocz(sizeof (OpaqueList));
+ if (!newNode) {
+ av_log(priv->avctx, AV_LOG_ERROR,
+ "Unable to allocate new node in OpaqueList.\n");
+ return 0;
+ }
+ if (!priv->head) {
+ newNode->fake_timestamp = TIMESTAMP_UNIT;
+ priv->head = newNode;
+ } else {
+ newNode->fake_timestamp = priv->tail->fake_timestamp + TIMESTAMP_UNIT;
+ priv->tail->next = newNode;
+ }
+ priv->tail = newNode;
+ newNode->reordered_opaque = reordered_opaque;
+ newNode->pic_type = pic_type;
+
+ return newNode->fake_timestamp;
+}
+
+/*
+ * The OpaqueList is built in decode order, while elements will be removed
+ * in presentation order. If frames are reordered, this means we must be
+ * able to remove elements that are not the first element.
+ *
+ * Returned node must be freed by caller.
+ */
+static OpaqueList *opaque_list_pop(CHDContext *priv, uint64_t fake_timestamp)
+{
+ OpaqueList *node = priv->head;
+
+ if (!priv->head) {
+ av_log(priv->avctx, AV_LOG_ERROR,
+ "CrystalHD: Attempted to query non-existent timestamps.\n");
+ return NULL;
+ }
+
+ /*
+ * The first element is special-cased because we have to manipulate
+ * the head pointer rather than the previous element in the list.
+ */
+ if (priv->head->fake_timestamp == fake_timestamp) {
+        priv->head = node->next;
+
+        /* If the removed node was the only element, clear the tail pointer
+         * rather than dereferencing the now-NULL head. */
+        if (!priv->head)
+            priv->tail = NULL;
+
+ node->next = NULL;
+ return node;
+ }
+
+ /*
+ * The list is processed at arm's length so that we have the
+ * previous element available to rewrite its next pointer.
+ */
+ while (node->next) {
+ OpaqueList *current = node->next;
+ if (current->fake_timestamp == fake_timestamp) {
+ node->next = current->next;
+
+ if (!node->next)
+ priv->tail = node;
+
+ current->next = NULL;
+ return current;
+ } else {
+ node = current;
+ }
+ }
+
+ av_log(priv->avctx, AV_LOG_VERBOSE,
+ "CrystalHD: Couldn't match fake_timestamp.\n");
+ return NULL;
+}
+
+
+/*****************************************************************************
+ * Video decoder API function definitions
+ ****************************************************************************/
+
+static void flush(AVCodecContext *avctx)
+{
+ CHDContext *priv = avctx->priv_data;
+
+ avctx->has_b_frames = 0;
+ priv->last_picture = -1;
+ priv->output_ready = 0;
+ priv->need_second_field = 0;
+ priv->skip_next_output = 0;
+ priv->decode_wait = BASE_WAIT;
+
+ if (priv->pic.data[0])
+ avctx->release_buffer(avctx, &priv->pic);
+
+ /* Flush mode 4 flushes all software and hardware buffers. */
+ DtsFlushInput(priv->dev, 4);
+}
+
+
+static av_cold int uninit(AVCodecContext *avctx)
+{
+ CHDContext *priv = avctx->priv_data;
+ HANDLE device;
+
+ device = priv->dev;
+ DtsStopDecoder(device);
+ DtsCloseDecoder(device);
+ DtsDeviceClose(device);
+
+ /*
+ * Restore original extradata, so that if the decoder is
+ * reinitialised, the bitstream detection and filtering
+ * will work as expected.
+ */
+ if (priv->orig_extradata) {
+ av_free(avctx->extradata);
+ avctx->extradata = priv->orig_extradata;
+ avctx->extradata_size = priv->orig_extradata_size;
+ priv->orig_extradata = NULL;
+ priv->orig_extradata_size = 0;
+ }
+
+ av_parser_close(priv->parser);
+ if (priv->bsfc) {
+ av_bitstream_filter_close(priv->bsfc);
+ }
+
+ av_free(priv->sps_pps_buf);
+
+ if (priv->pic.data[0])
+ avctx->release_buffer(avctx, &priv->pic);
+
+ if (priv->head) {
+ OpaqueList *node = priv->head;
+ while (node) {
+ OpaqueList *next = node->next;
+ av_free(node);
+ node = next;
+ }
+ }
+
+ return 0;
+}
+
+
+static av_cold int init(AVCodecContext *avctx)
+{
+ CHDContext* priv;
+ BC_STATUS ret;
+ BC_INFO_CRYSTAL version;
+ BC_INPUT_FORMAT format = {
+ .FGTEnable = FALSE,
+ .Progressive = TRUE,
+ .OptFlags = 0x80000000 | vdecFrameRate59_94 | 0x40,
+ .width = avctx->width,
+ .height = avctx->height,
+ };
+
+ BC_MEDIA_SUBTYPE subtype;
+
+ uint32_t mode = DTS_PLAYBACK_MODE |
+ DTS_LOAD_FILE_PLAY_FW |
+ DTS_SKIP_TX_CHK_CPB |
+ DTS_PLAYBACK_DROP_RPT_MODE |
+ DTS_SINGLE_THREADED_MODE |
+ DTS_DFLT_RESOLUTION(vdecRESOLUTION_1080p23_976);
+
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD Init for %s\n",
+ avctx->codec->name);
+
+ avctx->pix_fmt = AV_PIX_FMT_YUYV422;
+
+ /* Initialize the library */
+ priv = avctx->priv_data;
+ priv->avctx = avctx;
+ priv->is_nal = avctx->extradata_size > 0 && *(avctx->extradata) == 1;
+ priv->last_picture = -1;
+ priv->decode_wait = BASE_WAIT;
+
+ subtype = id2subtype(priv, avctx->codec->id);
+ switch (subtype) {
+ case BC_MSUBTYPE_AVC1:
+ {
+ uint8_t *dummy_p;
+ int dummy_int;
+
+ /* Back up the extradata so it can be restored at close time. */
+ priv->orig_extradata = av_malloc(avctx->extradata_size);
+ if (!priv->orig_extradata) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Failed to allocate copy of extradata\n");
+ return AVERROR(ENOMEM);
+ }
+ priv->orig_extradata_size = avctx->extradata_size;
+ memcpy(priv->orig_extradata, avctx->extradata, avctx->extradata_size);
+
+ priv->bsfc = av_bitstream_filter_init("h264_mp4toannexb");
+ if (!priv->bsfc) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Cannot open the h264_mp4toannexb BSF!\n");
+ return AVERROR_BSF_NOT_FOUND;
+ }
+ av_bitstream_filter_filter(priv->bsfc, avctx, NULL, &dummy_p,
+ &dummy_int, NULL, 0, 0);
+ }
+ subtype = BC_MSUBTYPE_H264;
+ // Fall-through
+ case BC_MSUBTYPE_H264:
+ format.startCodeSz = 4;
+ // Fall-through
+ case BC_MSUBTYPE_VC1:
+ case BC_MSUBTYPE_WVC1:
+ case BC_MSUBTYPE_WMV3:
+ case BC_MSUBTYPE_WMVA:
+ case BC_MSUBTYPE_MPEG2VIDEO:
+ case BC_MSUBTYPE_DIVX:
+ case BC_MSUBTYPE_DIVX311:
+ format.pMetaData = avctx->extradata;
+ format.metaDataSz = avctx->extradata_size;
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "CrystalHD: Unknown codec name\n");
+ return AVERROR(EINVAL);
+ }
+ format.mSubtype = subtype;
+
+ if (priv->sWidth) {
+ format.bEnableScaling = 1;
+ format.ScalingParams.sWidth = priv->sWidth;
+ }
+
+ /* Get a decoder instance */
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: starting up\n");
+ // Initialize the Link and Decoder devices
+ ret = DtsDeviceOpen(&priv->dev, mode);
+ if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: DtsDeviceOpen failed\n");
+ goto fail;
+ }
+
+ ret = DtsCrystalHDVersion(priv->dev, &version);
+ if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_VERBOSE,
+ "CrystalHD: DtsCrystalHDVersion failed\n");
+ goto fail;
+ }
+ priv->is_70012 = version.device == 0;
+
+ if (priv->is_70012 &&
+ (subtype == BC_MSUBTYPE_DIVX || subtype == BC_MSUBTYPE_DIVX311)) {
+ av_log(avctx, AV_LOG_VERBOSE,
+ "CrystalHD: BCM70012 doesn't support MPEG4-ASP/DivX/Xvid\n");
+ goto fail;
+ }
+
+ ret = DtsSetInputFormat(priv->dev, &format);
+ if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "CrystalHD: SetInputFormat failed\n");
+ goto fail;
+ }
+
+ ret = DtsOpenDecoder(priv->dev, BC_STREAM_TYPE_ES);
+ if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsOpenDecoder failed\n");
+ goto fail;
+ }
+
+ ret = DtsSetColorSpace(priv->dev, OUTPUT_MODE422_YUY2);
+ if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsSetColorSpace failed\n");
+ goto fail;
+ }
+ ret = DtsStartDecoder(priv->dev);
+ if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartDecoder failed\n");
+ goto fail;
+ }
+ ret = DtsStartCapture(priv->dev);
+ if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartCapture failed\n");
+ goto fail;
+ }
+
+ if (avctx->codec->id == AV_CODEC_ID_H264) {
+        priv->parser = av_parser_init(avctx->codec->id);
+        if (!priv->parser)
+            av_log(avctx, AV_LOG_WARNING,
+                "Cannot open the h.264 parser! Interlaced h.264 content "
+                "will not be detected reliably.\n");
+        else
+            priv->parser->flags = PARSER_FLAG_COMPLETE_FRAMES;
+ }
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Init complete.\n");
+
+ return 0;
+
+ fail:
+ uninit(avctx);
+ return -1;
+}
+
+
+static inline CopyRet copy_frame(AVCodecContext *avctx,
+ BC_DTS_PROC_OUT *output,
+ void *data, int *got_frame)
+{
+ BC_STATUS ret;
+ BC_DTS_STATUS decoder_status = { 0, };
+ uint8_t trust_interlaced;
+ uint8_t interlaced;
+
+ CHDContext *priv = avctx->priv_data;
+ int64_t pkt_pts = AV_NOPTS_VALUE;
+ uint8_t pic_type = 0;
+
+ uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) ==
+ VDEC_FLAG_BOTTOMFIELD;
+ uint8_t bottom_first = !!(output->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST);
+
+ int width = output->PicInfo.width;
+ int height = output->PicInfo.height;
+ int bwidth;
+ uint8_t *src = output->Ybuff;
+ int sStride;
+ uint8_t *dst;
+ int dStride;
+
+ if (output->PicInfo.timeStamp != 0) {
+ OpaqueList *node = opaque_list_pop(priv, output->PicInfo.timeStamp);
+ if (node) {
+ pkt_pts = node->reordered_opaque;
+ pic_type = node->pic_type;
+ av_free(node);
+ } else {
+ /*
+ * We will encounter a situation where a timestamp cannot be
+ * popped if a second field is being returned. In this case,
+ * each field has the same timestamp and the first one will
+ * cause it to be popped. To keep subsequent calculations
+             * simple, pic_type should be set to a FIELD value - it doesn't
+ * matter which, but I chose BOTTOM.
+ */
+ pic_type = PICT_BOTTOM_FIELD;
+ }
+ av_log(avctx, AV_LOG_VERBOSE, "output \"pts\": %"PRIu64"\n",
+ output->PicInfo.timeStamp);
+ av_log(avctx, AV_LOG_VERBOSE, "output picture type %d\n",
+ pic_type);
+ }
+
+ ret = DtsGetDriverStatus(priv->dev, &decoder_status);
+ if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR,
+ "CrystalHD: GetDriverStatus failed: %u\n", ret);
+ return RET_ERROR;
+ }
+
+ /*
+ * For most content, we can trust the interlaced flag returned
+ * by the hardware, but sometimes we can't. These are the
+ * conditions under which we can trust the flag:
+ *
+ * 1) It's not h.264 content
+ * 2) The UNKNOWN_SRC flag is not set
+ * 3) We know we're expecting a second field
+ * 4) The hardware reports this picture and the next picture
+ * have the same picture number.
+ *
+ * Note that there can still be interlaced content that will
+ * fail this check, if the hardware hasn't decoded the next
+ * picture or if there is a corruption in the stream. (In either
+ * case a 0 will be returned for the next picture number)
+ */
+ trust_interlaced = avctx->codec->id != AV_CODEC_ID_H264 ||
+ !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ||
+ priv->need_second_field ||
+ (decoder_status.picNumFlags & ~0x40000000) ==
+ output->PicInfo.picture_number;
+
+ /*
+ * If we got a false negative for trust_interlaced on the first field,
+ * we will realise our mistake here when we see that the picture number is that
+ * of the previous picture. We cannot recover the frame and should discard the
+ * second field to keep the correct number of output frames.
+ */
+ if (output->PicInfo.picture_number == priv->last_picture && !priv->need_second_field) {
+ av_log(avctx, AV_LOG_WARNING,
+ "Incorrectly guessed progressive frame. Discarding second field\n");
+ /* Returning without providing a picture. */
+ return RET_OK;
+ }
+
+ interlaced = (output->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC) &&
+ trust_interlaced;
+
+ if (!trust_interlaced && (decoder_status.picNumFlags & ~0x40000000) == 0) {
+ av_log(avctx, AV_LOG_VERBOSE,
+ "Next picture number unknown. Assuming progressive frame.\n");
+ }
+
+ av_log(avctx, AV_LOG_VERBOSE, "Interlaced state: %d | trust_interlaced %d\n",
+ interlaced, trust_interlaced);
+
+ if (priv->pic.data[0] && !priv->need_second_field)
+ avctx->release_buffer(avctx, &priv->pic);
+
+ priv->need_second_field = interlaced && !priv->need_second_field;
+
+ priv->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
+ FF_BUFFER_HINTS_REUSABLE;
+ if (!priv->pic.data[0]) {
++ if (ff_get_buffer(avctx, &priv->pic, 0) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return RET_ERROR;
+ }
+ }
+
+ bwidth = av_image_get_linesize(avctx->pix_fmt, width, 0);
+ if (priv->is_70012) {
+ int pStride;
+
+ if (width <= 720)
+ pStride = 720;
+ else if (width <= 1280)
+ pStride = 1280;
+ else pStride = 1920;
+ sStride = av_image_get_linesize(avctx->pix_fmt, pStride, 0);
+ } else {
+ sStride = bwidth;
+ }
+
+ dStride = priv->pic.linesize[0];
+ dst = priv->pic.data[0];
+
+ av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");
+
+ if (interlaced) {
+ int dY = 0;
+ int sY = 0;
+
+ height /= 2;
+ if (bottom_field) {
+ av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: bottom field\n");
+ dY = 1;
+ } else {
+ av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: top field\n");
+ dY = 0;
+ }
+
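+        /* Write the decoded field into every second line of the output. */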
+        for (sY = 0; sY < height; dY += 2, sY++)
+            memcpy(&(dst[dY * dStride]), &(src[sY * sStride]), bwidth);
+ } else {
+ av_image_copy_plane(dst, dStride, src, sStride, bwidth, height);
+ }
+
+ priv->pic.interlaced_frame = interlaced;
+ if (interlaced)
+ priv->pic.top_field_first = !bottom_first;
+
+ priv->pic.pkt_pts = pkt_pts;
+
+ if (!priv->need_second_field) {
+ *got_frame = 1;
+ *(AVFrame *)data = priv->pic;
+ }
+
+ /*
+ * Two types of PAFF content have been observed. One form causes the
+ * hardware to return a field pair and the other individual fields,
+ * even though the input is always individual fields. We must skip
+ * copying on the next decode() call to maintain pipeline length in
+ * the first case.
+ */
+ if (!interlaced && (output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) &&
+ (pic_type == PICT_TOP_FIELD || pic_type == PICT_BOTTOM_FIELD)) {
+ av_log(priv->avctx, AV_LOG_VERBOSE, "Fieldpair from two packets.\n");
+ return RET_SKIP_NEXT_COPY;
+ }
+
+ /*
+ * The logic here is purely based on empirical testing with samples.
+ * If we need a second field, it could come from a second input packet,
+ * or it could come from the same field-pair input packet at the current
+ * field. In the first case, we should return and wait for the next time
+ * round to get the second field, while in the second case, we should
+ * ask the decoder for it immediately.
+ *
+ * Testing has shown that we are dealing with the fieldpair -> two fields
+ * case if the VDEC_FLAG_UNKNOWN_SRC is not set or if the input picture
+     * type was PICT_FRAME (in this second case, the flag might still be set).
+ */
+ return priv->need_second_field &&
+ (!(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ||
+ pic_type == PICT_FRAME) ?
+ RET_COPY_NEXT_FIELD : RET_OK;
+}
+
+
+static inline CopyRet receive_frame(AVCodecContext *avctx,
+ void *data, int *got_frame)
+{
+ BC_STATUS ret;
+ BC_DTS_PROC_OUT output = {
+ .PicInfo.width = avctx->width,
+ .PicInfo.height = avctx->height,
+ };
+ CHDContext *priv = avctx->priv_data;
+ HANDLE dev = priv->dev;
+
+ *got_frame = 0;
+
+ // Request decoded data from the driver
+ ret = DtsProcOutputNoCopy(dev, OUTPUT_PROC_TIMEOUT, &output);
+ if (ret == BC_STS_FMT_CHANGE) {
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Initial format change\n");
+ avctx->width = output.PicInfo.width;
+ avctx->height = output.PicInfo.height;
+ switch ( output.PicInfo.aspect_ratio ) {
+ case vdecAspectRatioSquare:
+ avctx->sample_aspect_ratio = (AVRational) { 1, 1};
+ break;
+ case vdecAspectRatio12_11:
+ avctx->sample_aspect_ratio = (AVRational) { 12, 11};
+ break;
+ case vdecAspectRatio10_11:
+ avctx->sample_aspect_ratio = (AVRational) { 10, 11};
+ break;
+ case vdecAspectRatio16_11:
+ avctx->sample_aspect_ratio = (AVRational) { 16, 11};
+ break;
+ case vdecAspectRatio40_33:
+ avctx->sample_aspect_ratio = (AVRational) { 40, 33};
+ break;
+ case vdecAspectRatio24_11:
+ avctx->sample_aspect_ratio = (AVRational) { 24, 11};
+ break;
+ case vdecAspectRatio20_11:
+ avctx->sample_aspect_ratio = (AVRational) { 20, 11};
+ break;
+ case vdecAspectRatio32_11:
+ avctx->sample_aspect_ratio = (AVRational) { 32, 11};
+ break;
+ case vdecAspectRatio80_33:
+ avctx->sample_aspect_ratio = (AVRational) { 80, 33};
+ break;
+ case vdecAspectRatio18_11:
+ avctx->sample_aspect_ratio = (AVRational) { 18, 11};
+ break;
+ case vdecAspectRatio15_11:
+ avctx->sample_aspect_ratio = (AVRational) { 15, 11};
+ break;
+ case vdecAspectRatio64_33:
+ avctx->sample_aspect_ratio = (AVRational) { 64, 33};
+ break;
+ case vdecAspectRatio160_99:
+ avctx->sample_aspect_ratio = (AVRational) {160, 99};
+ break;
+ case vdecAspectRatio4_3:
+ avctx->sample_aspect_ratio = (AVRational) { 4, 3};
+ break;
+ case vdecAspectRatio16_9:
+ avctx->sample_aspect_ratio = (AVRational) { 16, 9};
+ break;
+ case vdecAspectRatio221_1:
+ avctx->sample_aspect_ratio = (AVRational) {221, 1};
+ break;
+ }
+ return RET_COPY_AGAIN;
+ } else if (ret == BC_STS_SUCCESS) {
+ int copy_ret = -1;
+ if (output.PoutFlags & BC_POUT_FLAGS_PIB_VALID) {
+ if (priv->last_picture == -1) {
+ /*
+ * Init to one less, so that the incrementing code doesn't
+ * need to be special-cased.
+ */
+ priv->last_picture = output.PicInfo.picture_number - 1;
+ }
+
+ if (avctx->codec->id == AV_CODEC_ID_MPEG4 &&
+ output.PicInfo.timeStamp == 0 && priv->bframe_bug) {
+ av_log(avctx, AV_LOG_VERBOSE,
+ "CrystalHD: Not returning packed frame twice.\n");
+ priv->last_picture++;
+ DtsReleaseOutputBuffs(dev, NULL, FALSE);
+ return RET_COPY_AGAIN;
+ }
+
+ print_frame_info(priv, &output);
+
+ if (priv->last_picture + 1 < output.PicInfo.picture_number) {
+ av_log(avctx, AV_LOG_WARNING,
+ "CrystalHD: Picture Number discontinuity\n");
+ /*
+ * Have we lost frames? If so, we need to shrink the
+ * pipeline length appropriately.
+ *
+ * XXX: I have no idea what the semantics of this situation
+ * are so I don't even know if we've lost frames or which
+ * ones.
+ *
+ * In any case, only warn the first time.
+ */
+ priv->last_picture = output.PicInfo.picture_number - 1;
+ }
+
+ copy_ret = copy_frame(avctx, &output, data, got_frame);
+ if (*got_frame > 0) {
+ avctx->has_b_frames--;
+ priv->last_picture++;
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Pipeline length: %u\n",
+ avctx->has_b_frames);
+ }
+ } else {
+ /*
+ * An invalid frame has been consumed.
+ */
+ av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput succeeded with "
+ "invalid PIB\n");
+ avctx->has_b_frames--;
+ copy_ret = RET_OK;
+ }
+ DtsReleaseOutputBuffs(dev, NULL, FALSE);
+
+ return copy_ret;
+ } else if (ret == BC_STS_BUSY) {
+ return RET_COPY_AGAIN;
+ } else {
+ av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput failed %d\n", ret);
+ return RET_ERROR;
+ }
+}
+
+
+static int decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
+{
+ BC_STATUS ret;
+ BC_DTS_STATUS decoder_status = { 0, };
+ CopyRet rec_ret;
+ CHDContext *priv = avctx->priv_data;
+ HANDLE dev = priv->dev;
+ uint8_t *in_data = avpkt->data;
+ int len = avpkt->size;
+ int free_data = 0;
+ uint8_t pic_type = 0;
+
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: decode_frame\n");
+
+ if (avpkt->size == 7 && !priv->bframe_bug) {
+ /*
+ * The use of a drop frame triggers the bug
+ */
+ av_log(avctx, AV_LOG_INFO,
+ "CrystalHD: Enabling work-around for packed b-frame bug\n");
+ priv->bframe_bug = 1;
+ } else if (avpkt->size == 8 && priv->bframe_bug) {
+ /*
+ * Delay frames don't trigger the bug
+ */
+ av_log(avctx, AV_LOG_INFO,
+ "CrystalHD: Disabling work-around for packed b-frame bug\n");
+ priv->bframe_bug = 0;
+ }
+
+ if (len) {
+ int32_t tx_free = (int32_t)DtsTxFreeSize(dev);
+
+ if (priv->parser) {
+ int ret = 0;
+
+ if (priv->bsfc) {
+ ret = av_bitstream_filter_filter(priv->bsfc, avctx, NULL,
+ &in_data, &len,
+ avpkt->data, len, 0);
+ }
+ free_data = ret > 0;
+
+ if (ret >= 0) {
+ uint8_t *pout;
+ int psize;
+ int index;
+ H264Context *h = priv->parser->priv_data;
+
+ index = av_parser_parse2(priv->parser, avctx, &pout, &psize,
+ in_data, len, avctx->pkt->pts,
+ avctx->pkt->dts, 0);
+ if (index < 0) {
+ av_log(avctx, AV_LOG_WARNING,
+ "CrystalHD: Failed to parse h.264 packet to "
+ "detect interlacing.\n");
+ } else if (index != len) {
+ av_log(avctx, AV_LOG_WARNING,
+ "CrystalHD: Failed to parse h.264 packet "
+ "completely. Interlaced frames may be "
+ "incorrectly detected.\n");
+ } else {
+ av_log(avctx, AV_LOG_VERBOSE,
+ "CrystalHD: parser picture type %d\n",
+ h->picture_structure);
+ pic_type = h->picture_structure;
+ }
+ } else {
+ av_log(avctx, AV_LOG_WARNING,
+ "CrystalHD: mp4toannexb filter failed to filter "
+ "packet. Interlaced frames may be incorrectly "
+ "detected.\n");
+ }
+ }
+
+ if (len < tx_free - 1024) {
+ /*
+ * Despite being notionally opaque, either libcrystalhd or
+ * the hardware itself will mangle pts values that are too
+ * small or too large. The docs claim it should be in units
+ * of 100ns. Given that we're nominally dealing with a black
+ * box on both sides, any transform we do has no guarantee of
+ * avoiding mangling so we need to build a mapping to values
+ * we know will not be mangled.
+ */
+ uint64_t pts = opaque_list_push(priv, avctx->pkt->pts, pic_type);
+ if (!pts) {
+ if (free_data) {
+ av_freep(&in_data);
+ }
+ return AVERROR(ENOMEM);
+ }
+ av_log(priv->avctx, AV_LOG_VERBOSE,
+ "input \"pts\": %"PRIu64"\n", pts);
+ ret = DtsProcInput(dev, in_data, len, pts, 0);
+ if (free_data) {
+ av_freep(&in_data);
+ }
+ if (ret == BC_STS_BUSY) {
+ av_log(avctx, AV_LOG_WARNING,
+ "CrystalHD: ProcInput returned busy\n");
+ usleep(BASE_WAIT);
+ return AVERROR(EBUSY);
+ } else if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR,
+ "CrystalHD: ProcInput failed: %u\n", ret);
+ return -1;
+ }
+ avctx->has_b_frames++;
+ } else {
+ av_log(avctx, AV_LOG_WARNING, "CrystalHD: Input buffer full\n");
+ len = 0; // We didn't consume any bytes.
+ }
+ } else {
+ av_log(avctx, AV_LOG_INFO, "CrystalHD: No more input data\n");
+ }
+
+ if (priv->skip_next_output) {
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Skipping next output.\n");
+ priv->skip_next_output = 0;
+ avctx->has_b_frames--;
+ return len;
+ }
+
+ ret = DtsGetDriverStatus(dev, &decoder_status);
+ if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "CrystalHD: GetDriverStatus failed\n");
+ return -1;
+ }
+
+ /*
+ * No frames ready. Don't try to extract.
+ *
+ * Empirical testing shows that ReadyListCount can be a damn lie,
+ * and ProcOut still fails when count > 0. The same testing showed
+ * that two more iterations were needed before ProcOutput would
+ * succeed.
+ */
+ if (priv->output_ready < 2) {
+ if (decoder_status.ReadyListCount != 0)
+ priv->output_ready++;
+ usleep(BASE_WAIT);
+ av_log(avctx, AV_LOG_INFO, "CrystalHD: Filling pipeline.\n");
+ return len;
+ } else if (decoder_status.ReadyListCount == 0) {
+ /*
+ * After the pipeline is established, if we encounter a lack of frames
+ * that probably means we're not giving the hardware enough time to
+ * decode them, so start increasing the wait time at the end of a
+ * decode call.
+ */
+ usleep(BASE_WAIT);
+ priv->decode_wait += WAIT_UNIT;
+ av_log(avctx, AV_LOG_INFO, "CrystalHD: No frames ready. Returning\n");
+ return len;
+ }
+
+ do {
+ rec_ret = receive_frame(avctx, data, got_frame);
+ if (rec_ret == RET_OK && *got_frame == 0) {
+ /*
+ * This case is for when the encoded fields are stored
+ * separately and we get a separate avpkt for each one. To keep
+ * the pipeline stable, we should return nothing and wait for
+ * the next time round to grab the second field.
+ * H.264 PAFF is an example of this.
+ */
+ av_log(avctx, AV_LOG_VERBOSE, "Returning after first field.\n");
+ avctx->has_b_frames--;
+ } else if (rec_ret == RET_COPY_NEXT_FIELD) {
+ /*
+ * This case is for when the encoded fields are stored in a
+             * single avpkt but the hardware returns them separately. Unless
+ * we grab the second field before returning, we'll slip another
+ * frame in the pipeline and if that happens a lot, we're sunk.
+ * So we have to get that second field now.
+ * Interlaced mpeg2 and vc1 are examples of this.
+ */
+ av_log(avctx, AV_LOG_VERBOSE, "Trying to get second field.\n");
+ while (1) {
+ usleep(priv->decode_wait);
+ ret = DtsGetDriverStatus(dev, &decoder_status);
+ if (ret == BC_STS_SUCCESS &&
+ decoder_status.ReadyListCount > 0) {
+ rec_ret = receive_frame(avctx, data, got_frame);
+ if ((rec_ret == RET_OK && *got_frame > 0) ||
+ rec_ret == RET_ERROR)
+ break;
+ }
+ }
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Got second field.\n");
+ } else if (rec_ret == RET_SKIP_NEXT_COPY) {
+ /*
+ * Two input packets got turned into a field pair. Gawd.
+ */
+ av_log(avctx, AV_LOG_VERBOSE,
+ "Don't output on next decode call.\n");
+ priv->skip_next_output = 1;
+ }
+ /*
+ * If rec_ret == RET_COPY_AGAIN, that means that either we just handled
+ * a FMT_CHANGE event and need to go around again for the actual frame,
+ * we got a busy status and need to try again, or we're dealing with
+ * packed b-frames, where the hardware strangely returns the packed
+ * p-frame twice. We choose to keep the second copy as it carries the
+ * valid pts.
+ */
+ } while (rec_ret == RET_COPY_AGAIN);
+ usleep(priv->decode_wait);
+ return len;
+}
+
+
+#if CONFIG_H264_CRYSTALHD_DECODER
+static AVClass h264_class = {
+ "h264_crystalhd",
+ av_default_item_name,
+ options,
+ LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_h264_crystalhd_decoder = {
+ .name = "h264_crystalhd",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_H264,
+ .priv_data_size = sizeof(CHDContext),
+ .init = init,
+ .close = uninit,
+ .decode = decode,
+ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+ .flush = flush,
+ .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (CrystalHD acceleration)"),
+ .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
+ .priv_class = &h264_class,
+};
+#endif
+
+#if CONFIG_MPEG2_CRYSTALHD_DECODER
+static AVClass mpeg2_class = {
+ "mpeg2_crystalhd",
+ av_default_item_name,
+ options,
+ LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_mpeg2_crystalhd_decoder = {
+ .name = "mpeg2_crystalhd",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_MPEG2VIDEO,
+ .priv_data_size = sizeof(CHDContext),
+ .init = init,
+ .close = uninit,
+ .decode = decode,
+ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+ .flush = flush,
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-2 Video (CrystalHD acceleration)"),
+ .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
+ .priv_class = &mpeg2_class,
+};
+#endif
+
+#if CONFIG_MPEG4_CRYSTALHD_DECODER
+static AVClass mpeg4_class = {
+ "mpeg4_crystalhd",
+ av_default_item_name,
+ options,
+ LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_mpeg4_crystalhd_decoder = {
+ .name = "mpeg4_crystalhd",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_MPEG4,
+ .priv_data_size = sizeof(CHDContext),
+ .init = init,
+ .close = uninit,
+ .decode = decode,
+ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+ .flush = flush,
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Part 2 (CrystalHD acceleration)"),
+ .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
+ .priv_class = &mpeg4_class,
+};
+#endif
+
+#if CONFIG_MSMPEG4_CRYSTALHD_DECODER
+static AVClass msmpeg4_class = {
+ "msmpeg4_crystalhd",
+ av_default_item_name,
+ options,
+ LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_msmpeg4_crystalhd_decoder = {
+ .name = "msmpeg4_crystalhd",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_MSMPEG4V3,
+ .priv_data_size = sizeof(CHDContext),
+ .init = init,
+ .close = uninit,
+ .decode = decode,
+ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL,
+ .flush = flush,
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Part 2 Microsoft variant version 3 (CrystalHD acceleration)"),
+ .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
+ .priv_class = &msmpeg4_class,
+};
+#endif
+
+#if CONFIG_VC1_CRYSTALHD_DECODER
+static AVClass vc1_class = {
+ "vc1_crystalhd",
+ av_default_item_name,
+ options,
+ LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_vc1_crystalhd_decoder = {
+ .name = "vc1_crystalhd",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_VC1,
+ .priv_data_size = sizeof(CHDContext),
+ .init = init,
+ .close = uninit,
+ .decode = decode,
+ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+ .flush = flush,
+ .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 (CrystalHD acceleration)"),
+ .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
+ .priv_class = &vc1_class,
+};
+#endif
+
+#if CONFIG_WMV3_CRYSTALHD_DECODER
+static AVClass wmv3_class = {
+ "wmv3_crystalhd",
+ av_default_item_name,
+ options,
+ LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_wmv3_crystalhd_decoder = {
+ .name = "wmv3_crystalhd",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_WMV3,
+ .priv_data_size = sizeof(CHDContext),
+ .init = init,
+ .close = uninit,
+ .decode = decode,
+ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+ .flush = flush,
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 (CrystalHD acceleration)"),
+ .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
+ .priv_class = &wmv3_class,
+};
+#endif
#include "libavutil/lzo.h"
typedef struct {
- AVFrame pic;
++ AVFrame *pic;
int linelen, height, bpp;
unsigned int decomp_size;
unsigned char* decomp_buf;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
CamStudioContext *c = avctx->priv_data;
-- AVFrame *picture = data;
int ret;
if (buf_size < 2) {
return AVERROR_INVALIDDATA;
}
- c->pic.reference = 3;
- c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_READABLE |
- FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &c->pic)) < 0) {
- if ((ret = ff_get_buffer(avctx, picture, 0)) < 0) {
++ if ((ret = ff_reget_buffer(avctx, c->pic)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
// flip upside down, add difference frame
if (buf[0] & 1) { // keyframe
- c->pic.pict_type = AV_PICTURE_TYPE_I;
- c->pic.key_frame = 1;
- copy_frame_default(&c->pic, c->decomp_buf,
- picture->pict_type = AV_PICTURE_TYPE_I;
- picture->key_frame = 1;
- switch (c->bpp) {
- case 16:
- copy_frame_16(picture, c->decomp_buf, c->linelen, c->height);
- break;
- case 32:
- copy_frame_32(picture, c->decomp_buf, c->linelen, c->height);
- break;
- default:
- copy_frame_default(picture, c->decomp_buf, FFALIGN(c->linelen, 4),
++ c->pic->pict_type = AV_PICTURE_TYPE_I;
++ c->pic->key_frame = 1;
++ copy_frame_default(c->pic, c->decomp_buf,
c->linelen, c->height);
- }
} else {
- c->pic.pict_type = AV_PICTURE_TYPE_P;
- c->pic.key_frame = 0;
- add_frame_default(&c->pic, c->decomp_buf,
- picture->pict_type = AV_PICTURE_TYPE_P;
- picture->key_frame = 0;
- switch (c->bpp) {
- case 16:
- add_frame_16(picture, c->decomp_buf, c->linelen, c->height);
- break;
- case 32:
- add_frame_32(picture, c->decomp_buf, c->linelen, c->height);
- break;
- default:
- add_frame_default(picture, c->decomp_buf, FFALIGN(c->linelen, 4),
++ c->pic->pict_type = AV_PICTURE_TYPE_P;
++ c->pic->key_frame = 0;
++ add_frame_default(c->pic, c->decomp_buf,
c->linelen, c->height);
- }
}
- *picture = c->pic;
*got_frame = 1;
++ if ((ret = av_frame_ref(data, c->pic)) < 0)
++ return ret;
++
return buf_size;
}
return AVERROR_INVALIDDATA;
}
c->bpp = avctx->bits_per_coded_sample;
- avcodec_get_frame_defaults(&c->pic);
- c->pic.data[0] = NULL;
c->linelen = avctx->width * avctx->bits_per_coded_sample / 8;
c->height = avctx->height;
- stride = c->linelen;
- if (avctx->bits_per_coded_sample == 24)
- stride = FFALIGN(stride, 4);
+ stride = FFALIGN(c->linelen, 4);
c->decomp_size = c->height * stride;
c->decomp_buf = av_malloc(c->decomp_size + AV_LZO_OUTPUT_PADDING);
if (!c->decomp_buf) {
av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
return AVERROR(ENOMEM);
}
++ c->pic = av_frame_alloc();
++ if (!c->pic)
++ return AVERROR(ENOMEM);
return 0;
}
static av_cold int decode_end(AVCodecContext *avctx) {
CamStudioContext *c = avctx->priv_data;
av_freep(&c->decomp_buf);
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
++ av_frame_free(&c->pic);
return 0;
}
if (s->width & 0x3)
return AVERROR_INVALIDDATA;
s->height = avctx->height;
- avcodec_get_frame_defaults(&s->frame);
- avctx->pix_fmt = AV_PIX_FMT_YUV411P;
return 0;
}
return ret;
}
- y_plane = s->frame.data[0];
- u_plane = s->frame.data[1];
- v_plane = s->frame.data[2];
+ y_plane = frame->data[0];
+ u_plane = frame->data[1];
+ v_plane = frame->data[2];
- y_plane += s->frame.linesize[0] * s->height;
+ if (buf_size == rawsize) {
+ int linesize = FFALIGN(s->width,2) * 2;
- y_plane -= s->frame.linesize[0];
++ y_plane += frame->linesize[0] * s->height;
+ for (stream_ptr = 0; stream_ptr < rawsize; stream_ptr += linesize) {
++ y_plane -= frame->linesize[0];
+ memcpy(y_plane, buf+stream_ptr, linesize);
+ }
+ } else {
+
/* iterate through each line in the height */
for (y_ptr = 0, u_ptr = 0, v_ptr = 0;
- y_ptr < (s->height * s->frame.linesize[0]);
- y_ptr += s->frame.linesize[0] - s->width,
- u_ptr += s->frame.linesize[1] - s->width / 4,
- v_ptr += s->frame.linesize[2] - s->width / 4) {
+ y_ptr < (s->height * frame->linesize[0]);
+ y_ptr += frame->linesize[0] - s->width,
+ u_ptr += frame->linesize[1] - s->width / 4,
+ v_ptr += frame->linesize[2] - s->width / 4) {
/* reset predictors */
cur_byte = buf[stream_ptr++];
}
}
+ }
*got_frame = 1;
- *(AVFrame*)data= s->frame;
return buf_size;
}
pal_elems = FFMIN(chunk_size / 3, 256);
for (i = 0; i < pal_elems; i++) {
s->pal[i] = bytestream2_get_be24(&gb) << 2;
- s->pal[i] |= (s->pal[i] >> 6) & 0x333;
+ s->pal[i] |= 0xFFU << 24 | (s->pal[i] >> 6) & 0x30303;
}
- s->pic.palette_has_changed = 1;
+ frame->palette_has_changed = 1;
} else if (chunk_type <= 9) {
if (decoder[chunk_type - 2](&gb, s->frame_buf, avctx->width, avctx->height)) {
av_log(avctx, AV_LOG_ERROR, "Error decoding %s chunk\n",
--- /dev/null
- s->avctx->release_buffer(s->avctx, &s->all_frames[i].avframe);
+/*
+ * Copyright (C) 2007 Marco Gerards <marco@gnu.org>
+ * Copyright (C) 2009 David Conrad
+ * Copyright (C) 2011 Jordi Ortiz
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Dirac Decoder
+ * @author Marco Gerards <marco@gnu.org>, David Conrad, Jordi Ortiz <nenjordi@gmail.com>
+ */
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "get_bits.h"
+#include "bytestream.h"
+#include "internal.h"
+#include "golomb.h"
+#include "dirac_arith.h"
+#include "mpeg12data.h"
+#include "dirac_dwt.h"
+#include "dirac.h"
+#include "diracdsp.h"
+#include "videodsp.h" // for ff_emulated_edge_mc_8
+
+/**
+ * The spec limits the number of wavelet decompositions to 4 for both
+ * level 1 (VC-2) and 128 (long-gop default).
+ * 5 decompositions is the maximum before >16-bit buffers are needed.
+ * Schroedinger allows this for DD 9,7 and 13,7 wavelets only, limiting
+ * the others to 4 decompositions (or 3 for the fidelity filter).
+ *
+ * We use this instead of MAX_DECOMPOSITIONS to save some memory.
+ */
+#define MAX_DWT_LEVELS 5
+
+/**
+ * The spec limits this to 3 for frame coding, but in practice can be as high as 6
+ */
+#define MAX_REFERENCE_FRAMES 8
+#define MAX_DELAY 5 /* limit for main profile for frame coding (TODO: field coding) */
+#define MAX_FRAMES (MAX_REFERENCE_FRAMES + MAX_DELAY + 1)
+#define MAX_QUANT 68 /* max quant for VC-2 */
+#define MAX_BLOCKSIZE 32 /* maximum xblen/yblen we support */
+
+/**
+ * DiracBlock->ref flags, if set then the block does MC from the given ref
+ */
+#define DIRAC_REF_MASK_REF1 1
+#define DIRAC_REF_MASK_REF2 2
+#define DIRAC_REF_MASK_GLOBAL 4
+
+/**
+ * Value of Picture.reference when Picture is not a reference picture, but
+ * is held for delayed output.
+ */
+#define DELAYED_PIC_REF 4
+
+#define ff_emulated_edge_mc ff_emulated_edge_mc_8 /* FIXME: update the callers of this function to handle other bit depths */
+
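+/* Round size up to a multiple of 1 << depth, e.g. CALC_PADDING(100, 3) == 104. */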
+#define CALC_PADDING(size, depth) \
+ (((size + (1 << depth) - 1) >> depth) << depth)
+
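+/* Integer division rounding up, e.g. DIVRNDUP(7, 4) == 2. */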
+#define DIVRNDUP(a, b) (((a) + (b) - 1) / (b))
+
+typedef struct {
+ AVFrame avframe;
+ int interpolated[3]; /* 1 if hpel[] is valid */
+ uint8_t *hpel[3][4];
+ uint8_t *hpel_base[3][4];
+} DiracFrame;
+
+typedef struct {
+ union {
+ int16_t mv[2][2];
+ int16_t dc[3];
+ } u; /* anonymous unions aren't in C99 :( */
+ uint8_t ref;
+} DiracBlock;
+
+typedef struct SubBand {
+ int level;
+ int orientation;
+ int stride;
+ int width;
+ int height;
+ int quant;
+ IDWTELEM *ibuf;
+ struct SubBand *parent;
+
+ /* for low delay */
+ unsigned length;
+ const uint8_t *coeff_data;
+} SubBand;
+
+typedef struct Plane {
+ int width;
+ int height;
+ int stride;
+
+ int idwt_width;
+ int idwt_height;
+ int idwt_stride;
+ IDWTELEM *idwt_buf;
+ IDWTELEM *idwt_buf_base;
+ IDWTELEM *idwt_tmp;
+
+ /* block length */
+ uint8_t xblen;
+ uint8_t yblen;
+ /* block separation (block n+1 starts after this many pixels in block n) */
+ uint8_t xbsep;
+ uint8_t ybsep;
+ /* amount of overspill on each edge (half of the overlap between blocks) */
+ uint8_t xoffset;
+ uint8_t yoffset;
+
+ SubBand band[MAX_DWT_LEVELS][4];
+} Plane;
+
+typedef struct DiracContext {
+ AVCodecContext *avctx;
+ DSPContext dsp;
+ DiracDSPContext diracdsp;
+ GetBitContext gb;
+ dirac_source_params source;
+ int seen_sequence_header;
+ int frame_number; /* number of the next frame to display */
+ Plane plane[3];
+ int chroma_x_shift;
+ int chroma_y_shift;
+
+ int zero_res; /* zero residue flag */
+ int is_arith; /* whether coeffs use arith or golomb coding */
+ int low_delay; /* use the low delay syntax */
+ int globalmc_flag; /* use global motion compensation */
+ int num_refs; /* number of reference pictures */
+
+ /* wavelet decoding */
+ unsigned wavelet_depth; /* depth of the IDWT */
+ unsigned wavelet_idx;
+
+ /**
+ * schroedinger older than 1.0.8 doesn't store
+ * quant delta if only one codebook exists in a band
+ */
+ unsigned old_delta_quant;
+ unsigned codeblock_mode;
+
+ struct {
+ unsigned width;
+ unsigned height;
+ } codeblock[MAX_DWT_LEVELS+1];
+
+ struct {
+ unsigned num_x; /* number of horizontal slices */
+ unsigned num_y; /* number of vertical slices */
+ AVRational bytes; /* average bytes per slice */
+ uint8_t quant[MAX_DWT_LEVELS][4]; /* [DIRAC_STD] E.1 */
+ } lowdelay;
+
+ struct {
+ int pan_tilt[2]; /* pan/tilt vector */
+ int zrs[2][2]; /* zoom/rotate/shear matrix */
+ int perspective[2]; /* perspective vector */
+ unsigned zrs_exp;
+ unsigned perspective_exp;
+ } globalmc[2];
+
+ /* motion compensation */
+ uint8_t mv_precision; /* [DIRAC_STD] REFS_WT_PRECISION */
+ int16_t weight[2]; /* [DIRAC_STD] REF1_WT and REF2_WT */
+ unsigned weight_log2denom; /* [DIRAC_STD] REFS_WT_PRECISION */
+
+ int blwidth; /* number of blocks (horizontally) */
+ int blheight; /* number of blocks (vertically) */
+ int sbwidth; /* number of superblocks (horizontally) */
+ int sbheight; /* number of superblocks (vertically) */
+
+ uint8_t *sbsplit;
+ DiracBlock *blmotion;
+
+ uint8_t *edge_emu_buffer[4];
+ uint8_t *edge_emu_buffer_base;
+
+    uint16_t *mctmp;          /* buffer holding the MC data multiplied by OBMC weights */
+ uint8_t *mcscratch;
+
+ DECLARE_ALIGNED(16, uint8_t, obmc_weight)[3][MAX_BLOCKSIZE*MAX_BLOCKSIZE];
+
+ void (*put_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h);
+ void (*avg_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h);
+ void (*add_obmc)(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen);
+ dirac_weight_func weight_func;
+ dirac_biweight_func biweight_func;
+
+ DiracFrame *current_picture;
+ DiracFrame *ref_pics[2];
+
+ DiracFrame *ref_frames[MAX_REFERENCE_FRAMES+1];
+ DiracFrame *delay_frames[MAX_DELAY+1];
+ DiracFrame all_frames[MAX_FRAMES];
+} DiracContext;
+
+/**
+ * Dirac Specification ->
+ * Parse code values. 9.6.1 Table 9.1
+ */
+enum dirac_parse_code {
+ pc_seq_header = 0x00,
+ pc_eos = 0x10,
+ pc_aux_data = 0x20,
+ pc_padding = 0x30,
+};
+
+enum dirac_subband {
+ subband_ll = 0,
+ subband_hl = 1,
+ subband_lh = 2,
+ subband_hh = 3
+};
+
+static const uint8_t default_qmat[][4][4] = {
+ { { 5, 3, 3, 0}, { 0, 4, 4, 1}, { 0, 5, 5, 2}, { 0, 6, 6, 3} },
+ { { 4, 2, 2, 0}, { 0, 4, 4, 2}, { 0, 5, 5, 3}, { 0, 7, 7, 5} },
+ { { 5, 3, 3, 0}, { 0, 4, 4, 1}, { 0, 5, 5, 2}, { 0, 6, 6, 3} },
+ { { 8, 4, 4, 0}, { 0, 4, 4, 0}, { 0, 4, 4, 0}, { 0, 4, 4, 0} },
+ { { 8, 4, 4, 0}, { 0, 4, 4, 0}, { 0, 4, 4, 0}, { 0, 4, 4, 0} },
+ { { 0, 4, 4, 8}, { 0, 8, 8, 12}, { 0, 13, 13, 17}, { 0, 17, 17, 21} },
+ { { 3, 1, 1, 0}, { 0, 4, 4, 2}, { 0, 6, 6, 5}, { 0, 9, 9, 7} },
+};
+
+static const int qscale_tab[MAX_QUANT+1] = {
+ 4, 5, 6, 7, 8, 10, 11, 13,
+ 16, 19, 23, 27, 32, 38, 45, 54,
+ 64, 76, 91, 108, 128, 152, 181, 215,
+ 256, 304, 362, 431, 512, 609, 724, 861,
+ 1024, 1218, 1448, 1722, 2048, 2435, 2896, 3444,
+ 4096, 4871, 5793, 6889, 8192, 9742, 11585, 13777,
+ 16384, 19484, 23170, 27554, 32768, 38968, 46341, 55109,
+ 65536, 77936
+};
+
+static const int qoffset_intra_tab[MAX_QUANT+1] = {
+ 1, 2, 3, 4, 4, 5, 6, 7,
+ 8, 10, 12, 14, 16, 19, 23, 27,
+ 32, 38, 46, 54, 64, 76, 91, 108,
+ 128, 152, 181, 216, 256, 305, 362, 431,
+ 512, 609, 724, 861, 1024, 1218, 1448, 1722,
+ 2048, 2436, 2897, 3445, 4096, 4871, 5793, 6889,
+ 8192, 9742, 11585, 13777, 16384, 19484, 23171, 27555,
+ 32768, 38968
+};
+
+static const int qoffset_inter_tab[MAX_QUANT+1] = {
+ 1, 2, 2, 3, 3, 4, 4, 5,
+ 6, 7, 9, 10, 12, 14, 17, 20,
+ 24, 29, 34, 41, 48, 57, 68, 81,
+ 96, 114, 136, 162, 192, 228, 272, 323,
+ 384, 457, 543, 646, 768, 913, 1086, 1292,
+ 1536, 1827, 2172, 2583, 3072, 3653, 4344, 5166,
+ 6144, 7307, 8689, 10333, 12288, 14613, 17378, 20666,
+ 24576, 29226
+};
+
+/* magic number division by 3 from schroedinger */
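+/* 21845 ~= 2^16/3; the whole expression evaluates to x/3 rounded to nearest
+ * for the small values used in the DC prediction below */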
+static inline int divide3(int x)
+{
+ return ((x+1)*21845 + 10922) >> 16;
+}
+
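+/*
+ * Remove the frame with the given display picture number from the list and
+ * compact the remaining entries. Returns NULL if it is not present.
+ */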
+static DiracFrame *remove_frame(DiracFrame *framelist[], int picnum)
+{
+ DiracFrame *remove_pic = NULL;
+ int i, remove_idx = -1;
+
+ for (i = 0; framelist[i]; i++)
+ if (framelist[i]->avframe.display_picture_number == picnum) {
+ remove_pic = framelist[i];
+ remove_idx = i;
+ }
+
+ if (remove_pic)
+ for (i = remove_idx; framelist[i]; i++)
+ framelist[i] = framelist[i+1];
+
+ return remove_pic;
+}
+
+static int add_frame(DiracFrame *framelist[], int maxframes, DiracFrame *frame)
+{
+ int i;
+ for (i = 0; i < maxframes; i++)
+ if (!framelist[i]) {
+ framelist[i] = frame;
+ return 0;
+ }
+ return -1;
+}
+
+static int alloc_sequence_buffers(DiracContext *s)
+{
+ int sbwidth = DIVRNDUP(s->source.width, 4);
+ int sbheight = DIVRNDUP(s->source.height, 4);
+ int i, w, h, top_padding;
+
+ /* todo: think more about this / use or set Plane here */
+ for (i = 0; i < 3; i++) {
+ int max_xblen = MAX_BLOCKSIZE >> (i ? s->chroma_x_shift : 0);
+ int max_yblen = MAX_BLOCKSIZE >> (i ? s->chroma_y_shift : 0);
+ w = s->source.width >> (i ? s->chroma_x_shift : 0);
+ h = s->source.height >> (i ? s->chroma_y_shift : 0);
+
+ /* we allocate the max we support here since num decompositions can
+ * change from frame to frame. Stride is aligned to 16 for SIMD, and
+ * 1<<MAX_DWT_LEVELS top padding to avoid if(y>0) in arith decoding
+ * MAX_BLOCKSIZE padding for MC: blocks can spill up to half of that
+ * on each side */
+ top_padding = FFMAX(1<<MAX_DWT_LEVELS, max_yblen/2);
+ w = FFALIGN(CALC_PADDING(w, MAX_DWT_LEVELS), 8); /* FIXME: Should this be 16 for SSE??? */
+ h = top_padding + CALC_PADDING(h, MAX_DWT_LEVELS) + max_yblen/2;
+
+ s->plane[i].idwt_buf_base = av_mallocz((w+max_xblen)*h * sizeof(IDWTELEM));
+ s->plane[i].idwt_tmp = av_malloc((w+16) * sizeof(IDWTELEM));
+ s->plane[i].idwt_buf = s->plane[i].idwt_buf_base + top_padding*w;
+ if (!s->plane[i].idwt_buf_base || !s->plane[i].idwt_tmp)
+ return AVERROR(ENOMEM);
+ }
+
+ w = s->source.width;
+ h = s->source.height;
+
+ /* fixme: allocate using real stride here */
+ s->sbsplit = av_malloc(sbwidth * sbheight);
+ s->blmotion = av_malloc(sbwidth * sbheight * 16 * sizeof(*s->blmotion));
+ s->edge_emu_buffer_base = av_malloc((w+64)*MAX_BLOCKSIZE);
+
+ s->mctmp = av_malloc((w+64+MAX_BLOCKSIZE) * (h+MAX_BLOCKSIZE) * sizeof(*s->mctmp));
+ s->mcscratch = av_malloc((w+64)*MAX_BLOCKSIZE);
+
+ if (!s->sbsplit || !s->blmotion || !s->mctmp || !s->mcscratch)
+ return AVERROR(ENOMEM);
+ return 0;
+}
+
+static void free_sequence_buffers(DiracContext *s)
+{
+ int i, j, k;
+
+ for (i = 0; i < MAX_FRAMES; i++) {
+ if (s->all_frames[i].avframe.data[0]) {
- ff_get_buffer(s->avctx, &s->ref_pics[i]->avframe);
++ av_frame_unref(&s->all_frames[i].avframe);
+ memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated));
+ }
+
+ for (j = 0; j < 3; j++)
+ for (k = 1; k < 4; k++)
+ av_freep(&s->all_frames[i].hpel_base[j][k]);
+ }
+
+ memset(s->ref_frames, 0, sizeof(s->ref_frames));
+ memset(s->delay_frames, 0, sizeof(s->delay_frames));
+
+ for (i = 0; i < 3; i++) {
+ av_freep(&s->plane[i].idwt_buf_base);
+ av_freep(&s->plane[i].idwt_tmp);
+ }
+
+ av_freep(&s->sbsplit);
+ av_freep(&s->blmotion);
+ av_freep(&s->edge_emu_buffer_base);
+
+ av_freep(&s->mctmp);
+ av_freep(&s->mcscratch);
+}
+
+static av_cold int dirac_decode_init(AVCodecContext *avctx)
+{
+ DiracContext *s = avctx->priv_data;
+ s->avctx = avctx;
+ s->frame_number = -1;
+
+ if (avctx->flags&CODEC_FLAG_EMU_EDGE) {
+ av_log(avctx, AV_LOG_ERROR, "Edge emulation not supported!\n");
+ return AVERROR_PATCHWELCOME;
+ }
+
+ ff_dsputil_init(&s->dsp, avctx);
+ ff_diracdsp_init(&s->diracdsp);
+
+ return 0;
+}
+
+static void dirac_decode_flush(AVCodecContext *avctx)
+{
+ DiracContext *s = avctx->priv_data;
+ free_sequence_buffers(s);
+ s->seen_sequence_header = 0;
+ s->frame_number = -1;
+}
+
+static av_cold int dirac_decode_end(AVCodecContext *avctx)
+{
+ dirac_decode_flush(avctx);
+ return 0;
+}
+
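+/* Select the arithmetic-coder sign context from the sign of the prediction. */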
+#define SIGN_CTX(x) (CTX_SIGN_ZERO + ((x) > 0) - ((x) < 0))
+
+static inline void coeff_unpack_arith(DiracArith *c, int qfactor, int qoffset,
+ SubBand *b, IDWTELEM *buf, int x, int y)
+{
+ int coeff, sign;
+ int sign_pred = 0;
+ int pred_ctx = CTX_ZPZN_F1;
+
+ /* Check if the parent subband has a 0 in the corresponding position */
+ if (b->parent)
+ pred_ctx += !!b->parent->ibuf[b->parent->stride * (y>>1) + (x>>1)] << 1;
+
+ if (b->orientation == subband_hl)
+ sign_pred = buf[-b->stride];
+
+ /* Determine if the pixel has only zeros in its neighbourhood */
+ if (x) {
+ pred_ctx += !(buf[-1] | buf[-b->stride] | buf[-1-b->stride]);
+ if (b->orientation == subband_lh)
+ sign_pred = buf[-1];
+ } else {
+ pred_ctx += !buf[-b->stride];
+ }
+
+ coeff = dirac_get_arith_uint(c, pred_ctx, CTX_COEFF_DATA);
+ if (coeff) {
+ coeff = (coeff * qfactor + qoffset + 2) >> 2;
+ sign = dirac_get_arith_bit(c, SIGN_CTX(sign_pred));
+ coeff = (coeff ^ -sign) + sign;
+ }
+ *buf = coeff;
+}
+
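+/* Decode a single golomb-coded coefficient and dequantize it
+ * (scale by qfactor, add qoffset, with the sign sent as an extra bit). */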
+static inline int coeff_unpack_golomb(GetBitContext *gb, int qfactor, int qoffset)
+{
+ int sign, coeff;
+
+ coeff = svq3_get_ue_golomb(gb);
+ if (coeff) {
+ coeff = (coeff * qfactor + qoffset + 2) >> 2;
+ sign = get_bits1(gb);
+ coeff = (coeff ^ -sign) + sign;
+ }
+ return coeff;
+}
+
+/**
+ * Decode the coeffs in the rectangle defined by left, right, top, bottom
+ * [DIRAC_STD] 13.4.3.2 Codeblock unpacking loop. codeblock()
+ */
+static inline void codeblock(DiracContext *s, SubBand *b,
+ GetBitContext *gb, DiracArith *c,
+ int left, int right, int top, int bottom,
+ int blockcnt_one, int is_arith)
+{
+ int x, y, zero_block;
+ int qoffset, qfactor;
+ IDWTELEM *buf;
+
+ /* check for any coded coefficients in this codeblock */
+ if (!blockcnt_one) {
+ if (is_arith)
+ zero_block = dirac_get_arith_bit(c, CTX_ZERO_BLOCK);
+ else
+ zero_block = get_bits1(gb);
+
+ if (zero_block)
+ return;
+ }
+
+ if (s->codeblock_mode && !(s->old_delta_quant && blockcnt_one)) {
+ int quant = b->quant;
+ if (is_arith)
+ quant += dirac_get_arith_int(c, CTX_DELTA_Q_F, CTX_DELTA_Q_DATA);
+ else
+ quant += dirac_get_se_golomb(gb);
+ if (quant < 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "Invalid quant\n");
+ return;
+ }
+ b->quant = quant;
+ }
+
+ b->quant = FFMIN(b->quant, MAX_QUANT);
+
+ qfactor = qscale_tab[b->quant];
+ /* TODO: context pointer? */
+ if (!s->num_refs)
+ qoffset = qoffset_intra_tab[b->quant];
+ else
+ qoffset = qoffset_inter_tab[b->quant];
+
+ buf = b->ibuf + top * b->stride;
+ for (y = top; y < bottom; y++) {
+ for (x = left; x < right; x++) {
+ /* [DIRAC_STD] 13.4.4 Subband coefficients. coeff_unpack() */
+ if (is_arith)
+ coeff_unpack_arith(c, qfactor, qoffset, b, buf+x, x, y);
+ else
+ buf[x] = coeff_unpack_golomb(gb, qfactor, qoffset);
+ }
+ buf += b->stride;
+ }
+}
+
+/**
+ * Dirac Specification ->
+ * 13.3 intra_dc_prediction(band)
+ */
+static inline void intra_dc_prediction(SubBand *b)
+{
+ IDWTELEM *buf = b->ibuf;
+ int x, y;
+
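+    /* the first row is predicted from the left neighbour only */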
+ for (x = 1; x < b->width; x++)
+ buf[x] += buf[x-1];
+ buf += b->stride;
+
+ for (y = 1; y < b->height; y++) {
+ buf[0] += buf[-b->stride];
+
+ for (x = 1; x < b->width; x++) {
+ int pred = buf[x - 1] + buf[x - b->stride] + buf[x - b->stride-1];
+ buf[x] += divide3(pred);
+ }
+ buf += b->stride;
+ }
+}
+
+/**
+ * Dirac Specification ->
+ * 13.4.2 Non-skipped subbands. subband_coeffs()
+ */
+static av_always_inline void decode_subband_internal(DiracContext *s, SubBand *b, int is_arith)
+{
+ int cb_x, cb_y, left, right, top, bottom;
+ DiracArith c;
+ GetBitContext gb;
+ int cb_width = s->codeblock[b->level + (b->orientation != subband_ll)].width;
+ int cb_height = s->codeblock[b->level + (b->orientation != subband_ll)].height;
+ int blockcnt_one = (cb_width + cb_height) == 2;
+
+ if (!b->length)
+ return;
+
+ init_get_bits(&gb, b->coeff_data, b->length*8);
+
+ if (is_arith)
+ ff_dirac_init_arith_decoder(&c, &gb, b->length);
+
+ top = 0;
+ for (cb_y = 0; cb_y < cb_height; cb_y++) {
+ bottom = (b->height * (cb_y+1)) / cb_height;
+ left = 0;
+ for (cb_x = 0; cb_x < cb_width; cb_x++) {
+ right = (b->width * (cb_x+1)) / cb_width;
+ codeblock(s, b, &gb, &c, left, right, top, bottom, blockcnt_one, is_arith);
+ left = right;
+ }
+ top = bottom;
+ }
+
+ if (b->orientation == subband_ll && s->num_refs == 0)
+ intra_dc_prediction(b);
+}
+
+static int decode_subband_arith(AVCodecContext *avctx, void *b)
+{
+ DiracContext *s = avctx->priv_data;
+ decode_subband_internal(s, b, 1);
+ return 0;
+}
+
+static int decode_subband_golomb(AVCodecContext *avctx, void *arg)
+{
+ DiracContext *s = avctx->priv_data;
+ SubBand **b = arg;
+ decode_subband_internal(s, *b, 0);
+ return 0;
+}
+
+/**
+ * Dirac Specification ->
+ * [DIRAC_STD] 13.4.1 core_transform_data()
+ */
+static void decode_component(DiracContext *s, int comp)
+{
+ AVCodecContext *avctx = s->avctx;
+ SubBand *bands[3*MAX_DWT_LEVELS+1];
+ enum dirac_subband orientation;
+ int level, num_bands = 0;
+
+ /* Unpack all subbands at all levels. */
+ for (level = 0; level < s->wavelet_depth; level++) {
+ for (orientation = !!level; orientation < 4; orientation++) {
+ SubBand *b = &s->plane[comp].band[level][orientation];
+ bands[num_bands++] = b;
+
+ align_get_bits(&s->gb);
+ /* [DIRAC_STD] 13.4.2 subband() */
+ b->length = svq3_get_ue_golomb(&s->gb);
+ if (b->length) {
+ b->quant = svq3_get_ue_golomb(&s->gb);
+ align_get_bits(&s->gb);
+ b->coeff_data = s->gb.buffer + get_bits_count(&s->gb)/8;
+ b->length = FFMIN(b->length, FFMAX(get_bits_left(&s->gb)/8, 0));
+ skip_bits_long(&s->gb, b->length*8);
+ }
+ }
+ /* arithmetic coding has inter-level dependencies, so we can only execute one level at a time */
+ if (s->is_arith)
+ avctx->execute(avctx, decode_subband_arith, &s->plane[comp].band[level][!!level],
+ NULL, 4-!!level, sizeof(SubBand));
+ }
+ /* golomb coding has no inter-level dependencies, so we can execute all subbands in parallel */
+ if (!s->is_arith)
+ avctx->execute(avctx, decode_subband_golomb, bands, NULL, num_bands, sizeof(SubBand*));
+}
+
+/* [DIRAC_STD] 13.5.5.2 Luma slice subband data. luma_slice_band(level,orient,sx,sy) --> if b2 == NULL */
+/* [DIRAC_STD] 13.5.5.3 Chroma slice subband data. chroma_slice_band(level,orient,sx,sy) --> if b2 != NULL */
+static void lowdelay_subband(DiracContext *s, GetBitContext *gb, int quant,
+ int slice_x, int slice_y, int bits_end,
+ SubBand *b1, SubBand *b2)
+{
+ int left = b1->width * slice_x / s->lowdelay.num_x;
+ int right = b1->width *(slice_x+1) / s->lowdelay.num_x;
+ int top = b1->height * slice_y / s->lowdelay.num_y;
+ int bottom = b1->height *(slice_y+1) / s->lowdelay.num_y;
+
+ int qfactor = qscale_tab[FFMIN(quant, MAX_QUANT)];
+ int qoffset = qoffset_intra_tab[FFMIN(quant, MAX_QUANT)];
+
+ IDWTELEM *buf1 = b1->ibuf + top * b1->stride;
+ IDWTELEM *buf2 = b2 ? b2->ibuf + top * b2->stride : NULL;
+ int x, y;
+ /* we have to constantly check for overread since the spec explicitly
+ requires this, with the meaning that all remaining coeffs are set to 0 */
+ if (get_bits_count(gb) >= bits_end)
+ return;
+
+ for (y = top; y < bottom; y++) {
+ for (x = left; x < right; x++) {
+ buf1[x] = coeff_unpack_golomb(gb, qfactor, qoffset);
+ if (get_bits_count(gb) >= bits_end)
+ return;
+ if (buf2) {
+ buf2[x] = coeff_unpack_golomb(gb, qfactor, qoffset);
+ if (get_bits_count(gb) >= bits_end)
+ return;
+ }
+ }
+ buf1 += b1->stride;
+ if (buf2)
+ buf2 += b2->stride;
+ }
+}
+
+struct lowdelay_slice {
+ GetBitContext gb;
+ int slice_x;
+ int slice_y;
+ int bytes;
+};
+
+
+/**
+ * Dirac Specification ->
+ * 13.5.2 Slices. slice(sx,sy)
+ */
+static int decode_lowdelay_slice(AVCodecContext *avctx, void *arg)
+{
+ DiracContext *s = avctx->priv_data;
+ struct lowdelay_slice *slice = arg;
+ GetBitContext *gb = &slice->gb;
+ enum dirac_subband orientation;
+ int level, quant, chroma_bits, chroma_end;
+
+ int quant_base = get_bits(gb, 7); /*[DIRAC_STD] qindex */
+ int length_bits = av_log2(8 * slice->bytes)+1;
+ int luma_bits = get_bits_long(gb, length_bits);
+ int luma_end = get_bits_count(gb) + FFMIN(luma_bits, get_bits_left(gb));
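+ /* slice layout: a 7-bit qindex, a length_bits-wide field giving the luma
+ bit count, the luma coefficients, then chroma filling the remainder */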
+
+ /* [DIRAC_STD] 13.5.5.2 luma_slice_band */
+ for (level = 0; level < s->wavelet_depth; level++)
+ for (orientation = !!level; orientation < 4; orientation++) {
+ quant = FFMAX(quant_base - s->lowdelay.quant[level][orientation], 0);
+ lowdelay_subband(s, gb, quant, slice->slice_x, slice->slice_y, luma_end,
+ &s->plane[0].band[level][orientation], NULL);
+ }
+
+ /* consume any unused bits from luma */
+ skip_bits_long(gb, luma_end - get_bits_count(gb));
+
+ chroma_bits = 8*slice->bytes - 7 - length_bits - luma_bits;
+ chroma_end = get_bits_count(gb) + FFMIN(chroma_bits, get_bits_left(gb));
+ /* [DIRAC_STD] 13.5.5.3 chroma_slice_band */
+ for (level = 0; level < s->wavelet_depth; level++)
+ for (orientation = !!level; orientation < 4; orientation++) {
+ quant = FFMAX(quant_base - s->lowdelay.quant[level][orientation], 0);
+ lowdelay_subband(s, gb, quant, slice->slice_x, slice->slice_y, chroma_end,
+ &s->plane[1].band[level][orientation],
+ &s->plane[2].band[level][orientation]);
+ }
+
+ return 0;
+}
+
+/**
+ * Dirac Specification ->
+ * 13.5.1 low_delay_transform_data()
+ */
+static void decode_lowdelay(DiracContext *s)
+{
+ AVCodecContext *avctx = s->avctx;
+ int slice_x, slice_y, bytes, bufsize;
+ const uint8_t *buf;
+ struct lowdelay_slice *slices;
+ int slice_num = 0;
+
+ slices = av_mallocz(s->lowdelay.num_x * s->lowdelay.num_y * sizeof(struct lowdelay_slice));
+
+ align_get_bits(&s->gb);
+ /*[DIRAC_STD] 13.5.2 Slices. slice(sx,sy) */
+ buf = s->gb.buffer + get_bits_count(&s->gb)/8;
+ bufsize = get_bits_left(&s->gb);
+
+ for (slice_y = 0; bufsize > 0 && slice_y < s->lowdelay.num_y; slice_y++)
+ for (slice_x = 0; bufsize > 0 && slice_x < s->lowdelay.num_x; slice_x++) {
+ bytes = (slice_num+1) * s->lowdelay.bytes.num / s->lowdelay.bytes.den
+ - slice_num * s->lowdelay.bytes.num / s->lowdelay.bytes.den;
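+ /* taking successive differences of slice_num*num/den spreads the
+ rounding evenly: e.g. bytes.num/den = 100/3 gives sizes 33,33,34 */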
+
+ slices[slice_num].bytes = bytes;
+ slices[slice_num].slice_x = slice_x;
+ slices[slice_num].slice_y = slice_y;
+ init_get_bits(&slices[slice_num].gb, buf, bufsize);
+ slice_num++;
+
+ buf += bytes;
+ bufsize -= bytes*8;
+ }
+
+ avctx->execute(avctx, decode_lowdelay_slice, slices, NULL, slice_num,
+ sizeof(struct lowdelay_slice)); /* [DIRAC_STD] 13.5.2 Slices */
+ intra_dc_prediction(&s->plane[0].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
+ intra_dc_prediction(&s->plane[1].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
+ intra_dc_prediction(&s->plane[2].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
+ av_free(slices);
+}
+
+static void init_planes(DiracContext *s)
+{
+ int i, w, h, level, orientation;
+
+ for (i = 0; i < 3; i++) {
+ Plane *p = &s->plane[i];
+
+ p->width = s->source.width >> (i ? s->chroma_x_shift : 0);
+ p->height = s->source.height >> (i ? s->chroma_y_shift : 0);
+ p->idwt_width = w = CALC_PADDING(p->width , s->wavelet_depth);
+ p->idwt_height = h = CALC_PADDING(p->height, s->wavelet_depth);
+ p->idwt_stride = FFALIGN(p->idwt_width, 8);
+
+ for (level = s->wavelet_depth-1; level >= 0; level--) {
+ w = w>>1;
+ h = h>>1;
+ for (orientation = !!level; orientation < 4; orientation++) {
+ SubBand *b = &p->band[level][orientation];
+
+ b->ibuf = p->idwt_buf;
+ b->level = level;
+ b->stride = p->idwt_stride << (s->wavelet_depth - level);
+ b->width = w;
+ b->height = h;
+ b->orientation = orientation;
+
+ if (orientation & 1)
+ b->ibuf += w;
+ if (orientation > 1)
+ b->ibuf += b->stride>>1;
+
+ if (level)
+ b->parent = &p->band[level-1][orientation];
+ }
+ }
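+ /* the subbands share one padded buffer: odd orientations sit w columns to
+ the right and orientations above 1 half a stride down, matching the
+ in-place wavelet layout */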
+
+ if (i > 0) {
+ p->xblen = s->plane[0].xblen >> s->chroma_x_shift;
+ p->yblen = s->plane[0].yblen >> s->chroma_y_shift;
+ p->xbsep = s->plane[0].xbsep >> s->chroma_x_shift;
+ p->ybsep = s->plane[0].ybsep >> s->chroma_y_shift;
+ }
+
+ p->xoffset = (p->xblen - p->xbsep)/2;
+ p->yoffset = (p->yblen - p->ybsep)/2;
+ }
+}
+
+/**
+ * Unpack the motion compensation parameters
+ * Dirac Specification ->
+ * 11.2 Picture prediction data. picture_prediction()
+ */
+static int dirac_unpack_prediction_parameters(DiracContext *s)
+{
+ static const uint8_t default_blen[] = { 8, 12, 16, 24 };
+ static const uint8_t default_bsep[] = { 4, 8, 12, 16 };
+
+ GetBitContext *gb = &s->gb;
+ unsigned idx, ref;
+
+ align_get_bits(gb);
+ /* [DIRAC_STD] 11.2.2 Block parameters. block_parameters() */
+ /* Luma and Chroma are equal. 11.2.3 */
+ idx = svq3_get_ue_golomb(gb); /* [DIRAC_STD] index */
+
+ if (idx > 4) {
+ av_log(s->avctx, AV_LOG_ERROR, "Block prediction index too high\n");
+ return -1;
+ }
+
+ if (idx == 0) {
+ s->plane[0].xblen = svq3_get_ue_golomb(gb);
+ s->plane[0].yblen = svq3_get_ue_golomb(gb);
+ s->plane[0].xbsep = svq3_get_ue_golomb(gb);
+ s->plane[0].ybsep = svq3_get_ue_golomb(gb);
+ } else {
+ /*[DIRAC_STD] preset_block_params(index). Table 11.1 */
+ s->plane[0].xblen = default_blen[idx-1];
+ s->plane[0].yblen = default_blen[idx-1];
+ s->plane[0].xbsep = default_bsep[idx-1];
+ s->plane[0].ybsep = default_bsep[idx-1];
+ }
+ /*[DIRAC_STD] 11.2.4 motion_data_dimensions()
+ Calculated in function dirac_unpack_block_motion_data */
+
+ if (!s->plane[0].xbsep || !s->plane[0].ybsep || s->plane[0].xbsep < s->plane[0].xblen/2 || s->plane[0].ybsep < s->plane[0].yblen/2) {
+ av_log(s->avctx, AV_LOG_ERROR, "Block separation too small\n");
+ return -1;
+ }
+ if (s->plane[0].xbsep > s->plane[0].xblen || s->plane[0].ybsep > s->plane[0].yblen) {
+ av_log(s->avctx, AV_LOG_ERROR, "Block separation greater than size\n");
+ return -1;
+ }
+ if (FFMAX(s->plane[0].xblen, s->plane[0].yblen) > MAX_BLOCKSIZE) {
+ av_log(s->avctx, AV_LOG_ERROR, "Unsupported large block size\n");
+ return -1;
+ }
+
+ /*[DIRAC_STD] 11.2.5 Motion vector precision. motion_vector_precision()
+ Read motion vector precision */
+ s->mv_precision = svq3_get_ue_golomb(gb);
+ if (s->mv_precision > 3) {
+ av_log(s->avctx, AV_LOG_ERROR, "MV precision finer than eighth-pel\n");
+ return -1;
+ }
+
+ /*[DIRAC_STD] 11.2.6 Global motion. global_motion()
+ Read the global motion compensation parameters */
+ s->globalmc_flag = get_bits1(gb);
+ if (s->globalmc_flag) {
+ memset(s->globalmc, 0, sizeof(s->globalmc));
+ /* [DIRAC_STD] pan_tilt(gparams) */
+ for (ref = 0; ref < s->num_refs; ref++) {
+ if (get_bits1(gb)) {
+ s->globalmc[ref].pan_tilt[0] = dirac_get_se_golomb(gb);
+ s->globalmc[ref].pan_tilt[1] = dirac_get_se_golomb(gb);
+ }
+ /* [DIRAC_STD] zoom_rotate_shear(gparams)
+ zoom/rotation/shear parameters */
+ if (get_bits1(gb)) {
+ s->globalmc[ref].zrs_exp = svq3_get_ue_golomb(gb);
+ s->globalmc[ref].zrs[0][0] = dirac_get_se_golomb(gb);
+ s->globalmc[ref].zrs[0][1] = dirac_get_se_golomb(gb);
+ s->globalmc[ref].zrs[1][0] = dirac_get_se_golomb(gb);
+ s->globalmc[ref].zrs[1][1] = dirac_get_se_golomb(gb);
+ } else {
+ s->globalmc[ref].zrs[0][0] = 1;
+ s->globalmc[ref].zrs[1][1] = 1;
+ }
+ /* [DIRAC_STD] perspective(gparams) */
+ if (get_bits1(gb)) {
+ s->globalmc[ref].perspective_exp = svq3_get_ue_golomb(gb);
+ s->globalmc[ref].perspective[0] = dirac_get_se_golomb(gb);
+ s->globalmc[ref].perspective[1] = dirac_get_se_golomb(gb);
+ }
+ }
+ }
+
+ /*[DIRAC_STD] 11.2.7 Picture prediction mode. prediction_mode()
+ Picture prediction mode, not currently used. */
+ if (svq3_get_ue_golomb(gb)) {
+ av_log(s->avctx, AV_LOG_ERROR, "Unknown picture prediction mode\n");
+ return -1;
+ }
+
+ /* [DIRAC_STD] 11.2.8 Reference picture weight. reference_picture_weights()
+ just data read, weight calculation will be done later on. */
+ s->weight_log2denom = 1;
+ s->weight[0] = 1;
+ s->weight[1] = 1;
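+ /* with these defaults (denominator 2^1, weights 1,1), biprediction is a
+ plain average and single-reference weighting is a no-op */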
+
+ if (get_bits1(gb)) {
+ s->weight_log2denom = svq3_get_ue_golomb(gb);
+ s->weight[0] = dirac_get_se_golomb(gb);
+ if (s->num_refs == 2)
+ s->weight[1] = dirac_get_se_golomb(gb);
+ }
+ return 0;
+}
+
+/**
+ * Dirac Specification ->
+ * 11.3 Wavelet transform data. wavelet_transform()
+ */
+static int dirac_unpack_idwt_params(DiracContext *s)
+{
+ GetBitContext *gb = &s->gb;
+ int i, level;
+ unsigned tmp;
+
+#define CHECKEDREAD(dst, cond, errmsg) \
+ tmp = svq3_get_ue_golomb(gb); \
+ if (cond) { \
+ av_log(s->avctx, AV_LOG_ERROR, errmsg); \
+ return -1; \
+ }\
+ dst = tmp;
+
+ align_get_bits(gb);
+
+ s->zero_res = s->num_refs ? get_bits1(gb) : 0;
+ if (s->zero_res)
+ return 0;
+
+ /*[DIRAC_STD] 11.3.1 Transform parameters. transform_parameters() */
+ CHECKEDREAD(s->wavelet_idx, tmp > 6, "wavelet_idx is too big\n")
+
+ CHECKEDREAD(s->wavelet_depth, tmp > MAX_DWT_LEVELS || tmp < 1, "invalid number of DWT decompositions\n")
+
+ if (!s->low_delay) {
+ /* Codeblock parameters (core syntax only) */
+ if (get_bits1(gb)) {
+ for (i = 0; i <= s->wavelet_depth; i++) {
+ CHECKEDREAD(s->codeblock[i].width , tmp < 1, "codeblock width invalid\n")
+ CHECKEDREAD(s->codeblock[i].height, tmp < 1, "codeblock height invalid\n")
+ }
+
+ CHECKEDREAD(s->codeblock_mode, tmp > 1, "unknown codeblock mode\n")
+ } else
+ for (i = 0; i <= s->wavelet_depth; i++)
+ s->codeblock[i].width = s->codeblock[i].height = 1;
+ } else {
+ /* Slice parameters + quantization matrix */
+ /*[DIRAC_STD] 11.3.4 Slice coding Parameters (low delay syntax only). slice_parameters() */
+ s->lowdelay.num_x = svq3_get_ue_golomb(gb);
+ s->lowdelay.num_y = svq3_get_ue_golomb(gb);
+ s->lowdelay.bytes.num = svq3_get_ue_golomb(gb);
+ s->lowdelay.bytes.den = svq3_get_ue_golomb(gb);
+
+ if (s->lowdelay.bytes.den <= 0) {
+ av_log(s->avctx,AV_LOG_ERROR,"Invalid lowdelay.bytes.den\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* [DIRAC_STD] 11.3.5 Quantisation matrices (low-delay syntax). quant_matrix() */
+ if (get_bits1(gb)) {
+ av_log(s->avctx,AV_LOG_DEBUG,"Low Delay: Has Custom Quantization Matrix!\n");
+ /* custom quantization matrix */
+ s->lowdelay.quant[0][0] = svq3_get_ue_golomb(gb);
+ for (level = 0; level < s->wavelet_depth; level++) {
+ s->lowdelay.quant[level][1] = svq3_get_ue_golomb(gb);
+ s->lowdelay.quant[level][2] = svq3_get_ue_golomb(gb);
+ s->lowdelay.quant[level][3] = svq3_get_ue_golomb(gb);
+ }
+ } else {
+ if (s->wavelet_depth > 4) {
+ av_log(s->avctx,AV_LOG_ERROR,"Mandatory custom low delay matrix missing for depth %d\n", s->wavelet_depth);
+ return AVERROR_INVALIDDATA;
+ }
+ /* default quantization matrix */
+ for (level = 0; level < s->wavelet_depth; level++)
+ for (i = 0; i < 4; i++) {
+ s->lowdelay.quant[level][i] = default_qmat[s->wavelet_idx][level][i];
+ /* haar with no shift differs for different depths */
+ if (s->wavelet_idx == 3)
+ s->lowdelay.quant[level][i] += 4*(s->wavelet_depth-1 - level);
+ }
+ }
+ }
+ return 0;
+}
+
+static inline int pred_sbsplit(uint8_t *sbsplit, int stride, int x, int y)
+{
+ static const uint8_t avgsplit[7] = { 0, 0, 1, 1, 1, 2, 2 };
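+ /* rounded mean of up to three neighbouring split levels (each 0..2):
+ avgsplit[sum] == (sum + 1) / 3 */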
+
+ if (!(x|y))
+ return 0;
+ else if (!y)
+ return sbsplit[-1];
+ else if (!x)
+ return sbsplit[-stride];
+
+ return avgsplit[sbsplit[-1] + sbsplit[-stride] + sbsplit[-stride-1]];
+}
+
+static inline int pred_block_mode(DiracBlock *block, int stride, int x, int y, int refmask)
+{
+ int pred;
+
+ if (!(x|y))
+ return 0;
+ else if (!y)
+ return block[-1].ref & refmask;
+ else if (!x)
+ return block[-stride].ref & refmask;
+
+ /* return the majority */
+ pred = (block[-1].ref & refmask) + (block[-stride].ref & refmask) + (block[-stride-1].ref & refmask);
+ return (pred >> 1) & refmask;
+}
+
+static inline void pred_block_dc(DiracBlock *block, int stride, int x, int y)
+{
+ int i, n = 0;
+
+ memset(block->u.dc, 0, sizeof(block->u.dc));
+
+ if (x && !(block[-1].ref & 3)) {
+ for (i = 0; i < 3; i++)
+ block->u.dc[i] += block[-1].u.dc[i];
+ n++;
+ }
+
+ if (y && !(block[-stride].ref & 3)) {
+ for (i = 0; i < 3; i++)
+ block->u.dc[i] += block[-stride].u.dc[i];
+ n++;
+ }
+
+ if (x && y && !(block[-1-stride].ref & 3)) {
+ for (i = 0; i < 3; i++)
+ block->u.dc[i] += block[-1-stride].u.dc[i];
+ n++;
+ }
+
+ if (n == 2) {
+ for (i = 0; i < 3; i++)
+ block->u.dc[i] = (block->u.dc[i]+1)>>1;
+ } else if (n == 3) {
+ for (i = 0; i < 3; i++)
+ block->u.dc[i] = divide3(block->u.dc[i]);
+ }
+}
+
+static inline void pred_mv(DiracBlock *block, int stride, int x, int y, int ref)
+{
+ int16_t *pred[3];
+ int refmask = ref+1;
+ int mask = refmask | DIRAC_REF_MASK_GLOBAL; /* exclude gmc blocks */
+ int n = 0;
+
+ if (x && (block[-1].ref & mask) == refmask)
+ pred[n++] = block[-1].u.mv[ref];
+
+ if (y && (block[-stride].ref & mask) == refmask)
+ pred[n++] = block[-stride].u.mv[ref];
+
+ if (x && y && (block[-stride-1].ref & mask) == refmask)
+ pred[n++] = block[-stride-1].u.mv[ref];
+
+ switch (n) {
+ case 0:
+ block->u.mv[ref][0] = 0;
+ block->u.mv[ref][1] = 0;
+ break;
+ case 1:
+ block->u.mv[ref][0] = pred[0][0];
+ block->u.mv[ref][1] = pred[0][1];
+ break;
+ case 2:
+ block->u.mv[ref][0] = (pred[0][0] + pred[1][0] + 1) >> 1;
+ block->u.mv[ref][1] = (pred[0][1] + pred[1][1] + 1) >> 1;
+ break;
+ case 3:
+ block->u.mv[ref][0] = mid_pred(pred[0][0], pred[1][0], pred[2][0]);
+ block->u.mv[ref][1] = mid_pred(pred[0][1], pred[1][1], pred[2][1]);
+ break;
+ }
+}
+
+static void global_mv(DiracContext *s, DiracBlock *block, int x, int y, int ref)
+{
+ int ez = s->globalmc[ref].zrs_exp;
+ int ep = s->globalmc[ref].perspective_exp;
+ int (*A)[2] = s->globalmc[ref].zrs;
+ int *b = s->globalmc[ref].pan_tilt;
+ int *c = s->globalmc[ref].perspective;
+
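+ /* mv = ((A*(x,y) + 2^ez*b) * (2^ep - c.(x,y))) >> (ez+ep): pan/tilt b,
+ zoom/rotate/shear A and perspective c combined in fixed point, with ez
+ and ep the corresponding scaling exponents */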
+ int m = (1<<ep) - (c[0]*x + c[1]*y);
+ int mx = m * ((A[0][0] * x + A[0][1]*y) + (1<<ez) * b[0]);
+ int my = m * ((A[1][0] * x + A[1][1]*y) + (1<<ez) * b[1]);
+
+ block->u.mv[ref][0] = (mx + (1<<(ez+ep))) >> (ez+ep);
+ block->u.mv[ref][1] = (my + (1<<(ez+ep))) >> (ez+ep);
+}
+
+static void decode_block_params(DiracContext *s, DiracArith arith[8], DiracBlock *block,
+ int stride, int x, int y)
+{
+ int i;
+
+ block->ref = pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_REF1);
+ block->ref ^= dirac_get_arith_bit(arith, CTX_PMODE_REF1);
+
+ if (s->num_refs == 2) {
+ block->ref |= pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_REF2);
+ block->ref ^= dirac_get_arith_bit(arith, CTX_PMODE_REF2) << 1;
+ }
+
+ if (!block->ref) {
+ pred_block_dc(block, stride, x, y);
+ for (i = 0; i < 3; i++)
+ block->u.dc[i] += dirac_get_arith_int(arith+1+i, CTX_DC_F1, CTX_DC_DATA);
+ return;
+ }
+
+ if (s->globalmc_flag) {
+ block->ref |= pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_GLOBAL);
+ block->ref ^= dirac_get_arith_bit(arith, CTX_GLOBAL_BLOCK) << 2;
+ }
+
+ for (i = 0; i < s->num_refs; i++)
+ if (block->ref & (i+1)) {
+ if (block->ref & DIRAC_REF_MASK_GLOBAL) {
+ global_mv(s, block, x, y, i);
+ } else {
+ pred_mv(block, stride, x, y, i);
+ block->u.mv[i][0] += dirac_get_arith_int(arith + 4 + 2 * i, CTX_MV_F1, CTX_MV_DATA);
+ block->u.mv[i][1] += dirac_get_arith_int(arith + 5 + 2 * i, CTX_MV_F1, CTX_MV_DATA);
+ }
+ }
+}
+
+/**
+ * Copies the current block to the other blocks covered by the current superblock split mode
+ */
+static void propagate_block_data(DiracBlock *block, int stride, int size)
+{
+ int x, y;
+ DiracBlock *dst = block;
+
+ for (x = 1; x < size; x++)
+ dst[x] = *block;
+
+ for (y = 1; y < size; y++) {
+ dst += stride;
+ for (x = 0; x < size; x++)
+ dst[x] = *block;
+ }
+}
+
+/**
+ * Dirac Specification ->
+ * 12. Block motion data syntax
+ */
+static int dirac_unpack_block_motion_data(DiracContext *s)
+{
+ GetBitContext *gb = &s->gb;
+ uint8_t *sbsplit = s->sbsplit;
+ int i, x, y, q, p;
+ DiracArith arith[8];
+
+ align_get_bits(gb);
+
+ /* [DIRAC_STD] 11.2.4 and 12.2.1 Number of blocks and superblocks */
+ s->sbwidth = DIVRNDUP(s->source.width, 4*s->plane[0].xbsep);
+ s->sbheight = DIVRNDUP(s->source.height, 4*s->plane[0].ybsep);
+ s->blwidth = 4 * s->sbwidth;
+ s->blheight = 4 * s->sbheight;
+
+ /* [DIRAC_STD] 12.3.1 Superblock splitting modes. superblock_split_modes()
+ decode superblock split modes */
+ ff_dirac_init_arith_decoder(arith, gb, svq3_get_ue_golomb(gb)); /* svq3_get_ue_golomb(gb) is the length */
+ for (y = 0; y < s->sbheight; y++) {
+ for (x = 0; x < s->sbwidth; x++) {
+ unsigned int split = dirac_get_arith_uint(arith, CTX_SB_F1, CTX_SB_DATA);
+ if (split > 2)
+ return -1;
+ sbsplit[x] = (split + pred_sbsplit(sbsplit+x, s->sbwidth, x, y)) % 3;
+ }
+ sbsplit += s->sbwidth;
+ }
+
+ /* setup arith decoding */
+ ff_dirac_init_arith_decoder(arith, gb, svq3_get_ue_golomb(gb));
+ for (i = 0; i < s->num_refs; i++) {
+ ff_dirac_init_arith_decoder(arith + 4 + 2 * i, gb, svq3_get_ue_golomb(gb));
+ ff_dirac_init_arith_decoder(arith + 5 + 2 * i, gb, svq3_get_ue_golomb(gb));
+ }
+ for (i = 0; i < 3; i++)
+ ff_dirac_init_arith_decoder(arith+1+i, gb, svq3_get_ue_golomb(gb));
+
+ for (y = 0; y < s->sbheight; y++)
+ for (x = 0; x < s->sbwidth; x++) {
+ int blkcnt = 1 << s->sbsplit[y * s->sbwidth + x];
+ int step = 4 >> s->sbsplit[y * s->sbwidth + x];
+
+ for (q = 0; q < blkcnt; q++)
+ for (p = 0; p < blkcnt; p++) {
+ int bx = 4 * x + p*step;
+ int by = 4 * y + q*step;
+ DiracBlock *block = &s->blmotion[by*s->blwidth + bx];
+ decode_block_params(s, arith, block, s->blwidth, bx, by);
+ propagate_block_data(block, s->blwidth, step);
+ }
+ }
+
+ return 0;
+}
+
+static int weight(int i, int blen, int offset)
+{
+#define ROLLOFF(i) offset == 1 ? ((i) ? 5 : 3) : \
+ (1 + (6*(i) + offset - 1) / (2*offset - 1))
+
+ if (i < 2*offset)
+ return ROLLOFF(i);
+ else if (i > blen-1 - 2*offset)
+ return ROLLOFF(blen-1 - i);
+ return 8;
+}
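+
+/* The rolloff makes the weights of overlapping blocks sum to 8: e.g. blen 8
+ with offset 1 gives 3,5,8,8,8,8,5,3, so the two pixels shared by adjacent
+ blocks see 5+3 and 3+5. */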
+
+static void init_obmc_weight_row(Plane *p, uint8_t *obmc_weight, int stride,
+ int left, int right, int wy)
+{
+ int x;
+ for (x = 0; left && x < p->xblen >> 1; x++)
+ obmc_weight[x] = wy*8;
+ for (; x < p->xblen >> right; x++)
+ obmc_weight[x] = wy*weight(x, p->xblen, p->xoffset);
+ for (; x < p->xblen; x++)
+ obmc_weight[x] = wy*8;
+ for (; x < stride; x++)
+ obmc_weight[x] = 0;
+}
+
+static void init_obmc_weight(Plane *p, uint8_t *obmc_weight, int stride,
+ int left, int right, int top, int bottom)
+{
+ int y;
+ for (y = 0; top && y < p->yblen >> 1; y++) {
+ init_obmc_weight_row(p, obmc_weight, stride, left, right, 8);
+ obmc_weight += stride;
+ }
+ for (; y < p->yblen >> bottom; y++) {
+ int wy = weight(y, p->yblen, p->yoffset);
+ init_obmc_weight_row(p, obmc_weight, stride, left, right, wy);
+ obmc_weight += stride;
+ }
+ for (; y < p->yblen; y++) {
+ init_obmc_weight_row(p, obmc_weight, stride, left, right, 8);
+ obmc_weight += stride;
+ }
+}
+
+static void init_obmc_weights(DiracContext *s, Plane *p, int by)
+{
+ int top = !by;
+ int bottom = by == s->blheight-1;
+
+ /* don't bother re-initing for rows 2 to blheight-2; the weights don't change */
+ if (top || bottom || by == 1) {
+ init_obmc_weight(p, s->obmc_weight[0], MAX_BLOCKSIZE, 1, 0, top, bottom);
+ init_obmc_weight(p, s->obmc_weight[1], MAX_BLOCKSIZE, 0, 0, top, bottom);
+ init_obmc_weight(p, s->obmc_weight[2], MAX_BLOCKSIZE, 0, 1, top, bottom);
+ }
+}
+
+static const uint8_t epel_weights[4][4][4] = {
+ {{ 16, 0, 0, 0 },
+ { 12, 4, 0, 0 },
+ { 8, 8, 0, 0 },
+ { 4, 12, 0, 0 }},
+ {{ 12, 0, 4, 0 },
+ { 9, 3, 3, 1 },
+ { 6, 6, 2, 2 },
+ { 3, 9, 1, 3 }},
+ {{ 8, 0, 8, 0 },
+ { 6, 2, 6, 2 },
+ { 4, 4, 4, 4 },
+ { 2, 6, 2, 6 }},
+ {{ 4, 0, 12, 0 },
+ { 3, 1, 9, 3 },
+ { 2, 2, 6, 6 },
+ { 1, 3, 3, 9 }}
+};
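+
+/* Each 4-entry row above sums to 16: bilinear blend factors for up to four
+ hpel planes, indexed by the eighth-pel fraction [my&3][mx&3]. */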
+
+/**
+ * For block x,y, determine which of the hpel planes to do bilinear
+ * interpolation from and set src[] to the location in each hpel plane
+ * to MC from.
+ *
+ * @return the index of the put_dirac_pixels_tab function to use
+ * 0 for 1 plane (fpel,hpel), 1 for 2 planes (qpel), 2 for 4 planes (qpel), and 3 for epel
+ */
+static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t *src[5],
+ int x, int y, int ref, int plane)
+{
+ Plane *p = &s->plane[plane];
+ uint8_t **ref_hpel = s->ref_pics[ref]->hpel[plane];
+ int motion_x = block->u.mv[ref][0];
+ int motion_y = block->u.mv[ref][1];
+ int mx, my, i, epel, nplanes = 0;
+
+ if (plane) {
+ motion_x >>= s->chroma_x_shift;
+ motion_y >>= s->chroma_y_shift;
+ }
+
+ mx = motion_x & ~(-1 << s->mv_precision);
+ my = motion_y & ~(-1 << s->mv_precision);
+ motion_x >>= s->mv_precision;
+ motion_y >>= s->mv_precision;
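+ /* mx/my now hold the subpel fraction, motion_x/y the integer-pel part */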
+ /* normalize subpel coordinates to epel */
+ /* TODO: template this function? */
+ mx <<= 3 - s->mv_precision;
+ my <<= 3 - s->mv_precision;
+
+ x += motion_x;
+ y += motion_y;
+ epel = (mx|my)&1;
+
+ /* hpel position */
+ if (!((mx|my)&3)) {
+ nplanes = 1;
+ src[0] = ref_hpel[(my>>1)+(mx>>2)] + y*p->stride + x;
+ } else {
+ /* qpel or epel */
+ nplanes = 4;
+ for (i = 0; i < 4; i++)
+ src[i] = ref_hpel[i] + y*p->stride + x;
+
+ /* if we're interpolating in the right/bottom halves, adjust the planes as
+ needed; we increment x/y because the edge changes for half of the pixels */
+ if (mx > 4) {
+ src[0] += 1;
+ src[2] += 1;
+ x++;
+ }
+ if (my > 4) {
+ src[0] += p->stride;
+ src[1] += p->stride;
+ y++;
+ }
+
+ /* hpel planes are:
+ [0]: F [1]: H
+ [2]: V [3]: C */
+ if (!epel) {
+ /* check if we really only need 2 planes since either mx or my is
+ an hpel position (in the epel case, weights of 0 handle this instead) */
+ if (!(mx&3)) {
+ /* mx == 0: average [0] and [2]
+ mx == 4: average [1] and [3] */
+ src[!mx] = src[2 + !!mx];
+ nplanes = 2;
+ } else if (!(my&3)) {
+ src[0] = src[(my>>1) ];
+ src[1] = src[(my>>1)+1];
+ nplanes = 2;
+ }
+ } else {
+ /* adjust the ordering if needed so the weights work */
+ if (mx > 4) {
+ FFSWAP(const uint8_t *, src[0], src[1]);
+ FFSWAP(const uint8_t *, src[2], src[3]);
+ }
+ if (my > 4) {
+ FFSWAP(const uint8_t *, src[0], src[2]);
+ FFSWAP(const uint8_t *, src[1], src[3]);
+ }
+ src[4] = epel_weights[my&3][mx&3];
+ }
+ }
+
+ /* fixme: v/h _edge_pos */
+ if (x + p->xblen > p->width +EDGE_WIDTH/2 ||
+ y + p->yblen > p->height+EDGE_WIDTH/2 ||
+ x < 0 || y < 0) {
+ for (i = 0; i < nplanes; i++) {
+ ff_emulated_edge_mc(s->edge_emu_buffer[i], src[i], p->stride,
+ p->xblen, p->yblen, x, y,
+ p->width+EDGE_WIDTH/2, p->height+EDGE_WIDTH/2);
+ src[i] = s->edge_emu_buffer[i];
+ }
+ }
+ return (nplanes>>1) + epel;
+}
+
+static void add_dc(uint16_t *dst, int dc, int stride,
+ uint8_t *obmc_weight, int xblen, int yblen)
+{
+ int x, y;
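+ /* bias the signed DC value into the unsigned pixel range before weighting
+ it into the 16-bit prediction accumulator */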
+ dc += 128;
+
+ for (y = 0; y < yblen; y++) {
+ for (x = 0; x < xblen; x += 2) {
+ dst[x ] += dc * obmc_weight[x ];
+ dst[x+1] += dc * obmc_weight[x+1];
+ }
+ dst += stride;
+ obmc_weight += MAX_BLOCKSIZE;
+ }
+}
+
+static void block_mc(DiracContext *s, DiracBlock *block,
+ uint16_t *mctmp, uint8_t *obmc_weight,
+ int plane, int dstx, int dsty)
+{
+ Plane *p = &s->plane[plane];
+ const uint8_t *src[5];
+ int idx;
+
+ switch (block->ref&3) {
+ case 0: /* DC */
+ add_dc(mctmp, block->u.dc[plane], p->stride, obmc_weight, p->xblen, p->yblen);
+ return;
+ case 1:
+ case 2:
+ idx = mc_subpel(s, block, src, dstx, dsty, (block->ref&3)-1, plane);
+ s->put_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
+ if (s->weight_func)
+ s->weight_func(s->mcscratch, p->stride, s->weight_log2denom,
+ s->weight[0] + s->weight[1], p->yblen);
+ break;
+ case 3:
+ idx = mc_subpel(s, block, src, dstx, dsty, 0, plane);
+ s->put_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
+ idx = mc_subpel(s, block, src, dstx, dsty, 1, plane);
+ if (s->biweight_func) {
+ /* fixme: +32 is a quick hack */
+ s->put_pixels_tab[idx](s->mcscratch + 32, src, p->stride, p->yblen);
+ s->biweight_func(s->mcscratch, s->mcscratch+32, p->stride, s->weight_log2denom,
+ s->weight[0], s->weight[1], p->yblen);
+ } else
+ s->avg_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
+ break;
+ }
+ s->add_obmc(mctmp, s->mcscratch, p->stride, obmc_weight, p->yblen);
+}
+
+static void mc_row(DiracContext *s, DiracBlock *block, uint16_t *mctmp, int plane, int dsty)
+{
+ Plane *p = &s->plane[plane];
+ int x, dstx = p->xbsep - p->xoffset;
+
+ block_mc(s, block, mctmp, s->obmc_weight[0], plane, -p->xoffset, dsty);
+ mctmp += p->xbsep;
+
+ for (x = 1; x < s->blwidth-1; x++) {
+ block_mc(s, block+x, mctmp, s->obmc_weight[1], plane, dstx, dsty);
+ dstx += p->xbsep;
+ mctmp += p->xbsep;
+ }
+ block_mc(s, block+x, mctmp, s->obmc_weight[2], plane, dstx, dsty);
+}
+
+static void select_dsp_funcs(DiracContext *s, int width, int height, int xblen, int yblen)
+{
+ int idx = 0;
+ if (xblen > 8)
+ idx = 1;
+ if (xblen > 16)
+ idx = 2;
+
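+ /* idx selects among the 8-, 16- and 32-pixel-wide put/avg/OBMC routines */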
+ memcpy(s->put_pixels_tab, s->diracdsp.put_dirac_pixels_tab[idx], sizeof(s->put_pixels_tab));
+ memcpy(s->avg_pixels_tab, s->diracdsp.avg_dirac_pixels_tab[idx], sizeof(s->avg_pixels_tab));
+ s->add_obmc = s->diracdsp.add_dirac_obmc[idx];
+ if (s->weight_log2denom > 1 || s->weight[0] != 1 || s->weight[1] != 1) {
+ s->weight_func = s->diracdsp.weight_dirac_pixels_tab[idx];
+ s->biweight_func = s->diracdsp.biweight_dirac_pixels_tab[idx];
+ } else {
+ s->weight_func = NULL;
+ s->biweight_func = NULL;
+ }
+}
+
+static void interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, int width, int height)
+{
+ /* chroma is allocated with an edge of 8 when subsampled, which for 4:2:2
+ means an h edge of 16 and a v edge of 8;
+ just use 8 for everything for the moment */
+ int i, edge = EDGE_WIDTH/2;
+
+ ref->hpel[plane][0] = ref->avframe.data[plane];
+ s->dsp.draw_edges(ref->hpel[plane][0], ref->avframe.linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM); /* FIXME: EDGE_TOP | EDGE_BOTTOM values were just copied to make it build; this needs to be verified */
+
+ /* no need for hpel if we only have fpel vectors */
+ if (!s->mv_precision)
+ return;
+
+ for (i = 1; i < 4; i++) {
+ if (!ref->hpel_base[plane][i])
+ ref->hpel_base[plane][i] = av_malloc((height+2*edge) * ref->avframe.linesize[plane] + 32);
+ /* we need to be 16-byte aligned even for chroma */
+ ref->hpel[plane][i] = ref->hpel_base[plane][i] + edge*ref->avframe.linesize[plane] + 16;
+ }
+
+ if (!ref->interpolated[plane]) {
+ s->diracdsp.dirac_hpel_filter(ref->hpel[plane][1], ref->hpel[plane][2],
+ ref->hpel[plane][3], ref->hpel[plane][0],
+ ref->avframe.linesize[plane], width, height);
+ s->dsp.draw_edges(ref->hpel[plane][1], ref->avframe.linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
+ s->dsp.draw_edges(ref->hpel[plane][2], ref->avframe.linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
+ s->dsp.draw_edges(ref->hpel[plane][3], ref->avframe.linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
+ }
+ ref->interpolated[plane] = 1;
+}
+
+/**
+ * Dirac Specification ->
+ * 13.0 Transform data syntax. transform_data()
+ */
+static int dirac_decode_frame_internal(DiracContext *s)
+{
+ DWTContext d;
+ int y, i, comp, dsty;
+
+ if (s->low_delay) {
+ /* [DIRAC_STD] 13.5.1 low_delay_transform_data() */
+ for (comp = 0; comp < 3; comp++) {
+ Plane *p = &s->plane[comp];
+ memset(p->idwt_buf, 0, p->idwt_stride * p->idwt_height * sizeof(IDWTELEM));
+ }
+ if (!s->zero_res)
+ decode_lowdelay(s);
+ }
+
+ for (comp = 0; comp < 3; comp++) {
+ Plane *p = &s->plane[comp];
+ uint8_t *frame = s->current_picture->avframe.data[comp];
+
+ /* FIXME: small resolutions */
+ for (i = 0; i < 4; i++)
+ s->edge_emu_buffer[i] = s->edge_emu_buffer_base + i*FFALIGN(p->width, 16);
+
+ if (!s->zero_res && !s->low_delay)
+ {
+ memset(p->idwt_buf, 0, p->idwt_stride * p->idwt_height * sizeof(IDWTELEM));
+ decode_component(s, comp); /* [DIRAC_STD] 13.4.1 core_transform_data() */
+ }
+ if (ff_spatial_idwt_init2(&d, p->idwt_buf, p->idwt_width, p->idwt_height, p->idwt_stride,
+ s->wavelet_idx+2, s->wavelet_depth, p->idwt_tmp))
+ return -1;
+
+ if (!s->num_refs) { /* intra */
+ for (y = 0; y < p->height; y += 16) {
+ ff_spatial_idwt_slice2(&d, y+16); /* decode */
+ s->diracdsp.put_signed_rect_clamped(frame + y*p->stride, p->stride,
+ p->idwt_buf + y*p->idwt_stride, p->idwt_stride, p->width, 16);
+ }
+ } else { /* inter */
+ int rowheight = p->ybsep*p->stride;
+
+ select_dsp_funcs(s, p->width, p->height, p->xblen, p->yblen);
+
+ for (i = 0; i < s->num_refs; i++)
+ interpolate_refplane(s, s->ref_pics[i], comp, p->width, p->height);
+
+ memset(s->mctmp, 0, 4*p->yoffset*p->stride);
+
+ dsty = -p->yoffset;
+ for (y = 0; y < s->blheight; y++) {
+ int h = 0,
+ start = FFMAX(dsty, 0);
+ uint16_t *mctmp = s->mctmp + y*rowheight;
+ DiracBlock *blocks = s->blmotion + y*s->blwidth;
+
+ init_obmc_weights(s, p, y);
+
+ if (y == s->blheight-1 || start+p->ybsep > p->height)
+ h = p->height - start;
+ else
+ h = p->ybsep - (start - dsty);
+ if (h < 0)
+ break;
+
+ memset(mctmp+2*p->yoffset*p->stride, 0, 2*rowheight);
+ mc_row(s, blocks, mctmp, comp, dsty);
+
+ mctmp += (start - dsty)*p->stride + p->xoffset;
+ ff_spatial_idwt_slice2(&d, start + h); /* decode */
+ s->diracdsp.add_rect_clamped(frame + start*p->stride, mctmp, p->stride,
+ p->idwt_buf + start*p->idwt_stride, p->idwt_stride, p->width, h);
+
+ dsty += p->ybsep;
+ }
+ }
+ }
+
+
+ return 0;
+}
+
+/**
+ * Dirac Specification ->
+ * 11.1.1 Picture Header. picture_header()
+ */
+static int dirac_decode_picture_header(DiracContext *s)
+{
+ int retire, picnum;
+ int i, j, refnum, refdist;
+ GetBitContext *gb = &s->gb;
+
+ /* [DIRAC_STD] 11.1.1 Picture Header. picture_header() PICTURE_NUM */
+ picnum = s->current_picture->avframe.display_picture_number = get_bits_long(gb, 32);
+
+
+ av_log(s->avctx,AV_LOG_DEBUG,"PICTURE_NUM: %d\n",picnum);
+
+ /* if this is the first keyframe after a sequence header, start our
+ reordering from here */
+ if (s->frame_number < 0)
+ s->frame_number = picnum;
+
+ s->ref_pics[0] = s->ref_pics[1] = NULL;
+ for (i = 0; i < s->num_refs; i++) {
+ refnum = picnum + dirac_get_se_golomb(gb);
+ refdist = INT_MAX;
+
+ /* find the closest reference to the one we want */
+ /* Jordi: this is needed if the referenced picture hasn't yet arrived */
+ for (j = 0; j < MAX_REFERENCE_FRAMES && refdist; j++)
+ if (s->ref_frames[j]
+ && FFABS(s->ref_frames[j]->avframe.display_picture_number - refnum) < refdist) {
+ s->ref_pics[i] = s->ref_frames[j];
+ refdist = FFABS(s->ref_frames[j]->avframe.display_picture_number - refnum);
+ }
+
+ if (!s->ref_pics[i] || refdist)
+ av_log(s->avctx, AV_LOG_DEBUG, "Reference not found\n");
+
+ /* if there were no references at all, allocate one */
+ if (!s->ref_pics[i])
+ for (j = 0; j < MAX_FRAMES; j++)
+ if (!s->all_frames[j].avframe.data[0]) {
+ s->ref_pics[i] = &s->all_frames[j];
- *(AVFrame *)picture = out->avframe;
++ ff_get_buffer(s->avctx, &s->ref_pics[i]->avframe, AV_GET_BUFFER_FLAG_REF);
+ break;
+ }
+ }
+
+ /* retire the reference frames that are not used anymore */
+ if (s->current_picture->avframe.reference) {
+ retire = picnum + dirac_get_se_golomb(gb);
+ if (retire != picnum) {
+ DiracFrame *retire_pic = remove_frame(s->ref_frames, retire);
+
+ if (retire_pic)
+ retire_pic->avframe.reference &= DELAYED_PIC_REF;
+ else
+ av_log(s->avctx, AV_LOG_DEBUG, "Frame to retire not found\n");
+ }
+
+ /* if reference array is full, remove the oldest as per the spec */
+ while (add_frame(s->ref_frames, MAX_REFERENCE_FRAMES, s->current_picture)) {
+ av_log(s->avctx, AV_LOG_ERROR, "Reference frame overflow\n");
+ remove_frame(s->ref_frames, s->ref_frames[0]->avframe.display_picture_number)->avframe.reference &= DELAYED_PIC_REF;
+ }
+ }
+
+ if (s->num_refs) {
+ if (dirac_unpack_prediction_parameters(s)) /* [DIRAC_STD] 11.2 Picture Prediction Data. picture_prediction() */
+ return -1;
+ if (dirac_unpack_block_motion_data(s)) /* [DIRAC_STD] 12. Block motion data syntax */
+ return -1;
+ }
+ if (dirac_unpack_idwt_params(s)) /* [DIRAC_STD] 11.3 Wavelet transform data */
+ return -1;
+
+ init_planes(s);
+ return 0;
+}
+
+static int get_delayed_pic(DiracContext *s, AVFrame *picture, int *got_frame)
+{
+ DiracFrame *out = s->delay_frames[0];
+ int i, out_idx = 0;
++ int ret;
+
+ /* find frame with lowest picture number */
+ for (i = 1; s->delay_frames[i]; i++)
+ if (s->delay_frames[i]->avframe.display_picture_number < out->avframe.display_picture_number) {
+ out = s->delay_frames[i];
+ out_idx = i;
+ }
+
+ for (i = out_idx; s->delay_frames[i]; i++)
+ s->delay_frames[i] = s->delay_frames[i+1];
+
+ if (out) {
+ out->avframe.reference ^= DELAYED_PIC_REF;
+ *got_frame = 1;
- if (ff_get_buffer(avctx, &pic->avframe) < 0) {
++ if((ret = av_frame_ref(picture, &out->avframe)) < 0)
++ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * Dirac Specification ->
+ * 9.6 Parse Info Header Syntax. parse_info()
+ * 4 byte start code + byte parse code + 4 byte size + 4 byte previous size
+ */
+#define DATA_UNIT_HEADER_SIZE 13
+
+/* [DIRAC_STD] dirac_decode_data_unit makes reference to the while loop defined in 9.3
+ inside the function parse_sequence() */
+static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int size)
+{
+ DiracContext *s = avctx->priv_data;
+ DiracFrame *pic = NULL;
+ int i, parse_code = buf[4];
+ unsigned tmp;
+
+ if (size < DATA_UNIT_HEADER_SIZE)
+ return -1;
+
+ init_get_bits(&s->gb, &buf[13], 8*(size - DATA_UNIT_HEADER_SIZE));
+
+ if (parse_code == pc_seq_header) {
+ if (s->seen_sequence_header)
+ return 0;
+
+ /* [DIRAC_STD] 10. Sequence header */
+ if (avpriv_dirac_parse_sequence_header(avctx, &s->gb, &s->source))
+ return -1;
+
+ avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
+
+ if (alloc_sequence_buffers(s))
+ return -1;
+
+ s->seen_sequence_header = 1;
+ } else if (parse_code == pc_eos) { /* [DIRAC_STD] End of Sequence */
+ free_sequence_buffers(s);
+ s->seen_sequence_header = 0;
+ } else if (parse_code == pc_aux_data) {
+ if (buf[13] == 1) { /* encoder implementation/version */
+ int ver[3];
+ /* versions older than 1.0.8 don't store quant delta for
+ subbands with only one codeblock */
+ if (sscanf(buf+14, "Schroedinger %d.%d.%d", ver, ver+1, ver+2) == 3)
+ if (ver[0] == 1 && ver[1] == 0 && ver[2] <= 7)
+ s->old_delta_quant = 1;
+ }
+ } else if (parse_code & 0x8) { /* picture data unit */
+ if (!s->seen_sequence_header) {
+ av_log(avctx, AV_LOG_DEBUG, "Dropping frame without sequence header\n");
+ return -1;
+ }
+
+ /* find an unused frame */
+ for (i = 0; i < MAX_FRAMES; i++)
+ if (s->all_frames[i].avframe.data[0] == NULL)
+ pic = &s->all_frames[i];
+ if (!pic) {
+ av_log(avctx, AV_LOG_ERROR, "framelist full\n");
+ return -1;
+ }
+
+ avcodec_get_frame_defaults(&pic->avframe);
+
+ /* [DIRAC_STD] Defined in 9.6.1 ... */
+ tmp = parse_code & 0x03; /* [DIRAC_STD] num_refs() */
+ if (tmp > 2) {
+ av_log(avctx, AV_LOG_ERROR, "num_refs of 3\n");
+ return -1;
+ }
+ s->num_refs = tmp;
+ s->is_arith = (parse_code & 0x48) == 0x08; /* [DIRAC_STD] using_ac() */
+ s->low_delay = (parse_code & 0x88) == 0x88; /* [DIRAC_STD] is_low_delay() */
+ pic->avframe.reference = (parse_code & 0x0C) == 0x0C; /* [DIRAC_STD] is_reference() */
+ pic->avframe.key_frame = s->num_refs == 0; /* [DIRAC_STD] is_intra() */
+ pic->avframe.pict_type = s->num_refs + 1; /* Definition of AVPictureType in avutil.h */
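+ /* parse_code summary ([DIRAC_STD] 9.6.1): bits 0-1 give the reference
+ count, bit 3 marks picture data, bit 6 clear means arithmetic coding,
+ bits 2-3 both set mark a reference picture, bit 7 low delay */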
+
- avctx->release_buffer(avctx, &s->all_frames[i].avframe);
++ if (ff_get_buffer(avctx, &pic->avframe, (parse_code & 0x0C) == 0x0C ? AV_GET_BUFFER_FLAG_REF : 0) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return -1;
+ }
+ s->current_picture = pic;
+ s->plane[0].stride = pic->avframe.linesize[0];
+ s->plane[1].stride = pic->avframe.linesize[1];
+ s->plane[2].stride = pic->avframe.linesize[2];
+
+ /* [DIRAC_STD] 11.1 Picture parse. picture_parse() */
+ if (dirac_decode_picture_header(s))
+ return -1;
+
+ /* [DIRAC_STD] 13.0 Transform data syntax. transform_data() */
+ if (dirac_decode_frame_internal(s))
+ return -1;
+ }
+ return 0;
+}
+
+static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt)
+{
+ DiracContext *s = avctx->priv_data;
+ DiracFrame *picture = data;
+ uint8_t *buf = pkt->data;
+ int buf_size = pkt->size;
+ int i, data_unit_size, buf_idx = 0;
++ int ret;
+
+ /* release unused frames */
+ for (i = 0; i < MAX_FRAMES; i++)
+ if (s->all_frames[i].avframe.data[0] && !s->all_frames[i].avframe.reference) {
- *(AVFrame*)data = delayed_frame->avframe;
++ av_frame_unref(&s->all_frames[i].avframe);
+ memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated));
+ }
+
+ s->current_picture = NULL;
+ *got_frame = 0;
+
+ /* end of stream, so flush delayed pics */
+ if (buf_size == 0)
+ return get_delayed_pic(s, (AVFrame *)data, got_frame);
+
+ for (;;) {
+ /*[DIRAC_STD] Here starts the code from parse_info() defined in 9.6
+ [DIRAC_STD] PARSE_INFO_PREFIX = "BBCD" as defined in ISO/IEC 646
+ BBCD start code search */
+ for (; buf_idx + DATA_UNIT_HEADER_SIZE < buf_size; buf_idx++) {
+ if (buf[buf_idx ] == 'B' && buf[buf_idx+1] == 'B' &&
+ buf[buf_idx+2] == 'C' && buf[buf_idx+3] == 'D')
+ break;
+ }
+ /* BBCD found or end of data */
+ if (buf_idx + DATA_UNIT_HEADER_SIZE >= buf_size)
+ break;
+
+ data_unit_size = AV_RB32(buf+buf_idx+5);
+ if (buf_idx + data_unit_size > buf_size || !data_unit_size) {
+ if(buf_idx + data_unit_size > buf_size)
+ av_log(s->avctx, AV_LOG_ERROR,
+ "Data unit with size %d is larger than input buffer, discarding\n",
+ data_unit_size);
+ buf_idx += 4;
+ continue;
+ }
+ /* [DIRAC_STD] dirac_decode_data_unit makes reference to the while loop defined in 9.3 inside the function parse_sequence() */
+ if (dirac_decode_data_unit(avctx, buf+buf_idx, data_unit_size))
+ {
+ av_log(s->avctx, AV_LOG_ERROR,"Error in dirac_decode_data_unit\n");
+ return -1;
+ }
+ buf_idx += data_unit_size;
+ }
+
+ if (!s->current_picture)
+ return buf_size;
+
+ if (s->current_picture->avframe.display_picture_number > s->frame_number) {
+ DiracFrame *delayed_frame = remove_frame(s->delay_frames, s->frame_number);
+
+ s->current_picture->avframe.reference |= DELAYED_PIC_REF;
+
+ if (add_frame(s->delay_frames, MAX_DELAY, s->current_picture)) {
+ int min_num = s->delay_frames[0]->avframe.display_picture_number;
+ /* Too many delayed frames, so we display the frame with the lowest pts */
+ av_log(avctx, AV_LOG_ERROR, "Delay frame overflow\n");
+ delayed_frame = s->delay_frames[0];
+
+ for (i = 1; s->delay_frames[i]; i++)
+ if (s->delay_frames[i]->avframe.display_picture_number < min_num)
+ min_num = s->delay_frames[i]->avframe.display_picture_number;
+
+ delayed_frame = remove_frame(s->delay_frames, min_num);
+ add_frame(s->delay_frames, MAX_DELAY, s->current_picture);
+ }
+
+ if (delayed_frame) {
+ delayed_frame->avframe.reference ^= DELAYED_PIC_REF;
- *(AVFrame*)data = s->current_picture->avframe;
++ if((ret=av_frame_ref(data, &delayed_frame->avframe)) < 0)
++ return ret;
+ *got_frame = 1;
+ }
+ } else if (s->current_picture->avframe.display_picture_number == s->frame_number) {
+ /* The right frame at the right time :-) */
++ if((ret=av_frame_ref(data, &s->current_picture->avframe)) < 0)
++ return ret;
+ *got_frame = 1;
+ }
+
+ if (*got_frame)
+ s->frame_number = picture->avframe.display_picture_number + 1;
+
+ return buf_idx;
+}
+
+AVCodec ff_dirac_decoder = {
+ .name = "dirac",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_DIRAC,
+ .priv_data_size = sizeof(DiracContext),
+ .init = dirac_decode_init,
+ .close = dirac_decode_end,
+ .decode = dirac_decode_frame,
+ .capabilities = CODEC_CAP_DELAY,
+ .flush = dirac_decode_flush,
+ .long_name = NULL_IF_CONFIG_SMALL("BBC Dirac VC-2"),
+};
typedef struct DNXHDContext {
AVCodecContext *avctx;
- AVFrame picture;
GetBitContext gb;
- int cid; ///< compression id
+ int64_t cid; ///< compression id
unsigned int width, height;
unsigned int mb_width, mb_height;
uint32_t mb_scan_index[68]; /* max for 1080p */
DNXHDContext *ctx = avctx->priv_data;
ctx->avctx = avctx;
- avctx->coded_frame = &ctx->picture;
- avcodec_get_frame_defaults(&ctx->picture);
- ctx->picture.type = AV_PICTURE_TYPE_I;
- ctx->picture.key_frame = 1;
+ ctx->cid = -1;
return 0;
}