Add CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little
speed gain at this point but it should work.
+If there are inter-frame dependencies, i.e. the codec calls
+ff_thread_report/await_progress(), set AVCodecInternal.allocate_progress. The
+frames must then be freed with ff_thread_release_buffer().
+Otherwise leave it at zero and decode directly into the user-supplied frames.
+
Call ff_thread_report_progress() after some part of the current picture has been decoded.
A good place to put this is where draw_horiz_band() is called - add this if it isn't
called anywhere, as it's useful too and the implementation is trivial when you're
doing it.
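
As an illustrative sketch of the progress calls described above (not part of
this patch; the decoder context and function names are hypothetical and the
arguments are simplified):

    /* Producer side: announce that rows up to y_end of the current
     * picture are fully decoded and safe for other threads to read. */
    static void report_rows_done(MyDecContext *s, int y_end)
    {
        ff_thread_report_progress(&s->cur_frame, y_end, 0);
    }

    /* Consumer side: block until the reference frame has decoded far
     * enough for motion compensation to read from it. */
    static void wait_for_ref_rows(MyDecContext *s, int ref_y)
    {
        ff_thread_await_progress(&s->ref_frame, ref_y, 0);
    }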
* 4XM codec.
*/
+#include "libavutil/frame.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"
typedef struct FourXContext {
AVCodecContext *avctx;
DSPContext dsp;
- AVFrame *current_picture, *last_picture;
+ AVFrame *last_picture;
GetBitContext pre_gb; ///< ac/dc prefix
GetBitContext gb;
GetByteContext g;
}
}
-static void init_mv(FourXContext *f)
+static void init_mv(FourXContext *f, int linesize)
{
int i;
for (i = 0; i < 256; i++) {
if (f->version > 1)
- f->mv[i] = mv[i][0] + mv[i][1] * f->current_picture->linesize[0] / 2;
+ f->mv[i] = mv[i][0] + mv[i][1] * linesize / 2;
else
- f->mv[i] = (i & 15) - 8 + ((i >> 4) - 8) * f->current_picture->linesize[0] / 2;
+ f->mv[i] = (i & 15) - 8 + ((i >> 4) - 8) * linesize / 2;
}
}
}
}
-static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
+static int decode_p_frame(FourXContext *f, AVFrame *frame,
+ const uint8_t *buf, int length)
{
int x, y;
const int width = f->avctx->width;
const int height = f->avctx->height;
uint16_t *src = (uint16_t *)f->last_picture->data[0];
- uint16_t *dst = (uint16_t *)f->current_picture->data[0];
- const int stride = f->current_picture->linesize[0] >> 1;
+ uint16_t *dst = (uint16_t *)frame->data[0];
+ const int stride = frame->linesize[0] >> 1;
unsigned int bitstream_size, bytestream_size, wordstream_size, extra,
bytestream_offset, wordstream_offset;
bytestream2_init(&f->g, buf + bytestream_offset,
length - bytestream_offset);
- init_mv(f);
+ init_mv(f, frame->linesize[0]);
for (y = 0; y < height; y += 8) {
for (x = 0; x < width; x += 8)
return 0;
}
-static inline void idct_put(FourXContext *f, int x, int y)
+static inline void idct_put(FourXContext *f, AVFrame *frame, int x, int y)
{
int16_t (*block)[64] = f->block;
- int stride = f->current_picture->linesize[0] >> 1;
+ int stride = frame->linesize[0] >> 1;
int i;
- uint16_t *dst = ((uint16_t*)f->current_picture->data[0]) + y * stride + x;
+ uint16_t *dst = ((uint16_t*)frame->data[0]) + y * stride + x;
for (i = 0; i < 4; i++) {
block[i][0] += 0x80 * 8 * 8;
return red / 3 * 1024 + green / 3 * 32 + blue / 3;
}
-static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length)
+static int decode_i2_frame(FourXContext *f, AVFrame *frame, const uint8_t *buf, int length)
{
int x, y, x2, y2;
const int width = f->avctx->width;
const int height = f->avctx->height;
const int mbs = (FFALIGN(width, 16) >> 4) * (FFALIGN(height, 16) >> 4);
- uint16_t *dst = (uint16_t*)f->current_picture->data[0];
- const int stride = f->current_picture->linesize[0]>>1;
+ uint16_t *dst = (uint16_t*)frame->data[0];
+ const int stride = frame->linesize[0]>>1;
GetByteContext g3;
if (length < mbs * 8) {
return 0;
}
-static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
+static int decode_i_frame(FourXContext *f, AVFrame *frame, const uint8_t *buf, int length)
{
int x, y, ret;
const int width = f->avctx->width;
if ((ret = decode_i_mb(f)) < 0)
return ret;
- idct_put(f, x, y);
+ idct_put(f, frame, x, y);
}
}
int buf_size = avpkt->size;
FourXContext *const f = avctx->priv_data;
AVFrame *picture = data;
- AVFrame *p;
int i, frame_4cc, frame_size, ret;
frame_4cc = AV_RL32(buf);
frame_size = buf_size - 12;
}
- FFSWAP(AVFrame*, f->current_picture, f->last_picture);
-
- p = f->current_picture;
- avctx->coded_frame = p;
-
// alternatively we would have to use our own buffer management
avctx->flags |= CODEC_FLAG_EMU_EDGE;
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
- p->reference = 1;
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, picture, AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
if (frame_4cc == AV_RL32("ifr2")) {
- p->pict_type = AV_PICTURE_TYPE_I;
- if ((ret = decode_i2_frame(f, buf - 4, frame_size + 4)) < 0)
+ picture->pict_type = AV_PICTURE_TYPE_I;
+ if ((ret = decode_i2_frame(f, picture, buf - 4, frame_size + 4)) < 0)
return ret;
} else if (frame_4cc == AV_RL32("ifrm")) {
- p->pict_type = AV_PICTURE_TYPE_I;
- if ((ret = decode_i_frame(f, buf, frame_size)) < 0)
+ picture->pict_type = AV_PICTURE_TYPE_I;
+ if ((ret = decode_i_frame(f, picture, buf, frame_size)) < 0)
return ret;
} else if (frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")) {
if (!f->last_picture->data[0]) {
- f->last_picture->reference = 1;
- if ((ret = ff_get_buffer(avctx, f->last_picture)) < 0) {
+ if ((ret = ff_get_buffer(avctx, f->last_picture,
+ AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
memset(f->last_picture->data[0], 0, avctx->height * FFABS(f->last_picture->linesize[0]));
}
- p->pict_type = AV_PICTURE_TYPE_P;
- if ((ret = decode_p_frame(f, buf, frame_size)) < 0)
+ picture->pict_type = AV_PICTURE_TYPE_P;
+ if ((ret = decode_p_frame(f, picture, buf, frame_size)) < 0)
return ret;
} else if (frame_4cc == AV_RL32("snd_")) {
av_log(avctx, AV_LOG_ERROR, "ignoring snd_ chunk length:%d\n",
buf_size);
}
- p->key_frame = p->pict_type == AV_PICTURE_TYPE_I;
+ picture->key_frame = picture->pict_type == AV_PICTURE_TYPE_I;
- *picture = *p;
+ av_frame_unref(f->last_picture);
+ if ((ret = av_frame_ref(f->last_picture, picture)) < 0)
+ return ret;
*got_frame = 1;
emms_c();
else
avctx->pix_fmt = AV_PIX_FMT_BGR555;
- f->current_picture = avcodec_alloc_frame();
- f->last_picture = avcodec_alloc_frame();
- if (!f->current_picture || !f->last_picture) {
- avcodec_free_frame(&f->current_picture);
- avcodec_free_frame(&f->last_picture);
+ f->last_picture = av_frame_alloc();
+ if (!f->last_picture)
return AVERROR(ENOMEM);
- }
return 0;
}
f->cfrm[i].allocated_size = 0;
}
ff_free_vlc(&f->pre_vlc);
- if (f->current_picture->data[0])
- avctx->release_buffer(avctx, f->current_picture);
- if (f->last_picture->data[0])
- avctx->release_buffer(avctx, f->last_picture);
- avcodec_free_frame(&f->current_picture);
- avcodec_free_frame(&f->last_picture);
+ av_frame_free(&f->last_picture);
return 0;
}
typedef struct EightBpsContext {
AVCodecContext *avctx;
- AVFrame pic;
unsigned char planes;
unsigned char planemap[4];
static int decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
+ AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
EightBpsContext * const c = avctx->priv_data;
unsigned char *planemap = c->planemap;
int ret;
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
-
- c->pic.reference = 0;
- c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
- if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* Decode a plane */
for (row = 0; row < height; row++) {
- pixptr = c->pic.data[0] + row * c->pic.linesize[0] + planemap[p];
- pixptr_end = pixptr + c->pic.linesize[0];
+ pixptr = frame->data[0] + row * frame->linesize[0] + planemap[p];
+ pixptr_end = pixptr + frame->linesize[0];
dlen = av_be2ne16(*(const unsigned short *)(lp + row * 2));
/* Decode a row of this plane */
while (dlen > 0) {
AV_PKT_DATA_PALETTE,
NULL);
if (pal) {
- c->pic.palette_has_changed = 1;
+ frame->palette_has_changed = 1;
memcpy(c->pal, pal, AVPALETTE_SIZE);
}
- memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
+ memcpy (frame->data[1], c->pal, AVPALETTE_SIZE);
}
*got_frame = 1;
- *(AVFrame*)data = c->pic;
/* always report that the buffer was completely consumed */
return buf_size;
EightBpsContext * const c = avctx->priv_data;
c->avctx = avctx;
- c->pic.data[0] = NULL;
switch (avctx->bits_per_coded_sample) {
case 8:
return 0;
}
-static av_cold int decode_end(AVCodecContext *avctx)
-{
- EightBpsContext * const c = avctx->priv_data;
-
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
-
- return 0;
-}
-
AVCodec ff_eightbps_decoder = {
.name = "8bps",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_8BPS,
.priv_data_size = sizeof(EightBpsContext),
.init = decode_init,
- .close = decode_end,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("QuickTime 8BPS video"),
/* get output buffer */
frame->nb_samples = buf_size * (is_compr + 1);
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
}
/* get output buffer */
+ av_frame_unref(ac->frame);
ac->frame->nb_samples = 2048;
- if ((ret = ff_get_buffer(avctx, ac->frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, ac->frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include <string.h>
#include "avcodec.h"
+#include "internal.h"
#include "msrledec.h"
typedef struct AascContext {
AVCodecContext *avctx;
GetByteContext gb;
- AVFrame frame;
+ AVFrame *frame;
} AascContext;
static av_cold int aasc_decode_init(AVCodecContext *avctx)
avctx->pix_fmt = AV_PIX_FMT_BGR24;
+ s->frame = av_frame_alloc();
+ if (!s->frame)
+ return AVERROR(ENOMEM);
+
return 0;
}
AascContext *s = avctx->priv_data;
int compr, i, stride, ret;
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
case 0:
stride = (avctx->width * 3 + 3) & ~3;
for (i = avctx->height - 1; i >= 0; i--) {
- memcpy(s->frame.data[0] + i * s->frame.linesize[0], buf, avctx->width * 3);
+ memcpy(s->frame->data[0] + i * s->frame->linesize[0], buf, avctx->width * 3);
buf += stride;
}
break;
case 1:
bytestream2_init(&s->gb, buf, buf_size);
- ff_msrle_decode(avctx, (AVPicture*)&s->frame, 8, &s->gb);
+ ff_msrle_decode(avctx, (AVPicture*)s->frame, 8, &s->gb);
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unknown compression type %d\n", compr);
}
*got_frame = 1;
- *(AVFrame*)data = s->frame;
+ if ((ret = av_frame_ref(data, s->frame)) < 0)
+ return ret;
/* report that the buffer was completely consumed */
return buf_size;
{
AascContext *s = avctx->priv_data;
- /* release the last frame */
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_free(&s->frame);
return 0;
}
/* get output buffer */
frame->nb_samples = s->num_blocks * 256;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = nb_samples;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = num_blocks * BLOCK_SAMPLES;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
if (!alac->nb_samples) {
/* get output buffer */
frame->nb_samples = output_samples;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = ctx->cur_frame_length;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = AMR_BLOCK_SIZE;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = 4 * AMRWB_SFR_SIZE_16k;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "avcodec.h"
#include "bytestream.h"
+#include "internal.h"
typedef struct AnmContext {
- AVFrame frame;
+ AVFrame *frame;
int palette[AVPALETTE_COUNT];
GetByteContext gb;
int x; ///< x coordinate position
avctx->pix_fmt = AV_PIX_FMT_PAL8;
- s->frame.reference = 1;
+ s->frame = av_frame_alloc();
+ if (!s->frame)
+ return AVERROR(ENOMEM);
+
bytestream2_init(&s->gb, avctx->extradata, avctx->extradata_size);
if (bytestream2_get_bytes_left(&s->gb) < 16 * 8 + 4 * 256)
return AVERROR_INVALIDDATA;
uint8_t *dst, *dst_end;
int count, ret;
- if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0){
+ if ((ret = ff_reget_buffer(avctx, s->frame)) < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- dst = s->frame.data[0];
- dst_end = s->frame.data[0] + s->frame.linesize[0]*avctx->height;
+ dst = s->frame->data[0];
+ dst_end = s->frame->data[0] + s->frame->linesize[0]*avctx->height;
bytestream2_init(&s->gb, avpkt->data, buf_size);
do {
/* if statements are ordered by probability */
#define OP(gb, pixel, count) \
- op(&dst, dst_end, (gb), (pixel), (count), &s->x, avctx->width, s->frame.linesize[0])
+ op(&dst, dst_end, (gb), (pixel), (count), &s->x, avctx->width, s->frame->linesize[0])
int type = bytestream2_get_byte(&s->gb);
count = type & 0x7F;
}
} while (bytestream2_get_bytes_left(&s->gb) > 0);
- memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
+ memcpy(s->frame->data[1], s->palette, AVPALETTE_SIZE);
*got_frame = 1;
- *(AVFrame*)data = s->frame;
+ if ((ret = av_frame_ref(data, s->frame)) < 0)
+ return ret;
+
return buf_size;
}
static av_cold int decode_end(AVCodecContext *avctx)
{
AnmContext *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+
+ av_frame_free(&s->frame);
return 0;
}
*/
#include "libavutil/common.h"
+#include "libavutil/frame.h"
#include "libavutil/lfg.h"
#include "avcodec.h"
#include "cga_data.h"
};
typedef struct {
- AVFrame frame;
+ AVFrame *frame;
int x; /**< x cursor position (pixels) */
int y; /**< y cursor position (pixels) */
int sx; /**< saved x cursor position (pixels) */
AnsiContext *s = avctx->priv_data;
avctx->pix_fmt = AV_PIX_FMT_PAL8;
+ s->frame = av_frame_alloc();
+ if (!s->frame)
+ return AVERROR(ENOMEM);
+
/* defaults */
s->font = ff_vga16_font;
s->font_height = 16;
i = 0;
for (; i < avctx->height - s->font_height; i++)
- memcpy(s->frame.data[0] + i * s->frame.linesize[0],
- s->frame.data[0] + (i + s->font_height) * s->frame.linesize[0],
+ memcpy(s->frame->data[0] + i * s->frame->linesize[0],
+ s->frame->data[0] + (i + s->font_height) * s->frame->linesize[0],
avctx->width);
for (; i < avctx->height; i++)
- memset(s->frame.data[0] + i * s->frame.linesize[0],
+ memset(s->frame->data[0] + i * s->frame->linesize[0],
DEFAULT_BG_COLOR, avctx->width);
}
AnsiContext *s = avctx->priv_data;
int i;
for (i = 0; i < s->font_height; i++)
- memset(s->frame.data[0] + (s->y + i)*s->frame.linesize[0] + xoffset,
+ memset(s->frame->data[0] + (s->y + i)*s->frame->linesize[0] + xoffset,
DEFAULT_BG_COLOR, xlength);
}
AnsiContext *s = avctx->priv_data;
int i;
for (i = 0; i < avctx->height; i++)
- memset(s->frame.data[0] + i * s->frame.linesize[0], DEFAULT_BG_COLOR, avctx->width);
+ memset(s->frame->data[0] + i * s->frame->linesize[0], DEFAULT_BG_COLOR, avctx->width);
s->x = s->y = 0;
}
FFSWAP(int, fg, bg);
if ((s->attributes & ATTR_CONCEALED))
fg = bg;
- ff_draw_pc_font(s->frame.data[0] + s->y * s->frame.linesize[0] + s->x,
- s->frame.linesize[0], s->font, s->font_height, c, fg, bg);
+ ff_draw_pc_font(s->frame->data[0] + s->y * s->frame->linesize[0] + s->x,
+ s->frame->linesize[0], s->font, s->font_height, c, fg, bg);
s->x += FONT_WIDTH;
if (s->x >= avctx->width) {
s->x = 0;
av_log_ask_for_sample(avctx, "unsupported screen mode\n");
}
if (width != avctx->width || height != avctx->height) {
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(s->frame);
avcodec_set_dimensions(avctx, width, height);
- ret = ff_get_buffer(avctx, &s->frame);
+ ret = ff_get_buffer(avctx, s->frame, AV_GET_BUFFER_FLAG_REF);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- s->frame.pict_type = AV_PICTURE_TYPE_I;
- s->frame.palette_has_changed = 1;
- memcpy(s->frame.data[1], ff_cga_palette, 16 * 4);
+ s->frame->pict_type = AV_PICTURE_TYPE_I;
+ s->frame->palette_has_changed = 1;
+ memcpy(s->frame->data[1], ff_cga_palette, 16 * 4);
erase_screen(avctx);
} else if (c == 'l') {
erase_screen(avctx);
case 0:
erase_line(avctx, s->x, avctx->width - s->x);
if (s->y < avctx->height - s->font_height)
- memset(s->frame.data[0] + (s->y + s->font_height)*s->frame.linesize[0],
- DEFAULT_BG_COLOR, (avctx->height - s->y - s->font_height)*s->frame.linesize[0]);
+ memset(s->frame->data[0] + (s->y + s->font_height)*s->frame->linesize[0],
+ DEFAULT_BG_COLOR, (avctx->height - s->y - s->font_height)*s->frame->linesize[0]);
break;
case 1:
erase_line(avctx, 0, s->x);
if (s->y > 0)
- memset(s->frame.data[0], DEFAULT_BG_COLOR, s->y * s->frame.linesize[0]);
+ memset(s->frame->data[0], DEFAULT_BG_COLOR, s->y * s->frame->linesize[0]);
break;
case 2:
erase_screen(avctx);
const uint8_t *buf_end = buf+buf_size;
int ret, i, count;
- ret = avctx->reget_buffer(avctx, &s->frame);
+ ret = ff_reget_buffer(avctx, s->frame);
if (ret < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
if (!avctx->frame_number) {
- memset(s->frame.data[0], 0, avctx->height * FFABS(s->frame.linesize[0]));
- memset(s->frame.data[1], 0, AVPALETTE_SIZE);
+ memset(s->frame->data[0], 0, avctx->height * FFABS(s->frame->linesize[0]));
+ memset(s->frame->data[1], 0, AVPALETTE_SIZE);
}
- s->frame.pict_type = AV_PICTURE_TYPE_I;
- s->frame.palette_has_changed = 1;
- memcpy(s->frame.data[1], ff_cga_palette, 16 * 4);
+ s->frame->pict_type = AV_PICTURE_TYPE_I;
+ s->frame->palette_has_changed = 1;
+ memcpy(s->frame->data[1], ff_cga_palette, 16 * 4);
while(buf < buf_end) {
switch(s->state) {
}
*got_frame = 1;
- *(AVFrame*)data = s->frame;
+ if ((ret = av_frame_ref(data, s->frame)) < 0)
+ return ret;
return buf_size;
}
static av_cold int decode_close(AVCodecContext *avctx)
{
AnsiContext *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+
+ av_frame_free(&s->frame);
return 0;
}
/* get output buffer */
frame->nb_samples = blockstodecode;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
return 0;
}
-static inline void idct_put(ASV1Context *a, int mb_x, int mb_y)
+static inline void idct_put(ASV1Context *a, AVFrame *frame, int mb_x, int mb_y)
{
int16_t (*block)[64] = a->block;
- int linesize = a->picture.linesize[0];
+ int linesize = frame->linesize[0];
- uint8_t *dest_y = a->picture.data[0] + (mb_y * 16* linesize ) + mb_x * 16;
- uint8_t *dest_cb = a->picture.data[1] + (mb_y * 8 * a->picture.linesize[1]) + mb_x * 8;
- uint8_t *dest_cr = a->picture.data[2] + (mb_y * 8 * a->picture.linesize[2]) + mb_x * 8;
+ uint8_t *dest_y = frame->data[0] + (mb_y * 16* linesize ) + mb_x * 16;
+ uint8_t *dest_cb = frame->data[1] + (mb_y * 8 * frame->linesize[1]) + mb_x * 8;
+ uint8_t *dest_cr = frame->data[2] + (mb_y * 8 * frame->linesize[2]) + mb_x * 8;
a->dsp.idct_put(dest_y , linesize, block[0]);
a->dsp.idct_put(dest_y + 8, linesize, block[1]);
a->dsp.idct_put(dest_y + 8*linesize + 8, linesize, block[3]);
if (!(a->avctx->flags&CODEC_FLAG_GRAY)) {
- a->dsp.idct_put(dest_cb, a->picture.linesize[1], block[4]);
- a->dsp.idct_put(dest_cr, a->picture.linesize[2], block[5]);
+ a->dsp.idct_put(dest_cb, frame->linesize[1], block[4]);
+ a->dsp.idct_put(dest_cr, frame->linesize[2], block[5]);
}
}
ASV1Context * const a = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- AVFrame *picture = data;
- AVFrame * const p = &a->picture;
+ AVFrame * const p = data;
int mb_x, mb_y, ret;
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
- p->reference = 0;
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
if ((ret = decode_mb(a, a->block)) < 0)
return ret;
- idct_put(a, mb_x, mb_y);
+ idct_put(a, p, mb_x, mb_y);
}
}
if ((ret = decode_mb(a, a->block)) < 0)
return ret;
- idct_put(a, mb_x, mb_y);
+ idct_put(a, p, mb_x, mb_y);
}
}
if ((ret = decode_mb(a, a->block)) < 0)
return ret;
- idct_put(a, mb_x, mb_y);
+ idct_put(a, p, mb_x, mb_y);
}
}
- *picture = a->picture;
*got_frame = 1;
emms_c();
static av_cold int decode_init(AVCodecContext *avctx)
{
ASV1Context * const a = avctx->priv_data;
- AVFrame *p = &a->picture;
const int scale = avctx->codec_id == AV_CODEC_ID_ASV1 ? 1 : 2;
int i;
a->intra_matrix[i] = 64 * scale * ff_mpeg1_default_intra_matrix[index] / a->inv_qscale;
}
- p->qstride = a->mb_width;
- p->qscale_table = av_malloc(p->qstride * a->mb_height);
- p->quality = (32 * scale + a->inv_qscale / 2) / a->inv_qscale;
- memset(p->qscale_table, p->quality, p->qstride * a->mb_height);
-
return 0;
}
ASV1Context * const a = avctx->priv_data;
av_freep(&a->bitstream_buffer);
- av_freep(&a->picture.qscale_table);
a->bitstream_buffer_size = 0;
- if (a->picture.data[0])
- avctx->release_buffer(avctx, &a->picture);
-
return 0;
}
/* get output buffer */
frame->nb_samples = AT1_SU_SAMPLES;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = SAMPLES_PER_FRAME;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "internal.h"
#include "libavutil/internal.h"
-typedef struct AuraDecodeContext {
- AVCodecContext *avctx;
- AVFrame frame;
-} AuraDecodeContext;
-
static av_cold int aura_decode_init(AVCodecContext *avctx)
{
- AuraDecodeContext *s = avctx->priv_data;
-
- s->avctx = avctx;
/* width needs to be divisible by 4 for this codec to work */
if (avctx->width & 0x3)
return AVERROR(EINVAL);
void *data, int *got_frame,
AVPacket *pkt)
{
- AuraDecodeContext *s = avctx->priv_data;
+ AVFrame *frame = data;
uint8_t *Y, *U, *V;
uint8_t val;
int x, y, ret;
/* pixel data starts 48 bytes in, after 3x16-byte tables */
buf += 48;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
-
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
- s->frame.reference = 0;
- if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- Y = s->frame.data[0];
- U = s->frame.data[1];
- V = s->frame.data[2];
+ Y = frame->data[0];
+ U = frame->data[1];
+ V = frame->data[2];
/* iterate through each line in the height */
for (y = 0; y < avctx->height; y++) {
Y[1] = Y[ 0] + delta_table[val & 0xF];
Y += 2; U++; V++;
}
- Y += s->frame.linesize[0] - avctx->width;
- U += s->frame.linesize[1] - (avctx->width >> 1);
- V += s->frame.linesize[2] - (avctx->width >> 1);
+ Y += frame->linesize[0] - avctx->width;
+ U += frame->linesize[1] - (avctx->width >> 1);
+ V += frame->linesize[2] - (avctx->width >> 1);
}
*got_frame = 1;
- *(AVFrame*)data = s->frame;
return pkt->size;
}
-static av_cold int aura_decode_end(AVCodecContext *avctx)
-{
- AuraDecodeContext *s = avctx->priv_data;
-
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
-
- return 0;
-}
-
AVCodec ff_aura2_decoder = {
.name = "aura2",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_AURA2,
- .priv_data_size = sizeof(AuraDecodeContext),
.init = aura_decode_init,
- .close = aura_decode_end,
.decode = aura_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Auravision Aura 2"),
#define FF_QSCALE_TYPE_H264 2
#define FF_QSCALE_TYPE_VP56 3
+#if FF_API_GET_BUFFER
#define FF_BUFFER_TYPE_INTERNAL 1
#define FF_BUFFER_TYPE_USER 2 ///< direct rendering buffers (image is (de)allocated by user)
#define FF_BUFFER_TYPE_SHARED 4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared.
#define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer.
#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.
#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update).
+#endif
+
+/**
+ * The decoder will keep a reference to the frame and may reuse it later.
+ */
+#define AV_GET_BUFFER_FLAG_REF (1 << 0)
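
As used throughout the decoder conversions in this patch, a decoder passes
this flag to ff_get_buffer() when it intends to keep the frame as a
reference after returning it to the caller:

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;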
/**
* @defgroup lavc_packet AVPacket
*/
enum AVSampleFormat request_sample_fmt;
+#if FF_API_GET_BUFFER
/**
* Called at the beginning of each frame to get a buffer for it.
*
*
* - encoding: unused
* - decoding: Set by libavcodec, user can override.
+ *
+ * @deprecated use get_buffer2()
*/
+ attribute_deprecated
int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic);
/**
* but not by more than one thread at once, so does not need to be reentrant.
* - encoding: unused
* - decoding: Set by libavcodec, user can override.
+ *
+ * @deprecated custom freeing callbacks should be set from get_buffer2()
*/
+ attribute_deprecated
void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic);
/**
* - encoding: unused
* - decoding: Set by libavcodec, user can override.
*/
+ attribute_deprecated
int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic);
+#endif
+ /**
+ * This callback is called at the beginning of each frame to get data
+ * buffer(s) for it. There may be one contiguous buffer for all the data or
+ * there may be a buffer per each data plane or anything in between. Each
+ * buffer must be reference-counted using the AVBuffer API.
+ *
+ * The following fields will be set in the frame before this callback is
+ * called:
+ * - format
+ * - width, height (video only)
+ * - sample_rate, channel_layout, nb_samples (audio only)
+ * Their values may differ from the corresponding values in
+ * AVCodecContext. This callback must use the frame values, not the codec
+ * context values, to calculate the required buffer size.
+ *
+ * This callback must fill the following fields in the frame:
+ * - data[]
+ * - linesize[]
+ * - extended_data:
+ * * if the data is planar audio with more than 8 channels, then this
+ * callback must allocate and fill extended_data to contain all pointers
+ * to all data planes. data[] must hold as many pointers as it can.
+ * extended_data must be allocated with av_malloc() and will be freed in
+ * av_frame_unref().
+ * * otherwise extended_data must point to data
+ * - buf[] must contain references to the buffers that contain the frame
+ * data.
+ * - extended_buf and nb_extended_buf must be allocated with av_malloc() by
+ * this callback and filled with the extra buffers if there are more
+ * buffers than buf[] can hold. extended_buf will be freed in
+ * av_frame_unref().
+ *
+ * If CODEC_CAP_DR1 is not set then get_buffer2() must call
+ * avcodec_default_get_buffer2() instead of providing buffers allocated by
+ * some other means.
+ *
+ * Each data plane must be aligned to the maximum required by the target
+ * CPU.
+ *
+ * @see avcodec_default_get_buffer2()
+ *
+ * Video:
+ *
+ * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused
+ * (read and/or written to if it is writable) later by libavcodec.
+ *
+ * If CODEC_FLAG_EMU_EDGE is not set in s->flags, the buffer must contain an
+ * edge of the size returned by avcodec_get_edge_width() on all sides.
+ *
+ * avcodec_align_dimensions2() should be used to find the required width and
+ * height, as they normally need to be rounded up to the next multiple of 16.
+ *
+ * If frame multithreading is used and thread_safe_callbacks is set,
+ * this callback may be called from a different thread, but not from more
+ * than one at once. Does not need to be reentrant.
+ *
+ * @see avcodec_align_dimensions2()
+ *
+ * Audio:
+ *
+ * Decoders request a buffer of a particular size by setting
+ * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may,
+ * however, utilize only part of the buffer by setting AVFrame.nb_samples
+ * to a smaller value in the output frame.
+ *
+ * As a convenience, av_samples_get_buffer_size() and
+ * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2()
+ * functions to find the required data size and to fill data pointers and
+ * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+ * since all planes must be the same size.
+ *
+ * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags);
+
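For illustration (not part of this patch), a minimal custom get_buffer2()
that only logs and then defers to the default refcounted allocator could
look like this; my_get_buffer2 is a hypothetical name:

    static int my_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
    {
        /* format and width/height (or the audio fields) have already
         * been set on the frame by libavcodec at this point */
        av_log(s, AV_LOG_DEBUG, "get_buffer2: %dx%d, flags %d\n",
               frame->width, frame->height, flags);

        /* avcodec_default_get_buffer2() fills data[], linesize[] and
         * buf[]; it is also the mandatory fallback when CODEC_CAP_DR1
         * is not set. */
        return avcodec_default_get_buffer2(s, frame, flags);
    }

The caller installs it before avcodec_open2() with
avctx->get_buffer2 = my_get_buffer2;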
+ /**
+ * If non-zero, the decoded audio and video frames returned from
+ * avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted
+ * and are valid indefinitely. The caller must free them with
+ * av_frame_unref() when they are not needed anymore.
+ * Otherwise, the decoded frames must not be freed by the caller and are
+ * only valid until the next decode call.
+ *
+ * - encoding: unused
+ * - decoding: set by the caller before avcodec_open2().
+ */
+ int refcounted_frames;
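
A sketch of the caller side with refcounted frames enabled (fragment only;
fmt, frame and process() are hypothetical, error handling trimmed, frame
obtained from av_frame_alloc()):

    avctx->refcounted_frames = 1;              /* before avcodec_open2() */
    while (av_read_frame(fmt, &pkt) >= 0) {
        int got_frame = 0;
        avcodec_decode_video2(avctx, frame, &got_frame, &pkt);
        if (got_frame) {
            process(frame);        /* valid until we drop the reference */
            av_frame_unref(frame); /* caller must release it explicitly */
        }
        av_free_packet(&pkt);
    }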
/* - encoding parameters */
float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0)
*/
AVCodec *avcodec_find_decoder_by_name(const char *name);
-int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
-void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
-int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);
+#if FF_API_GET_BUFFER
+attribute_deprecated int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
+attribute_deprecated void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
+attribute_deprecated int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);
+#endif
+
+/**
+ * The default callback for AVCodecContext.get_buffer2(). It is made public so
+ * it can be called by custom get_buffer2() implementations for decoders without
+ * CODEC_CAP_DR1 set.
+ */
+int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags);
/**
* Return the amount of padding in pixels which the get_buffer callback must
*/
void avcodec_flush_buffers(AVCodecContext *avctx);
-void avcodec_default_free_buffers(AVCodecContext *s);
-
/**
* Return codec bits per sample.
*
#include "avcodec.h"
#include "get_bits.h"
+#include "internal.h"
typedef struct {
AvsBlockType type;
GetBitContext change_map;
- if ((ret = avctx->reget_buffer(avctx, p)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, p)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
- p->reference = 1;
p->pict_type = AV_PICTURE_TYPE_P;
p->key_frame = 0;
align_get_bits(&change_map);
}
- *picture = avs->picture;
+ if ((ret = av_frame_ref(picture, &avs->picture)) < 0)
+ return ret;
*got_frame = 1;
return buf_size;
static av_cold int avs_decode_end(AVCodecContext *avctx)
{
AvsContext *s = avctx->priv_data;
- if (s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
+ av_frame_unref(&s->picture);
return 0;
}
#include "avcodec.h"
#include "bethsoftvideo.h"
#include "bytestream.h"
+#include "internal.h"
typedef struct BethsoftvidContext {
AVFrame frame;
static av_cold int bethsoftvid_decode_init(AVCodecContext *avctx)
{
BethsoftvidContext *vid = avctx->priv_data;
- vid->frame.reference = 1;
- vid->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
avctx->pix_fmt = AV_PIX_FMT_PAL8;
return 0;
}
int code, ret;
int yoffset;
- if ((ret = avctx->reget_buffer(avctx, &vid->frame)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &vid->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
}
end:
+ if ((ret = av_frame_ref(data, &vid->frame)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = vid->frame;
return avpkt->size;
}
static av_cold int bethsoftvid_decode_end(AVCodecContext *avctx)
{
BethsoftvidContext * vid = avctx->priv_data;
- if(vid->frame.data[0])
- avctx->release_buffer(avctx, &vid->frame);
+ av_frame_unref(&vid->frame);
return 0;
}
typedef struct BFIContext {
AVCodecContext *avctx;
- AVFrame frame;
uint8_t *dst;
} BFIContext;
static int bfi_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
+ AVFrame *frame = data;
GetByteContext g;
int buf_size = avpkt->size;
BFIContext *bfi = avctx->priv_data;
uint32_t *pal;
int i, j, ret, height = avctx->height;
- if (bfi->frame.data[0])
- avctx->release_buffer(avctx, &bfi->frame);
-
- bfi->frame.reference = 1;
-
- if ((ret = ff_get_buffer(avctx, &bfi->frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* Set frame parameters and palette, if necessary */
if (!avctx->frame_number) {
- bfi->frame.pict_type = AV_PICTURE_TYPE_I;
- bfi->frame.key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ frame->key_frame = 1;
/* Setting the palette */
if (avctx->extradata_size > 768) {
av_log(NULL, AV_LOG_ERROR, "Palette is too large.\n");
return AVERROR_INVALIDDATA;
}
- pal = (uint32_t *)bfi->frame.data[1];
+ pal = (uint32_t *)frame->data[1];
for (i = 0; i < avctx->extradata_size / 3; i++) {
int shift = 16;
*pal = 0;
(avctx->extradata[i * 3 + j] >> 4)) << shift;
pal++;
}
- bfi->frame.palette_has_changed = 1;
+ frame->palette_has_changed = 1;
} else {
- bfi->frame.pict_type = AV_PICTURE_TYPE_P;
- bfi->frame.key_frame = 0;
+ frame->pict_type = AV_PICTURE_TYPE_P;
+ frame->key_frame = 0;
}
bytestream2_skip(&g, 4); // Unpacked size, not required.
}
src = bfi->dst;
- dst = bfi->frame.data[0];
+ dst = frame->data[0];
while (height--) {
memcpy(dst, src, avctx->width);
src += avctx->width;
- dst += bfi->frame.linesize[0];
+ dst += frame->linesize[0];
}
*got_frame = 1;
- *(AVFrame *)data = bfi->frame;
+
return buf_size;
}
static av_cold int bfi_decode_close(AVCodecContext *avctx)
{
BFIContext *bfi = avctx->priv_data;
- if (bfi->frame.data[0])
- avctx->release_buffer(avctx, &bfi->frame);
av_free(bfi->dst);
return 0;
}
AVCodecContext *avctx;
DSPContext dsp;
BinkDSPContext bdsp;
- AVFrame *pic, *last;
+ AVFrame *last;
int version; ///< internal Bink file version
int has_alpha;
int swap_planes;
memcpy(dst + i*stride, tmp + i*8, 8);
}
-static int binkb_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
- int is_key, int is_chroma)
+static int binkb_decode_plane(BinkContext *c, AVFrame *frame, GetBitContext *gb,
+ int plane_idx, int is_key, int is_chroma)
{
int blk, ret;
int i, j, bx, by;
int ybias = is_key ? -15 : 0;
int qp;
- const int stride = c->pic->linesize[plane_idx];
+ const int stride = frame->linesize[plane_idx];
int bw = is_chroma ? (c->avctx->width + 15) >> 4 : (c->avctx->width + 7) >> 3;
int bh = is_chroma ? (c->avctx->height + 15) >> 4 : (c->avctx->height + 7) >> 3;
binkb_init_bundles(c);
- ref_start = c->pic->data[plane_idx];
- ref_end = c->pic->data[plane_idx] + (bh * c->pic->linesize[plane_idx] + bw) * 8;
+ ref_start = frame->data[plane_idx];
+ ref_end = frame->data[plane_idx] + (bh * frame->linesize[plane_idx] + bw) * 8;
for (i = 0; i < 64; i++)
coordmap[i] = (i & 7) + (i >> 3) * stride;
return ret;
}
- dst = c->pic->data[plane_idx] + 8*by*stride;
+ dst = frame->data[plane_idx] + 8*by*stride;
for (bx = 0; bx < bw; bx++, dst += 8) {
blk = binkb_get_value(c, BINKB_SRC_BLOCK_TYPES);
switch (blk) {
return 0;
}
-static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
- int is_chroma)
+static int bink_decode_plane(BinkContext *c, AVFrame *frame, GetBitContext *gb,
+ int plane_idx, int is_chroma)
{
int blk, ret;
int i, j, bx, by;
LOCAL_ALIGNED_16(int32_t, dctblock, [64]);
int coordmap[64];
- const int stride = c->pic->linesize[plane_idx];
+ const int stride = frame->linesize[plane_idx];
int bw = is_chroma ? (c->avctx->width + 15) >> 4 : (c->avctx->width + 7) >> 3;
int bh = is_chroma ? (c->avctx->height + 15) >> 4 : (c->avctx->height + 7) >> 3;
int width = c->avctx->width >> is_chroma;
read_bundle(gb, c, i);
ref_start = c->last->data[plane_idx] ? c->last->data[plane_idx]
- : c->pic->data[plane_idx];
+ : frame->data[plane_idx];
ref_end = ref_start
+ (bw - 1 + c->last->linesize[plane_idx] * (bh - 1)) * 8;
if (by == bh)
break;
- dst = c->pic->data[plane_idx] + 8*by*stride;
+ dst = frame->data[plane_idx] + 8*by*stride;
prev = (c->last->data[plane_idx] ? c->last->data[plane_idx]
- : c->pic->data[plane_idx]) + 8*by*stride;
+ : frame->data[plane_idx]) + 8*by*stride;
for (bx = 0; bx < bw; bx++, dst += 8, prev += 8) {
blk = get_value(c, BINK_SRC_BLOCK_TYPES);
// 16x16 block type on odd line means part of the already decoded block, so skip it
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt)
{
BinkContext * const c = avctx->priv_data;
+ AVFrame *frame = data;
GetBitContext gb;
int plane, plane_idx, ret;
int bits_count = pkt->size << 3;
if (c->version > 'b') {
- if(c->pic->data[0])
- avctx->release_buffer(avctx, c->pic);
-
- if ((ret = ff_get_buffer(avctx, c->pic)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
} else {
- if ((ret = avctx->reget_buffer(avctx, c->pic)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, c->last)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
+ if ((ret = av_frame_ref(frame, c->last)) < 0)
+ return ret;
}
init_get_bits(&gb, pkt->data, bits_count);
if (c->has_alpha) {
if (c->version >= 'i')
skip_bits_long(&gb, 32);
- if ((ret = bink_decode_plane(c, &gb, 3, 0)) < 0)
+ if ((ret = bink_decode_plane(c, frame, &gb, 3, 0)) < 0)
return ret;
}
if (c->version >= 'i')
plane_idx = (!plane || !c->swap_planes) ? plane : (plane ^ 3);
if (c->version > 'b') {
- if ((ret = bink_decode_plane(c, &gb, plane_idx, !!plane)) < 0)
+ if ((ret = bink_decode_plane(c, frame, &gb, plane_idx, !!plane)) < 0)
return ret;
} else {
- if ((ret = binkb_decode_plane(c, &gb, plane_idx,
+ if ((ret = binkb_decode_plane(c, frame, &gb, plane_idx,
!avctx->frame_number, !!plane)) < 0)
return ret;
}
}
emms_c();
- *got_frame = 1;
- *(AVFrame*)data = *c->pic;
+ if (c->version > 'b') {
+ av_frame_unref(c->last);
+ if ((ret = av_frame_ref(c->last, frame)) < 0)
+ return ret;
+ }
- if (c->version > 'b')
- FFSWAP(AVFrame*, c->pic, c->last);
+ *got_frame = 1;
/* always report that the buffer was completely consumed */
return pkt->size;
}
c->avctx = avctx;
- c->pic = avcodec_alloc_frame();
- c->last = avcodec_alloc_frame();
- if (!c->pic || !c->last) {
- avcodec_free_frame(&c->pic);
- avcodec_free_frame(&c->last);
+ c->last = av_frame_alloc();
+ if (!c->last)
return AVERROR(ENOMEM);
- }
if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
return ret;
{
BinkContext * const c = avctx->priv_data;
- if (c->pic->data[0])
- avctx->release_buffer(avctx, c->pic);
- if (c->last->data[0])
- avctx->release_buffer(avctx, c->last);
- avcodec_free_frame(&c->pic);
- avcodec_free_frame(&c->last);
+ av_frame_free(&c->last);
free_bundles(c);
return 0;
/* get output buffer */
frame->nb_samples = s->frame_len;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "internal.h"
#include "msrledec.h"
-static av_cold int bmp_decode_init(AVCodecContext *avctx)
-{
- BMPContext *s = avctx->priv_data;
-
- avcodec_get_frame_defaults(&s->picture);
- avctx->coded_frame = &s->picture;
-
- return 0;
-}
-
static int bmp_decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- BMPContext *s = avctx->priv_data;
- AVFrame *picture = data;
- AVFrame *p = &s->picture;
+ AVFrame *p = data;
unsigned int fsize, hsize;
int width, height;
unsigned int depth;
return AVERROR_INVALIDDATA;
}
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
- p->reference = 0;
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
}
}
- *picture = s->picture;
*got_frame = 1;
return buf_size;
}
-static av_cold int bmp_decode_end(AVCodecContext *avctx)
-{
- BMPContext* c = avctx->priv_data;
-
- if (c->picture.data[0])
- avctx->release_buffer(avctx, &c->picture);
-
- return 0;
-}
-
AVCodec ff_bmp_decoder = {
.name = "bmp",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_BMP,
- .priv_data_size = sizeof(BMPContext),
- .init = bmp_decode_init,
- .close = bmp_decode_end,
.decode = bmp_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("BMP (Windows and OS/2 bitmap)"),
typedef struct BMVDecContext {
AVCodecContext *avctx;
- AVFrame pic;
uint8_t *frame, frame_base[SCREEN_WIDE * (SCREEN_HIGH + 1)];
uint32_t pal[256];
AVPacket *pkt)
{
BMVDecContext * const c = avctx->priv_data;
+ AVFrame *frame = data;
int type, scr_off;
int i, ret;
uint8_t *srcptr, *outptr;
scr_off = 0;
}
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
-
- c->pic.reference = 3;
- if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
return AVERROR_INVALIDDATA;
}
- memcpy(c->pic.data[1], c->pal, AVPALETTE_SIZE);
- c->pic.palette_has_changed = type & BMV_PALETTE;
+ memcpy(frame->data[1], c->pal, AVPALETTE_SIZE);
+ frame->palette_has_changed = type & BMV_PALETTE;
- outptr = c->pic.data[0];
+ outptr = frame->data[0];
srcptr = c->frame;
for (i = 0; i < avctx->height; i++) {
memcpy(outptr, srcptr, avctx->width);
srcptr += avctx->width;
- outptr += c->pic.linesize[0];
+ outptr += frame->linesize[0];
}
*got_frame = 1;
- *(AVFrame*)data = c->pic;
/* always report that the buffer was completely consumed */
return pkt->size;
return 0;
}
-static av_cold int decode_end(AVCodecContext *avctx)
-{
- BMVDecContext *c = avctx->priv_data;
-
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
-
- return 0;
-}
-
static const int bmv_aud_mults[16] = {
16512, 8256, 4128, 2064, 1032, 516, 258, 192, 129, 88, 64, 56, 48, 40, 36, 32
};
/* get output buffer */
frame->nb_samples = total_blocks * 32;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
.id = AV_CODEC_ID_BMV_VIDEO,
.priv_data_size = sizeof(BMVDecContext),
.init = decode_init,
- .close = decode_end,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Discworld II BMV video"),
#include "avcodec.h"
#include "bytestream.h"
+#include "internal.h"
typedef struct {
AVFrame pictures[2];
{
C93DecoderContext * const c93 = avctx->priv_data;
- if (c93->pictures[0].data[0])
- avctx->release_buffer(avctx, &c93->pictures[0]);
- if (c93->pictures[1].data[0])
- avctx->release_buffer(avctx, &c93->pictures[1]);
+ av_frame_unref(&c93->pictures[0]);
+ av_frame_unref(&c93->pictures[1]);
+
return 0;
}
C93DecoderContext * const c93 = avctx->priv_data;
AVFrame * const newpic = &c93->pictures[c93->currentpic];
AVFrame * const oldpic = &c93->pictures[c93->currentpic^1];
- AVFrame *picture = data;
GetByteContext gb;
uint8_t *out;
int stride, ret, i, x, y, b, bt = 0;
c93->currentpic ^= 1;
- newpic->reference = 1;
- newpic->buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE | FF_BUFFER_HINTS_READABLE;
- if ((ret = avctx->reget_buffer(avctx, newpic)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, newpic)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
memcpy(newpic->data[1], oldpic->data[1], 256 * 4);
}
- *picture = *newpic;
+ if ((ret = av_frame_ref(data, newpic)) < 0)
+ return ret;
*got_frame = 1;
return buf_size;
h->avctx = avctx;
avctx->pix_fmt= AV_PIX_FMT_YUV420P;
- h->cur.f = avcodec_alloc_frame();
- h->DPB[0].f = avcodec_alloc_frame();
- h->DPB[1].f = avcodec_alloc_frame();
+ h->cur.f = av_frame_alloc();
+ h->DPB[0].f = av_frame_alloc();
+ h->DPB[1].f = av_frame_alloc();
if (!h->cur.f || !h->DPB[0].f || !h->DPB[1].f) {
ff_cavs_end(avctx);
return AVERROR(ENOMEM);
av_cold int ff_cavs_end(AVCodecContext *avctx) {
AVSContext *h = avctx->priv_data;
- if (h->cur.f->data[0])
- avctx->release_buffer(avctx, h->cur.f);
- if (h->DPB[0].f->data[0])
- avctx->release_buffer(avctx, h->DPB[0].f);
- if (h->DPB[1].f->data[0])
- avctx->release_buffer(avctx, h->DPB[1].f);
- avcodec_free_frame(&h->cur.f);
- avcodec_free_frame(&h->DPB[0].f);
- avcodec_free_frame(&h->DPB[1].f);
+ av_frame_free(&h->cur.f);
+ av_frame_free(&h->DPB[0].f);
+ av_frame_free(&h->DPB[1].f);
av_free(h->top_qp);
av_free(h->top_mv[0]);
int skip_count = -1;
enum cavs_mb mb_type;
+ av_frame_unref(h->cur.f);
+
skip_bits(&h->gb, 16);//bbv_dwlay
if (h->stc == PIC_PB_START_CODE) {
h->cur.f->pict_type = get_bits(&h->gb, 2) + AV_PICTURE_TYPE_I;
if (h->stream_revision > 0)
skip_bits(&h->gb, 1); //marker_bit
}
- /* release last B frame */
- if (h->cur.f->data[0])
- h->avctx->release_buffer(h->avctx, h->cur.f);
- ff_get_buffer(h->avctx, h->cur.f);
+ ff_get_buffer(h->avctx, h->cur.f, h->cur.f->pict_type == AV_PICTURE_TYPE_B ?
+ 0 : AV_GET_BUFFER_FLAG_REF);
if (!h->edge_emu_buffer) {
int alloc_size = FFALIGN(FFABS(h->cur.f->linesize[0]) + 32, 32);
} while (ff_cavs_next_mb(h));
}
if (h->cur.f->pict_type != AV_PICTURE_TYPE_B) {
- if (h->DPB[1].f->data[0])
- h->avctx->release_buffer(h->avctx, h->DPB[1].f);
+ av_frame_unref(h->DPB[1].f);
FFSWAP(AVSFrame, h->cur, h->DPB[1]);
FFSWAP(AVSFrame, h->DPB[0], h->DPB[1]);
}
AVSContext *h = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- AVFrame *picture = data;
uint32_t stc = -1;
- int input_size;
+ int input_size, ret;
const uint8_t *buf_end;
const uint8_t *buf_ptr;
if (buf_size == 0) {
if (!h->low_delay && h->DPB[0].f->data[0]) {
*got_frame = 1;
- *picture = *h->DPB[0].f;
- if (h->cur.f->data[0])
- avctx->release_buffer(avctx, h->cur.f);
- FFSWAP(AVSFrame, h->cur, h->DPB[0]);
+ av_frame_move_ref(data, h->DPB[0].f);
}
return 0;
}
break;
case PIC_I_START_CODE:
if (!h->got_keyframe) {
- if(h->DPB[0].f->data[0])
- avctx->release_buffer(avctx, h->DPB[0].f);
- if(h->DPB[1].f->data[0])
- avctx->release_buffer(avctx, h->DPB[1].f);
+ av_frame_unref(h->DPB[0].f);
+ av_frame_unref(h->DPB[1].f);
h->got_keyframe = 1;
}
case PIC_PB_START_CODE:
*got_frame = 1;
if (h->cur.f->pict_type != AV_PICTURE_TYPE_B) {
if (h->DPB[1].f->data[0]) {
- *picture = *h->DPB[1].f;
+ if ((ret = av_frame_ref(data, h->DPB[1].f)) < 0)
+ return ret;
} else {
*got_frame = 0;
}
- } else
- *picture = *h->cur.f;
+ } else {
+ av_frame_move_ref(data, h->cur.f);
+ }
break;
case EXT_START_CODE:
//mpeg_decode_extension(avctx, buf_ptr, input_size);
#define CDG_PALETTE_SIZE 16
typedef struct CDGraphicsContext {
- AVFrame frame;
+ AVFrame *frame;
int hscroll;
int vscroll;
} CDGraphicsContext;
-static void cdg_init_frame(AVFrame *frame)
-{
- avcodec_get_frame_defaults(frame);
- frame->reference = 3;
- frame->buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_READABLE |
- FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
-}
-
static av_cold int cdg_decode_init(AVCodecContext *avctx)
{
CDGraphicsContext *cc = avctx->priv_data;
- cdg_init_frame(&cc->frame);
+ cc->frame = av_frame_alloc();
+ if (!cc->frame)
+ return AVERROR(ENOMEM);
avctx->width = CDG_FULL_WIDTH;
avctx->height = CDG_FULL_HEIGHT;
static void cdg_border_preset(CDGraphicsContext *cc, uint8_t *data)
{
int y;
- int lsize = cc->frame.linesize[0];
- uint8_t *buf = cc->frame.data[0];
+ int lsize = cc->frame->linesize[0];
+ uint8_t *buf = cc->frame->data[0];
int color = data[0] & 0x0F;
if (!(data[1] & 0x0F)) {
uint16_t color;
int i;
int array_offset = low ? 0 : 8;
- uint32_t *palette = (uint32_t *) cc->frame.data[1];
+ uint32_t *palette = (uint32_t *) cc->frame->data[1];
for (i = 0; i < 8; i++) {
color = (data[2 * i] << 6) + (data[2 * i + 1] & 0x3F);
b = ((color ) & 0x000F) * 17;
palette[i + array_offset] = r << 16 | g << 8 | b;
}
- cc->frame.palette_has_changed = 1;
+ cc->frame->palette_has_changed = 1;
}
static int cdg_tile_block(CDGraphicsContext *cc, uint8_t *data, int b)
int color;
int x, y;
int ai;
- int stride = cc->frame.linesize[0];
- uint8_t *buf = cc->frame.data[0];
+ int stride = cc->frame->linesize[0];
+ uint8_t *buf = cc->frame->data[0];
ri = (data[2] & 0x1F) * CDG_TILE_HEIGHT + cc->vscroll;
ci = (data[3] & 0x3F) * CDG_TILE_WIDTH + cc->hscroll;
int color;
int hscmd, h_off, hinc, vscmd, v_off, vinc;
int y;
- int stride = cc->frame.linesize[0];
- uint8_t *in = cc->frame.data[0];
+ int stride = cc->frame->linesize[0];
+ uint8_t *in = cc->frame->data[0];
uint8_t *out = new_frame->data[0];
color = data[0] & 0x0F;
if (!hinc && !vinc)
return;
- memcpy(new_frame->data[1], cc->frame.data[1], CDG_PALETTE_SIZE * 4);
+ memcpy(new_frame->data[1], cc->frame->data[1], CDG_PALETTE_SIZE * 4);
for (y = FFMAX(0, vinc); y < FFMIN(CDG_FULL_HEIGHT + vinc, CDG_FULL_HEIGHT); y++)
memcpy(out + FFMAX(0, hinc) + stride * y,
int ret;
uint8_t command, inst;
uint8_t cdg_data[CDG_DATA_SIZE];
- AVFrame new_frame;
+ AVFrame *frame = data;
CDGraphicsContext *cc = avctx->priv_data;
if (buf_size < CDG_MINIMUM_PKT_SIZE) {
return AVERROR(EINVAL);
}
- ret = avctx->reget_buffer(avctx, &cc->frame);
+ ret = ff_reget_buffer(avctx, cc->frame);
if (ret) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
if (!avctx->frame_number)
- memset(cc->frame.data[0], 0, cc->frame.linesize[0] * avctx->height);
+ memset(cc->frame->data[0], 0, cc->frame->linesize[0] * avctx->height);
command = bytestream_get_byte(&buf);
inst = bytestream_get_byte(&buf);
switch (inst) {
case CDG_INST_MEMORY_PRESET:
if (!(cdg_data[1] & 0x0F))
- memset(cc->frame.data[0], cdg_data[0] & 0x0F,
- cc->frame.linesize[0] * CDG_FULL_HEIGHT);
+ memset(cc->frame->data[0], cdg_data[0] & 0x0F,
+ cc->frame->linesize[0] * CDG_FULL_HEIGHT);
break;
case CDG_INST_LOAD_PAL_LO:
case CDG_INST_LOAD_PAL_HIGH:
return AVERROR(EINVAL);
}
- cdg_init_frame(&new_frame);
- ret = ff_get_buffer(avctx, &new_frame);
+ ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
if (ret) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- cdg_scroll(cc, cdg_data, &new_frame, inst == CDG_INST_SCROLL_COPY);
- avctx->release_buffer(avctx, &cc->frame);
- cc->frame = new_frame;
+ cdg_scroll(cc, cdg_data, frame, inst == CDG_INST_SCROLL_COPY);
+ av_frame_unref(cc->frame);
+ ret = av_frame_ref(cc->frame, frame);
+ if (ret < 0)
+ return ret;
break;
default:
break;
}
+ if (!frame->data[0]) {
+ ret = av_frame_ref(frame, cc->frame);
+ if (ret < 0)
+ return ret;
+ }
*got_frame = 1;
} else {
*got_frame = 0;
buf_size = 0;
}
- *(AVFrame *) data = cc->frame;
return buf_size;
}
{
CDGraphicsContext *cc = avctx->priv_data;
- if (cc->frame.data[0])
- avctx->release_buffer(avctx, &cc->frame);
+ av_frame_free(&cc->frame);
return 0;
}
{
CDXLVideoContext *c = avctx->priv_data;
- avcodec_get_frame_defaults(&c->frame);
c->new_video_size = 0;
c->avctx = avctx;
}
}
-static void cdxl_decode_rgb(CDXLVideoContext *c)
+static void cdxl_decode_rgb(CDXLVideoContext *c, AVFrame *frame)
{
- uint32_t *new_palette = (uint32_t *)c->frame.data[1];
+ uint32_t *new_palette = (uint32_t *)frame->data[1];
import_palette(c, new_palette);
- import_format(c, c->frame.linesize[0], c->frame.data[0]);
+ import_format(c, frame->linesize[0], frame->data[0]);
}
-static void cdxl_decode_ham6(CDXLVideoContext *c)
+static void cdxl_decode_ham6(CDXLVideoContext *c, AVFrame *frame)
{
AVCodecContext *avctx = c->avctx;
uint32_t new_palette[16], r, g, b;
int x, y;
ptr = c->new_video;
- out = c->frame.data[0];
+ out = frame->data[0];
import_palette(c, new_palette);
import_format(c, avctx->width, c->new_video);
}
AV_WL24(out + x * 3, r | g | b);
}
- out += c->frame.linesize[0];
+ out += frame->linesize[0];
}
}
-static void cdxl_decode_ham8(CDXLVideoContext *c)
+static void cdxl_decode_ham8(CDXLVideoContext *c, AVFrame *frame)
{
AVCodecContext *avctx = c->avctx;
uint32_t new_palette[64], r, g, b;
int x, y;
ptr = c->new_video;
- out = c->frame.data[0];
+ out = frame->data[0];
import_palette(c, new_palette);
import_format(c, avctx->width, c->new_video);
}
AV_WL24(out + x * 3, r | g | b);
}
- out += c->frame.linesize[0];
+ out += frame->linesize[0];
}
}
int *got_frame, AVPacket *pkt)
{
CDXLVideoContext *c = avctx->priv_data;
- AVFrame * const p = &c->frame;
+ AVFrame * const p = data;
int ret, w, h, encoding, aligned_width, buf_size = pkt->size;
const uint8_t *buf = pkt->data;
return AVERROR_PATCHWELCOME;
}
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
- p->reference = 0;
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
if (!c->new_video)
return AVERROR(ENOMEM);
if (c->bpp == 8)
- cdxl_decode_ham8(c);
+ cdxl_decode_ham8(c, p);
else
- cdxl_decode_ham6(c);
+ cdxl_decode_ham6(c, p);
} else {
- cdxl_decode_rgb(c);
+ cdxl_decode_rgb(c, p);
}
*got_frame = 1;
- *(AVFrame*)data = c->frame;
return buf_size;
}
CDXLVideoContext *c = avctx->priv_data;
av_free(c->new_video);
- if (c->frame.data[0])
- avctx->release_buffer(avctx, &c->frame);
return 0;
}
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
+#include "internal.h"
typedef struct {
s->data = buf;
s->size = buf_size;
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &s->frame))) {
+ if ((ret = ff_reget_buffer(avctx, &s->frame))) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
if (s->palette_video)
memcpy (s->frame.data[1], s->pal, AVPALETTE_SIZE);
+ if ((ret = av_frame_ref(data, &s->frame)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = s->frame;
/* report that the buffer was completely consumed */
return buf_size;
{
CinepakContext *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
return 0;
}
#include "internal.h"
#include "put_bits.h"
-typedef struct CLJRContext {
- AVFrame picture;
-} CLJRContext;
-
-static av_cold int common_init(AVCodecContext *avctx)
-{
- CLJRContext * const a = avctx->priv_data;
-
- avctx->coded_frame = &a->picture;
-
- return 0;
-}
-
#if CONFIG_CLJR_DECODER
static int decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- CLJRContext * const a = avctx->priv_data;
GetBitContext gb;
- AVFrame *picture = data;
- AVFrame * const p = &a->picture;
+ AVFrame * const p = data;
int x, y, ret;
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
if (avctx->height <= 0 || avctx->width <= 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid width or height\n");
return AVERROR_INVALIDDATA;
return AVERROR_INVALIDDATA;
}
- p->reference = 0;
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
init_get_bits(&gb, buf, buf_size * 8);
for (y = 0; y < avctx->height; y++) {
- uint8_t *luma = &a->picture.data[0][y * a->picture.linesize[0]];
- uint8_t *cb = &a->picture.data[1][y * a->picture.linesize[1]];
- uint8_t *cr = &a->picture.data[2][y * a->picture.linesize[2]];
+ uint8_t *luma = &p->data[0][y * p->linesize[0]];
+ uint8_t *cb = &p->data[1][y * p->linesize[1]];
+ uint8_t *cr = &p->data[2][y * p->linesize[2]];
for (x = 0; x < avctx->width; x += 4) {
luma[3] = get_bits(&gb, 5) << 3;
luma[2] = get_bits(&gb, 5) << 3;
}
}
- *picture = a->picture;
*got_frame = 1;
return buf_size;
static av_cold int decode_init(AVCodecContext *avctx)
{
avctx->pix_fmt = AV_PIX_FMT_YUV411P;
- return common_init(avctx);
-}
-
-static av_cold int decode_end(AVCodecContext *avctx)
-{
- CLJRContext *a = avctx->priv_data;
-
- if (a->picture.data[0])
- avctx->release_buffer(avctx, &a->picture);
return 0;
}
.name = "cljr",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_CLJR,
- .priv_data_size = sizeof(CLJRContext),
.init = decode_init,
- .close = decode_end,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Cirrus Logic AccuPak"),
#endif
#if CONFIG_CLJR_ENCODER
+typedef struct CLJRContext {
+ AVFrame picture;
+} CLJRContext;
+
+static av_cold int encode_init(AVCodecContext *avctx)
+{
+ CLJRContext * const a = avctx->priv_data;
+
+ avctx->coded_frame = &a->picture;
+
+ return 0;
+}
+
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *p, int *got_packet)
{
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_CLJR,
.priv_data_size = sizeof(CLJRContext),
- .init = common_init,
+ .init = encode_init,
.encode2 = encode_frame,
.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV411P,
AV_PIX_FMT_NONE },
int *got_picture_ptr, AVPacket *avpkt)
{
CLLCContext *ctx = avctx->priv_data;
- AVFrame *pic = avctx->coded_frame;
+ AVFrame *pic = data;
uint8_t *src = avpkt->data;
uint32_t info_tag, info_offset;
int data_size;
GetBitContext gb;
int coding_type, ret;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
-
- pic->reference = 0;
-
/* Skip the INFO header if present */
info_offset = 0;
info_tag = AV_RL32(src);
avctx->pix_fmt = AV_PIX_FMT_RGB24;
avctx->bits_per_raw_sample = 8;
- ret = ff_get_buffer(avctx, pic);
+ ret = ff_get_buffer(avctx, pic, 0);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer.\n");
return ret;
avctx->pix_fmt = AV_PIX_FMT_ARGB;
avctx->bits_per_raw_sample = 8;
- ret = ff_get_buffer(avctx, pic);
+ ret = ff_get_buffer(avctx, pic, 0);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer.\n");
return ret;
pic->pict_type = AV_PICTURE_TYPE_I;
*got_picture_ptr = 1;
- *(AVFrame *)data = *pic;
return avpkt->size;
}
{
CLLCContext *ctx = avctx->priv_data;
- if (avctx->coded_frame->data[0])
- avctx->release_buffer(avctx, avctx->coded_frame);
-
- av_freep(&avctx->coded_frame);
av_freep(&ctx->swapped_buf);
return 0;
ff_dsputil_init(&ctx->dsp, avctx);
- avctx->coded_frame = avcodec_alloc_frame();
- if (!avctx->coded_frame) {
- av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
- return AVERROR(ENOMEM);
- }
-
return 0;
}
p->excitation, avctx->frame_size, p->order);
frame->nb_samples = avctx->frame_size;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
if (q->discarded_packets >= 2) {
frame->nb_samples = q->samples_per_channel;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "libavutil/lzo.h"
typedef struct {
- AVFrame pic;
int linelen, height, bpp;
unsigned int decomp_size;
unsigned char* decomp_buf;
return AVERROR_INVALIDDATA;
}
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
- c->pic.reference = 1;
- c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_READABLE |
- FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
+ if ((ret = ff_get_buffer(avctx, picture, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
// flip upside down, add difference frame
if (buf[0] & 1) { // keyframe
- c->pic.pict_type = AV_PICTURE_TYPE_I;
- c->pic.key_frame = 1;
+ picture->pict_type = AV_PICTURE_TYPE_I;
+ picture->key_frame = 1;
switch (c->bpp) {
case 16:
- copy_frame_16(&c->pic, c->decomp_buf, c->linelen, c->height);
+ copy_frame_16(picture, c->decomp_buf, c->linelen, c->height);
break;
case 32:
- copy_frame_32(&c->pic, c->decomp_buf, c->linelen, c->height);
+ copy_frame_32(picture, c->decomp_buf, c->linelen, c->height);
break;
default:
- copy_frame_default(&c->pic, c->decomp_buf, FFALIGN(c->linelen, 4),
+ copy_frame_default(picture, c->decomp_buf, FFALIGN(c->linelen, 4),
c->linelen, c->height);
}
} else {
- c->pic.pict_type = AV_PICTURE_TYPE_P;
- c->pic.key_frame = 0;
+ picture->pict_type = AV_PICTURE_TYPE_P;
+ picture->key_frame = 0;
switch (c->bpp) {
case 16:
- add_frame_16(&c->pic, c->decomp_buf, c->linelen, c->height);
+ add_frame_16(picture, c->decomp_buf, c->linelen, c->height);
break;
case 32:
- add_frame_32(&c->pic, c->decomp_buf, c->linelen, c->height);
+ add_frame_32(picture, c->decomp_buf, c->linelen, c->height);
break;
default:
- add_frame_default(&c->pic, c->decomp_buf, FFALIGN(c->linelen, 4),
+ add_frame_default(picture, c->decomp_buf, FFALIGN(c->linelen, 4),
c->linelen, c->height);
}
}
- *picture = c->pic;
*got_frame = 1;
return buf_size;
}
return AVERROR_INVALIDDATA;
}
c->bpp = avctx->bits_per_coded_sample;
- c->pic.data[0] = NULL;
c->linelen = avctx->width * avctx->bits_per_coded_sample / 8;
c->height = avctx->height;
stride = c->linelen;
static av_cold int decode_end(AVCodecContext *avctx) {
CamStudioContext *c = avctx->priv_data;
av_freep(&c->decomp_buf);
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
return 0;
}
typedef struct CyuvDecodeContext {
AVCodecContext *avctx;
int width, height;
- AVFrame frame;
} CyuvDecodeContext;
static av_cold int cyuv_decode_init(AVCodecContext *avctx)
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
CyuvDecodeContext *s=avctx->priv_data;
+ AVFrame *frame = data;
unsigned char *y_plane;
unsigned char *u_plane;
/* pixel data starts 48 bytes in, after 3x16-byte tables */
stream_ptr = 48;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
-
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
- s->frame.reference = 0;
- if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- y_plane = s->frame.data[0];
- u_plane = s->frame.data[1];
- v_plane = s->frame.data[2];
+ y_plane = frame->data[0];
+ u_plane = frame->data[1];
+ v_plane = frame->data[2];
/* iterate through each line in the height */
for (y_ptr = 0, u_ptr = 0, v_ptr = 0;
- y_ptr < (s->height * s->frame.linesize[0]);
- y_ptr += s->frame.linesize[0] - s->width,
- u_ptr += s->frame.linesize[1] - s->width / 4,
- v_ptr += s->frame.linesize[2] - s->width / 4) {
+ y_ptr < (s->height * frame->linesize[0]);
+ y_ptr += frame->linesize[0] - s->width,
+ u_ptr += frame->linesize[1] - s->width / 4,
+ v_ptr += frame->linesize[2] - s->width / 4) {
/* reset predictors */
cur_byte = buf[stream_ptr++];
}
*got_frame = 1;
- *(AVFrame*)data= s->frame;
return buf_size;
}
-static av_cold int cyuv_decode_end(AVCodecContext *avctx)
-{
- CyuvDecodeContext *s = avctx->priv_data;
-
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
-
- return 0;
-}
-
#if CONFIG_AURA_DECODER
AVCodec ff_aura_decoder = {
.name = "aura",
.id = AV_CODEC_ID_AURA,
.priv_data_size = sizeof(CyuvDecodeContext),
.init = cyuv_decode_init,
- .close = cyuv_decode_end,
.decode = cyuv_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Auravision AURA"),
.id = AV_CODEC_ID_CYUV,
.priv_data_size = sizeof(CyuvDecodeContext),
.init = cyuv_decode_init,
- .close = cyuv_decode_end,
.decode = cyuv_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Creative YUV (CYUV)"),
/* get output buffer */
frame->nb_samples = 256 * (s->sample_blocks / 8);
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "libavutil/mem.h"
typedef struct DfaContext {
- AVFrame pic;
-
uint32_t pal[256];
uint8_t *frame_buf;
} DfaContext;
void *data, int *got_frame,
AVPacket *avpkt)
{
+ AVFrame *frame = data;
DfaContext *s = avctx->priv_data;
GetByteContext gb;
const uint8_t *buf = avpkt->data;
int ret;
int i, pal_elems;
- if (s->pic.data[0])
- avctx->release_buffer(avctx, &s->pic);
-
- if ((ret = ff_get_buffer(avctx, &s->pic))) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
s->pal[i] = bytestream2_get_be24(&gb) << 2;
s->pal[i] |= (s->pal[i] >> 6) & 0x333;
}
- s->pic.palette_has_changed = 1;
+ frame->palette_has_changed = 1;
} else if (chunk_type <= 9) {
if (decoder[chunk_type - 2](&gb, s->frame_buf, avctx->width, avctx->height)) {
av_log(avctx, AV_LOG_ERROR, "Error decoding %s chunk\n",
}
buf = s->frame_buf;
- dst = s->pic.data[0];
+ dst = frame->data[0];
for (i = 0; i < avctx->height; i++) {
memcpy(dst, buf, avctx->width);
- dst += s->pic.linesize[0];
+ dst += frame->linesize[0];
buf += avctx->width;
}
- memcpy(s->pic.data[1], s->pal, sizeof(s->pal));
+ memcpy(frame->data[1], s->pal, sizeof(s->pal));
*got_frame = 1;
- *(AVFrame*)data = s->pic;
return avpkt->size;
}
{
DfaContext *s = avctx->priv_data;
- if (s->pic.data[0])
- avctx->release_buffer(avctx, &s->pic);
-
av_freep(&s->frame_buf);
return 0;
typedef struct DNXHDContext {
AVCodecContext *avctx;
- AVFrame picture;
GetBitContext gb;
int cid; ///< compression id
unsigned int width, height;
DNXHDContext *ctx = avctx->priv_data;
ctx->avctx = avctx;
- avctx->coded_frame = &ctx->picture;
- ctx->picture.type = AV_PICTURE_TYPE_I;
- ctx->picture.key_frame = 1;
return 0;
}
return 0;
}
-static int dnxhd_decode_header(DNXHDContext *ctx, const uint8_t *buf, int buf_size, int first_field)
+static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame,
+ const uint8_t *buf, int buf_size, int first_field)
{
static const uint8_t header_prefix[] = { 0x00, 0x00, 0x02, 0x80, 0x01 };
int i, cid;
}
if (buf[5] & 2) { /* interlaced */
ctx->cur_field = buf[5] & 1;
- ctx->picture.interlaced_frame = 1;
- ctx->picture.top_field_first = first_field ^ ctx->cur_field;
+ frame->interlaced_frame = 1;
+ frame->top_field_first = first_field ^ ctx->cur_field;
av_log(ctx->avctx, AV_LOG_DEBUG, "interlaced %d, cur field %d\n", buf[5] & 3, ctx->cur_field);
}
av_dlog(ctx->avctx, "mb width %d, mb height %d\n", ctx->mb_width, ctx->mb_height);
- if ((ctx->height+15)>>4 == ctx->mb_height && ctx->picture.interlaced_frame)
+ if ((ctx->height+15)>>4 == ctx->mb_height && frame->interlaced_frame)
ctx->height <<= 1;
if (ctx->mb_height > 68 ||
- (ctx->mb_height<<ctx->picture.interlaced_frame) > (ctx->height+15)>>4) {
+ (ctx->mb_height << frame->interlaced_frame) > (ctx->height+15)>>4) {
av_log(ctx->avctx, AV_LOG_ERROR, "mb height too big: %d\n", ctx->mb_height);
return -1;
}
dnxhd_decode_dct_block(ctx, block, n, qscale, 6, 8, 4);
}
-static int dnxhd_decode_macroblock(DNXHDContext *ctx, int x, int y)
+static int dnxhd_decode_macroblock(DNXHDContext *ctx, AVFrame *frame, int x, int y)
{
int shift1 = ctx->bit_depth == 10;
- int dct_linesize_luma = ctx->picture.linesize[0];
- int dct_linesize_chroma = ctx->picture.linesize[1];
+ int dct_linesize_luma = frame->linesize[0];
+ int dct_linesize_chroma = frame->linesize[1];
uint8_t *dest_y, *dest_u, *dest_v;
int dct_y_offset, dct_x_offset;
int qscale, i;
ctx->decode_dct_block(ctx, ctx->blocks[i], i, qscale);
}
- if (ctx->picture.interlaced_frame) {
+ if (frame->interlaced_frame) {
dct_linesize_luma <<= 1;
dct_linesize_chroma <<= 1;
}
- dest_y = ctx->picture.data[0] + ((y * dct_linesize_luma) << 4) + (x << (4 + shift1));
- dest_u = ctx->picture.data[1] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1));
- dest_v = ctx->picture.data[2] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1));
+ dest_y = frame->data[0] + ((y * dct_linesize_luma) << 4) + (x << (4 + shift1));
+ dest_u = frame->data[1] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1));
+ dest_v = frame->data[2] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1));
if (ctx->cur_field) {
- dest_y += ctx->picture.linesize[0];
- dest_u += ctx->picture.linesize[1];
- dest_v += ctx->picture.linesize[2];
+ dest_y += frame->linesize[0];
+ dest_u += frame->linesize[1];
+ dest_v += frame->linesize[2];
}
dct_y_offset = dct_linesize_luma << 3;
return 0;
}
-static int dnxhd_decode_macroblocks(DNXHDContext *ctx, const uint8_t *buf, int buf_size)
+static int dnxhd_decode_macroblocks(DNXHDContext *ctx, AVFrame *frame,
+ const uint8_t *buf, int buf_size)
{
int x, y;
for (y = 0; y < ctx->mb_height; y++) {
init_get_bits(&ctx->gb, buf + ctx->mb_scan_index[y], (buf_size - ctx->mb_scan_index[y]) << 3);
for (x = 0; x < ctx->mb_width; x++) {
//START_TIMER;
- dnxhd_decode_macroblock(ctx, x, y);
+ dnxhd_decode_macroblock(ctx, frame, x, y);
//STOP_TIMER("decode macroblock");
}
}
DNXHDContext *ctx = avctx->priv_data;
AVFrame *picture = data;
int first_field = 1;
+ int ret;
av_dlog(avctx, "frame size %d\n", buf_size);
decode_coding_unit:
- if (dnxhd_decode_header(ctx, buf, buf_size, first_field) < 0)
+ if (dnxhd_decode_header(ctx, picture, buf, buf_size, first_field) < 0)
return -1;
if ((avctx->width || avctx->height) &&
avcodec_set_dimensions(avctx, ctx->width, ctx->height);
if (first_field) {
- if (ctx->picture.data[0])
- avctx->release_buffer(avctx, &ctx->picture);
- if (ff_get_buffer(avctx, &ctx->picture) < 0) {
+ if ((ret = ff_get_buffer(avctx, picture, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return -1;
+ return ret;
}
+ picture->pict_type = AV_PICTURE_TYPE_I;
+ picture->key_frame = 1;
}
- dnxhd_decode_macroblocks(ctx, buf + 0x280, buf_size - 0x280);
+ dnxhd_decode_macroblocks(ctx, picture, buf + 0x280, buf_size - 0x280);
- if (first_field && ctx->picture.interlaced_frame) {
+ if (first_field && picture->interlaced_frame) {
buf += ctx->cid_table->coding_unit_size;
buf_size -= ctx->cid_table->coding_unit_size;
first_field = 0;
goto decode_coding_unit;
}
- *picture = ctx->picture;
*got_frame = 1;
return buf_size;
}
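Worth noting in the DNxHD hunks: the buffer is requested only on the first field, and the second coding unit decodes into the same frame, offset by one line through ctx->cur_field. The control flow, condensed and with error handling elided:

/* Two-field flow from the hunks above (sketch, not verbatim). */
decode_coding_unit:
    dnxhd_decode_header(ctx, picture, buf, buf_size, first_field);
    if (first_field)
        ff_get_buffer(avctx, picture, 0);        /* once per output frame */
    dnxhd_decode_macroblocks(ctx, picture, buf + 0x280, buf_size - 0x280);
    if (first_field && picture->interlaced_frame) {
        first_field = 0;
        goto decode_coding_unit;                 /* second field, same frame */
    }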
{
DNXHDContext *ctx = avctx->priv_data;
- if (ctx->picture.data[0])
- avctx->release_buffer(avctx, &ctx->picture);
ff_free_vlc(&ctx->ac_vlc);
ff_free_vlc(&ctx->dc_vlc);
ff_free_vlc(&ctx->run_vlc);
/* get output buffer */
frame->nb_samples = out / avctx->channels;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "avcodec.h"
#include "internal.h"
-typedef struct DPXContext {
- AVFrame picture;
-} DPXContext;
-
-
static unsigned int read32(const uint8_t **ptr, int is_big)
{
unsigned int temp;
const uint8_t *buf = avpkt->data;
const uint8_t *buf_end = avpkt->data + avpkt->size;
int buf_size = avpkt->size;
- DPXContext *const s = avctx->priv_data;
- AVFrame *picture = data;
- AVFrame *const p = &s->picture;
+ AVFrame *const p = data;
uint8_t *ptr;
unsigned int offset;
return AVERROR_INVALIDDATA;
}
- if (s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
if ((ret = av_image_check_size(w, h, 0, avctx)) < 0)
return ret;
if (w != avctx->width || h != avctx->height)
avcodec_set_dimensions(avctx, w, h);
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
break;
}
- *picture = s->picture;
*got_frame = 1;
return buf_size;
}
-static av_cold int decode_init(AVCodecContext *avctx)
-{
- DPXContext *s = avctx->priv_data;
- avcodec_get_frame_defaults(&s->picture);
- avctx->coded_frame = &s->picture;
- return 0;
-}
-
-static av_cold int decode_end(AVCodecContext *avctx)
-{
- DPXContext *s = avctx->priv_data;
- if (s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
-
- return 0;
-}
-
AVCodec ff_dpx_decoder = {
.name = "dpx",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_DPX,
- .priv_data_size = sizeof(DPXContext),
- .init = decode_init,
- .close = decode_end,
.decode = decode_frame,
.long_name = NULL_IF_CONFIG_SMALL("DPX image"),
.capabilities = CODEC_CAP_DR1,
break;
}
- cin->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if (avctx->reget_buffer(avctx, &cin->frame)) {
+ if ((res = ff_reget_buffer(avctx, &cin->frame)) < 0) {
av_log(cin->avctx, AV_LOG_ERROR, "delphinecinvideo: reget_buffer() failed to allocate a frame\n");
- return -1;
+ return res;
}
memcpy(cin->frame.data[1], cin->palette, sizeof(cin->palette));
FFSWAP(uint8_t *, cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_table[CIN_PRE_BMP]);
+ if ((res = av_frame_ref(data, &cin->frame)) < 0)
+ return res;
+
*got_frame = 1;
- *(AVFrame *)data = cin->frame;
return buf_size;
}
CinVideoContext *cin = avctx->priv_data;
int i;
- if (cin->frame.data[0])
- avctx->release_buffer(avctx, &cin->frame);
+ av_frame_unref(&cin->frame);
for (i = 0; i < 3; ++i)
av_free(cin->bitmap_table[i]);
/* get output buffer */
frame->nb_samples = avpkt->size - cin->initial_decode_frame;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
return -1; /* NOTE: we only accept several full frames */
}
- if (s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
-
- s->picture.reference = 0;
s->picture.key_frame = 1;
s->picture.pict_type = AV_PICTURE_TYPE_I;
avctx->pix_fmt = s->sys->pix_fmt;
avctx->time_base = s->sys->time_base;
avcodec_set_dimensions(avctx, s->sys->width, s->sys->height);
- if (ff_get_buffer(avctx, &s->picture) < 0) {
+ if (ff_get_buffer(avctx, &s->picture, 0) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
/* return image */
*got_frame = 1;
- *(AVFrame*)data = s->picture;
+ av_frame_move_ref(data, &s->picture);
/* Determine the codec's sample_aspect ratio from the packet */
vsc_pack = buf + 80*5 + 48 + 5;
{
DVVideoContext *s = c->priv_data;
- if (s->picture.data[0])
- c->release_buffer(c, &s->picture);
+ av_frame_unref(&s->picture);
return 0;
}
* Decoder context
*/
typedef struct DxaDecContext {
- AVFrame pic, prev;
+ AVFrame prev;
int dsize;
uint8_t *decomp_buf;
static const int shift1[6] = { 0, 8, 8, 8, 4, 4 };
static const int shift2[6] = { 0, 0, 8, 4, 0, 4 };
-static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst, uint8_t *src, uint8_t *ref)
+static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst,
+ int stride, uint8_t *src, uint8_t *ref)
{
uint8_t *code, *data, *mv, *msk, *tmp, *tmp2;
int i, j, k;
int type, x, y, d, d2;
- int stride = c->pic.linesize[0];
uint32_t mask;
code = src + 12;
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
+ AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
DxaDecContext * const c = avctx->priv_data;
buf_size -= 768+4;
}
- if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- memcpy(c->pic.data[1], c->pal, AVPALETTE_SIZE);
- c->pic.palette_has_changed = pc;
+ memcpy(frame->data[1], c->pal, AVPALETTE_SIZE);
+ frame->palette_has_changed = pc;
- outptr = c->pic.data[0];
+ outptr = frame->data[0];
srcptr = c->decomp_buf;
tmpptr = c->prev.data[0];
- stride = c->pic.linesize[0];
+ stride = frame->linesize[0];
if(buf[0]=='N' && buf[1]=='U' && buf[2]=='L' && buf[3]=='L')
compr = -1;
}
switch(compr){
case -1:
- c->pic.key_frame = 0;
- c->pic.pict_type = AV_PICTURE_TYPE_P;
+ frame->key_frame = 0;
+ frame->pict_type = AV_PICTURE_TYPE_P;
if(c->prev.data[0])
- memcpy(c->pic.data[0], c->prev.data[0], c->pic.linesize[0] * avctx->height);
+ memcpy(frame->data[0], c->prev.data[0], frame->linesize[0] * avctx->height);
else{ // Should happen only when first frame is 'NULL'
- memset(c->pic.data[0], 0, c->pic.linesize[0] * avctx->height);
- c->pic.key_frame = 1;
- c->pic.pict_type = AV_PICTURE_TYPE_I;
+ memset(frame->data[0], 0, frame->linesize[0] * avctx->height);
+ frame->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
}
break;
case 2:
case 3:
case 4:
case 5:
- c->pic.key_frame = !(compr & 1);
- c->pic.pict_type = (compr & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
+ frame->key_frame = !(compr & 1);
+ frame->pict_type = (compr & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
for(j = 0; j < avctx->height; j++){
if(compr & 1){
for(i = 0; i < avctx->width; i++)
break;
case 12: // ScummVM coding
case 13:
- c->pic.key_frame = 0;
- c->pic.pict_type = AV_PICTURE_TYPE_P;
- decode_13(avctx, c, c->pic.data[0], srcptr, c->prev.data[0]);
+ frame->key_frame = 0;
+ frame->pict_type = AV_PICTURE_TYPE_P;
+ decode_13(avctx, c, frame->data[0], frame->linesize[0], srcptr, c->prev.data[0]);
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unknown/unsupported compression type %d\n", buf[4]);
return AVERROR_INVALIDDATA;
}
- FFSWAP(AVFrame, c->pic, c->prev);
- if(c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
+ av_frame_unref(&c->prev);
+ if ((ret = av_frame_ref(&c->prev, frame)) < 0)
+ return ret;
*got_frame = 1;
- *(AVFrame*)data = c->prev;
/* always report that the buffer was completely consumed */
return orig_buf_size;
DxaDecContext * const c = avctx->priv_data;
av_freep(&c->decomp_buf);
- if(c->prev.data[0])
- avctx->release_buffer(avctx, &c->prev);
- if(c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
+ av_frame_unref(&c->prev);
return 0;
}
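DXA must keep its previous output as a reference frame, which is what AV_GET_BUFFER_FLAG_REF signals: the decoder may hold on to the frame after returning. The reference bookkeeping then reduces to:

/* Keep-one-reference pattern from the DXA hunks above. */
if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
    return ret;
/* ... decode, possibly predicting from c->prev ... */
av_frame_unref(&c->prev);                    /* drop the old reference */
if ((ret = av_frame_ref(&c->prev, frame)) < 0)
    return ret;                              /* keep the new one */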
static av_cold int decode_init(AVCodecContext *avctx)
{
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
- avctx->coded_frame = avcodec_alloc_frame();
- if (!avctx->coded_frame)
- return AVERROR(ENOMEM);
return 0;
}
AVPacket *avpkt)
{
int h, w;
- AVFrame *pic = avctx->coded_frame;
+ AVFrame *pic = data;
const uint8_t *src = avpkt->data;
uint8_t *Y1, *Y2, *U, *V;
int ret;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
-
if (avpkt->size < avctx->width * avctx->height * 3 / 2 + 16) {
av_log(avctx, AV_LOG_ERROR, "packet too small\n");
return AVERROR_INVALIDDATA;
}
- pic->reference = 0;
- if ((ret = ff_get_buffer(avctx, pic)) < 0)
+ if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
return ret;
pic->pict_type = AV_PICTURE_TYPE_I;
}
*got_frame = 1;
- *(AVFrame*)data = *pic;
return avpkt->size;
}
-static av_cold int decode_close(AVCodecContext *avctx)
-{
- AVFrame *pic = avctx->coded_frame;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
- av_freep(&avctx->coded_frame);
-
- return 0;
-}
-
AVCodec ff_dxtory_decoder = {
.name = "dxtory",
.long_name = NULL_IF_CONFIG_SMALL("Dxtory"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_DXTORY,
.init = decode_init,
- .close = decode_close,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
};
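Once no frame state lives in the codec context, .close (and here priv_data as well) can disappear entirely, as the Dxtory and DPX entries above show. An illustrative minimal registration, with placeholder name and id:

/* Illustrative stateless decoder entry; name, id and callback are
 * placeholders, not part of this series. */
AVCodec ff_sketch_decoder = {
    .name         = "sketch",
    .type         = AVMEDIA_TYPE_VIDEO,
    .id           = AV_CODEC_ID_NONE,
    .decode       = sketch_decode_frame,
    .capabilities = CODEC_CAP_DR1,      /* frames come from ff_get_buffer() */
};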
ff_dxva2_get_surface_index(ctx, r),
r->long_ref != 0);
- if ((r->f.reference & PICT_TOP_FIELD) && r->field_poc[0] != INT_MAX)
+ if ((r->reference & PICT_TOP_FIELD) && r->field_poc[0] != INT_MAX)
pp->FieldOrderCntList[i][0] = r->field_poc[0];
- if ((r->f.reference & PICT_BOTTOM_FIELD) && r->field_poc[1] != INT_MAX)
+ if ((r->reference & PICT_BOTTOM_FIELD) && r->field_poc[1] != INT_MAX)
pp->FieldOrderCntList[i][1] = r->field_poc[1];
pp->FrameNumList[i] = r->long_ref ? r->pic_id : r->frame_num;
- if (r->f.reference & PICT_TOP_FIELD)
+ if (r->reference & PICT_TOP_FIELD)
pp->UsedForReferenceFlags |= 1 << (2*i + 0);
- if (r->f.reference & PICT_BOTTOM_FIELD)
+ if (r->reference & PICT_BOTTOM_FIELD)
pp->UsedForReferenceFlags |= 1 << (2*i + 1);
} else {
pp->RefFrameList[i].bPicEntry = 0xff;
unsigned plane;
fill_picture_entry(&slice->RefPicList[list][i],
ff_dxva2_get_surface_index(ctx, r),
- r->f.reference == PICT_BOTTOM_FIELD);
+ r->reference == PICT_BOTTOM_FIELD);
for (plane = 0; plane < 3; plane++) {
int w, o;
if (plane == 0 && h->luma_weight_flag[list]) {
typedef struct CmvContext {
AVCodecContext *avctx;
- AVFrame frame; ///< current
- AVFrame last_frame; ///< last
- AVFrame last2_frame; ///< second-last
+ AVFrame *last_frame; ///< last
+ AVFrame *last2_frame; ///< second-last
int width, height;
unsigned int palette[AVPALETTE_COUNT];
} CmvContext;
CmvContext *s = avctx->priv_data;
s->avctx = avctx;
avctx->pix_fmt = AV_PIX_FMT_PAL8;
+
+ s->last_frame = av_frame_alloc();
+ s->last2_frame = av_frame_alloc();
+ if (!s->last_frame || !s->last2_frame) {
+ av_frame_free(&s->last_frame);
+ av_frame_free(&s->last2_frame);
+ return AVERROR(ENOMEM);
+ }
+
return 0;
}
-static void cmv_decode_intra(CmvContext * s, const uint8_t *buf, const uint8_t *buf_end){
- unsigned char *dst = s->frame.data[0];
+static void cmv_decode_intra(CmvContext * s, AVFrame *frame,
+ const uint8_t *buf, const uint8_t *buf_end)
+{
+ unsigned char *dst = frame->data[0];
int i;
for (i=0; i < s->avctx->height && buf_end - buf >= s->avctx->width; i++) {
memcpy(dst, buf, s->avctx->width);
- dst += s->frame.linesize[0];
+ dst += frame->linesize[0];
buf += s->avctx->width;
}
}
}
}
-static void cmv_decode_inter(CmvContext * s, const uint8_t *buf, const uint8_t *buf_end){
+static void cmv_decode_inter(CmvContext *s, AVFrame *frame, const uint8_t *buf,
+ const uint8_t *buf_end)
+{
const uint8_t *raw = buf + (s->avctx->width*s->avctx->height/16);
int x,y,i;
for(y=0; y<s->avctx->height/4; y++)
for(x=0; x<s->avctx->width/4 && buf_end - buf > i; x++) {
if (buf[i]==0xFF) {
- unsigned char *dst = s->frame.data[0] + (y*4)*s->frame.linesize[0] + x*4;
+ unsigned char *dst = frame->data[0] + (y*4)*frame->linesize[0] + x*4;
if (raw+16<buf_end && *raw==0xFF) { /* intra */
raw++;
memcpy(dst, raw, 4);
- memcpy(dst+s->frame.linesize[0], raw+4, 4);
- memcpy(dst+2*s->frame.linesize[0], raw+8, 4);
- memcpy(dst+3*s->frame.linesize[0], raw+12, 4);
+ memcpy(dst + frame->linesize[0], raw+4, 4);
+ memcpy(dst + 2 * frame->linesize[0], raw+8, 4);
+ memcpy(dst + 3 * frame->linesize[0], raw+12, 4);
raw+=16;
}else if(raw<buf_end) { /* inter using second-last frame as reference */
int xoffset = (*raw & 0xF) - 7;
int yoffset = ((*raw >> 4)) - 7;
- if (s->last2_frame.data[0])
- cmv_motcomp(s->frame.data[0], s->frame.linesize[0],
- s->last2_frame.data[0], s->last2_frame.linesize[0],
+ if (s->last2_frame->data[0])
+ cmv_motcomp(frame->data[0], frame->linesize[0],
+ s->last2_frame->data[0], s->last2_frame->linesize[0],
x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height);
raw++;
}
}else{ /* inter using last frame as reference */
int xoffset = (buf[i] & 0xF) - 7;
int yoffset = ((buf[i] >> 4)) - 7;
- cmv_motcomp(s->frame.data[0], s->frame.linesize[0],
- s->last_frame.data[0], s->last_frame.linesize[0],
+ cmv_motcomp(frame->data[0], frame->linesize[0],
+ s->last_frame->data[0], s->last_frame->linesize[0],
x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height);
}
i++;
int buf_size = avpkt->size;
CmvContext *s = avctx->priv_data;
const uint8_t *buf_end = buf + buf_size;
+ AVFrame *frame = data;
+ int ret;
if (buf_end - buf < EA_PREAMBLE_SIZE)
return AVERROR_INVALIDDATA;
if (av_image_check_size(s->width, s->height, 0, s->avctx))
return -1;
- /* shuffle */
- if (s->last2_frame.data[0])
- avctx->release_buffer(avctx, &s->last2_frame);
- FFSWAP(AVFrame, s->last_frame, s->last2_frame);
- FFSWAP(AVFrame, s->frame, s->last_frame);
-
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
- if (ff_get_buffer(avctx, &s->frame)<0) {
+ if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return -1;
+ return ret;
}
- memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
+ memcpy(frame->data[1], s->palette, AVPALETTE_SIZE);
buf += EA_PREAMBLE_SIZE;
if ((buf[0]&1)) { // subtype
- cmv_decode_inter(s, buf+2, buf_end);
- s->frame.key_frame = 0;
- s->frame.pict_type = AV_PICTURE_TYPE_P;
+ cmv_decode_inter(s, frame, buf+2, buf_end);
+ frame->key_frame = 0;
+ frame->pict_type = AV_PICTURE_TYPE_P;
}else{
- s->frame.key_frame = 1;
- s->frame.pict_type = AV_PICTURE_TYPE_I;
- cmv_decode_intra(s, buf+2, buf_end);
+ frame->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ cmv_decode_intra(s, frame, buf+2, buf_end);
}
+ av_frame_unref(s->last2_frame);
+ av_frame_move_ref(s->last2_frame, s->last_frame);
+ if ((ret = av_frame_ref(s->last_frame, frame)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = s->frame;
return buf_size;
}
static av_cold int cmv_decode_end(AVCodecContext *avctx){
CmvContext *s = avctx->priv_data;
- if (s->frame.data[0])
- s->avctx->release_buffer(avctx, &s->frame);
- if (s->last_frame.data[0])
- s->avctx->release_buffer(avctx, &s->last_frame);
- if (s->last2_frame.data[0])
- s->avctx->release_buffer(avctx, &s->last2_frame);
+
+ av_frame_free(&s->last_frame);
+ av_frame_free(&s->last2_frame);
return 0;
}
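CMV needs a two-frame history, and the shuffle is cheapest with av_frame_move_ref(), which transfers ownership without touching reference counts; only the current frame costs a real av_frame_ref(). From the hunk above:

/* Two-deep reference history, one new reference per call. */
av_frame_unref(s->last2_frame);                  /* drop the oldest frame */
av_frame_move_ref(s->last2_frame, s->last_frame);
if ((ret = av_frame_ref(s->last_frame, frame)) < 0)
    return ret;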
typedef struct MadContext {
AVCodecContext *avctx;
DSPContext dsp;
- AVFrame frame;
AVFrame last_frame;
GetBitContext gb;
void *bitstream_buf;
dst[j*dst_stride + i] = av_clip_uint8(src[j*src_stride + i] + add);
}
-static inline void comp_block(MadContext *t, int mb_x, int mb_y,
+static inline void comp_block(MadContext *t, AVFrame *frame,
+ int mb_x, int mb_y,
int j, int mv_x, int mv_y, int add)
{
if (j < 4) {
- comp(t->frame.data[0] + (mb_y*16 + ((j&2)<<2))*t->frame.linesize[0] + mb_x*16 + ((j&1)<<3),
- t->frame.linesize[0],
+ comp(frame->data[0] + (mb_y*16 + ((j&2)<<2))*frame->linesize[0] + mb_x*16 + ((j&1)<<3),
+ frame->linesize[0],
t->last_frame.data[0] + (mb_y*16 + ((j&2)<<2) + mv_y)*t->last_frame.linesize[0] + mb_x*16 + ((j&1)<<3) + mv_x,
t->last_frame.linesize[0], add);
} else if (!(t->avctx->flags & CODEC_FLAG_GRAY)) {
int index = j - 3;
- comp(t->frame.data[index] + (mb_y*8)*t->frame.linesize[index] + mb_x * 8,
- t->frame.linesize[index],
+ comp(frame->data[index] + (mb_y*8)*frame->linesize[index] + mb_x * 8,
+ frame->linesize[index],
t->last_frame.data[index] + (mb_y * 8 + (mv_y/2))*t->last_frame.linesize[index] + mb_x * 8 + (mv_x/2),
t->last_frame.linesize[index], add);
}
}
-static inline void idct_put(MadContext *t, int16_t *block, int mb_x, int mb_y, int j)
+static inline void idct_put(MadContext *t, AVFrame *frame, int16_t *block,
+ int mb_x, int mb_y, int j)
{
if (j < 4) {
ff_ea_idct_put_c(
- t->frame.data[0] + (mb_y*16 + ((j&2)<<2))*t->frame.linesize[0] + mb_x*16 + ((j&1)<<3),
- t->frame.linesize[0], block);
+ frame->data[0] + (mb_y*16 + ((j&2)<<2))*frame->linesize[0] + mb_x*16 + ((j&1)<<3),
+ frame->linesize[0], block);
} else if (!(t->avctx->flags & CODEC_FLAG_GRAY)) {
int index = j - 3;
ff_ea_idct_put_c(
- t->frame.data[index] + (mb_y*8)*t->frame.linesize[index] + mb_x*8,
- t->frame.linesize[index], block);
+ frame->data[index] + (mb_y*8)*frame->linesize[index] + mb_x*8,
+ frame->linesize[index], block);
}
}
return value;
}
-static void decode_mb(MadContext *s, int inter)
+static void decode_mb(MadContext *s, AVFrame *frame, int inter)
{
int mv_map = 0;
int mv_x, mv_y;
for (j=0; j<6; j++) {
if (mv_map & (1<<j)) { // mv_x and mv_y are guarded by mv_map
int add = 2*decode_motion(&s->gb);
- comp_block(s, s->mb_x, s->mb_y, j, mv_x, mv_y, add);
+ comp_block(s, frame, s->mb_x, s->mb_y, j, mv_x, mv_y, add);
} else {
s->dsp.clear_block(s->block);
decode_block_intra(s, s->block);
- idct_put(s, s->block, s->mb_x, s->mb_y, j);
+ idct_put(s, frame, s->block, s->mb_x, s->mb_y, j);
}
}
}
int buf_size = avpkt->size;
const uint8_t *buf_end = buf+buf_size;
MadContext *s = avctx->priv_data;
+ AVFrame *frame = data;
int width, height;
int chunk_type;
- int inter;
+ int inter, ret;
if (buf_size < 17) {
av_log(avctx, AV_LOG_ERROR, "Input buffer too small\n");
if (av_image_check_size(width, height, 0, avctx) < 0)
return -1;
avcodec_set_dimensions(avctx, width, height);
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->last_frame);
}
- s->frame.reference = 1;
- if (!s->frame.data[0]) {
- if (ff_get_buffer(avctx, &s->frame) < 0) {
- av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return -1;
- }
+ if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
av_fast_padded_malloc(&s->bitstream_buf, &s->bitstream_buf_size,
for (s->mb_y=0; s->mb_y < (avctx->height+15)/16; s->mb_y++)
for (s->mb_x=0; s->mb_x < (avctx->width +15)/16; s->mb_x++)
- decode_mb(s, inter);
+ decode_mb(s, frame, inter);
*got_frame = 1;
- *(AVFrame*)data = s->frame;
- if (chunk_type != MADe_TAG)
- FFSWAP(AVFrame, s->frame, s->last_frame);
+ if (chunk_type != MADe_TAG) {
+ av_frame_unref(&s->last_frame);
+ if ((ret = av_frame_ref(&s->last_frame, frame)) < 0)
+ return ret;
+ }
return buf_size;
}
static av_cold int decode_end(AVCodecContext *avctx)
{
MadContext *t = avctx->priv_data;
- if (t->frame.data[0])
- avctx->release_buffer(avctx, &t->frame);
- if (t->last_frame.data[0])
- avctx->release_buffer(avctx, &t->last_frame);
+ av_frame_unref(&t->last_frame);
av_free(t->bitstream_buf);
return 0;
}
typedef struct TgqContext {
AVCodecContext *avctx;
- AVFrame frame;
int width, height;
ScanTable scantable;
int qtable[64];
block[0] += 128 << 4;
}
-static void tgq_idct_put_mb(TgqContext *s, int16_t (*block)[64],
+static void tgq_idct_put_mb(TgqContext *s, int16_t (*block)[64], AVFrame *frame,
int mb_x, int mb_y)
{
- int linesize = s->frame.linesize[0];
- uint8_t *dest_y = s->frame.data[0] + (mb_y * 16 * linesize) + mb_x * 16;
- uint8_t *dest_cb = s->frame.data[1] + (mb_y * 8 * s->frame.linesize[1]) + mb_x * 8;
- uint8_t *dest_cr = s->frame.data[2] + (mb_y * 8 * s->frame.linesize[2]) + mb_x * 8;
+ int linesize = frame->linesize[0];
+ uint8_t *dest_y = frame->data[0] + (mb_y * 16 * linesize) + mb_x * 16;
+ uint8_t *dest_cb = frame->data[1] + (mb_y * 8 * frame->linesize[1]) + mb_x * 8;
+ uint8_t *dest_cr = frame->data[2] + (mb_y * 8 * frame->linesize[2]) + mb_x * 8;
ff_ea_idct_put_c(dest_y , linesize, block[0]);
ff_ea_idct_put_c(dest_y + 8, linesize, block[1]);
ff_ea_idct_put_c(dest_y + 8 * linesize , linesize, block[2]);
ff_ea_idct_put_c(dest_y + 8 * linesize + 8, linesize, block[3]);
if (!(s->avctx->flags & CODEC_FLAG_GRAY)) {
- ff_ea_idct_put_c(dest_cb, s->frame.linesize[1], block[4]);
- ff_ea_idct_put_c(dest_cr, s->frame.linesize[2], block[5]);
+ ff_ea_idct_put_c(dest_cb, frame->linesize[1], block[4]);
+ ff_ea_idct_put_c(dest_cr, frame->linesize[2], block[5]);
}
}
memset(dst + j * dst_stride, level, 8);
}
-static void tgq_idct_put_mb_dconly(TgqContext *s, int mb_x, int mb_y, const int8_t *dc)
+static void tgq_idct_put_mb_dconly(TgqContext *s, AVFrame *frame,
+ int mb_x, int mb_y, const int8_t *dc)
{
- int linesize = s->frame.linesize[0];
- uint8_t *dest_y = s->frame.data[0] + (mb_y * 16 * linesize) + mb_x * 16;
- uint8_t *dest_cb = s->frame.data[1] + (mb_y * 8 * s->frame.linesize[1]) + mb_x * 8;
- uint8_t *dest_cr = s->frame.data[2] + (mb_y * 8 * s->frame.linesize[2]) + mb_x * 8;
+ int linesize = frame->linesize[0];
+ uint8_t *dest_y = frame->data[0] + (mb_y * 16 * linesize) + mb_x * 16;
+ uint8_t *dest_cb = frame->data[1] + (mb_y * 8 * frame->linesize[1]) + mb_x * 8;
+ uint8_t *dest_cr = frame->data[2] + (mb_y * 8 * frame->linesize[2]) + mb_x * 8;
tgq_dconly(s, dest_y, linesize, dc[0]);
tgq_dconly(s, dest_y + 8, linesize, dc[1]);
tgq_dconly(s, dest_y + 8 * linesize, linesize, dc[2]);
tgq_dconly(s, dest_y + 8 * linesize + 8, linesize, dc[3]);
if (!(s->avctx->flags & CODEC_FLAG_GRAY)) {
- tgq_dconly(s, dest_cb, s->frame.linesize[1], dc[4]);
- tgq_dconly(s, dest_cr, s->frame.linesize[2], dc[5]);
+ tgq_dconly(s, dest_cb, frame->linesize[1], dc[4]);
+ tgq_dconly(s, dest_cr, frame->linesize[2], dc[5]);
}
}
-static void tgq_decode_mb(TgqContext *s, int mb_y, int mb_x)
+static void tgq_decode_mb(TgqContext *s, AVFrame *frame, int mb_y, int mb_x)
{
int mode;
int i;
init_get_bits(&gb, s->gb.buffer, FFMIN(s->gb.buffer_end - s->gb.buffer, mode) * 8);
for (i = 0; i < 6; i++)
tgq_decode_block(s, s->block[i], &gb);
- tgq_idct_put_mb(s, s->block, mb_x, mb_y);
+ tgq_idct_put_mb(s, s->block, frame, mb_x, mb_y);
bytestream2_skip(&s->gb, mode);
} else {
if (mode == 3) {
} else {
av_log(s->avctx, AV_LOG_ERROR, "unsupported mb mode %i\n", mode);
}
- tgq_idct_put_mb_dconly(s, mb_x, mb_y, dc);
+ tgq_idct_put_mb_dconly(s, frame, mb_x, mb_y, dc);
}
}
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
TgqContext *s = avctx->priv_data;
+ AVFrame *frame = data;
int x, y, ret;
int big_endian = AV_RL32(&buf[4]) > 0x000FFFFF;
if (s->avctx->width!=s->width || s->avctx->height!=s->height) {
avcodec_set_dimensions(s->avctx, s->width, s->height);
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
}
tgq_calculate_qtable(s, bytestream2_get_byteu(&s->gb));
bytestream2_skip(&s->gb, 3);
- if (!s->frame.data[0]) {
- s->frame.key_frame = 1;
- s->frame.pict_type = AV_PICTURE_TYPE_I;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
- if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
- av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return ret;
- }
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ frame->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
for (y = 0; y < FFALIGN(avctx->height, 16) >> 4; y++)
for (x = 0; x < FFALIGN(avctx->width, 16) >> 4; x++)
- tgq_decode_mb(s, y, x);
+ tgq_decode_mb(s, frame, y, x);
*got_frame = 1;
- *(AVFrame*)data = s->frame;
return avpkt->size;
}
-static av_cold int tgq_decode_end(AVCodecContext *avctx)
-{
- TgqContext *s = avctx->priv_data;
- if (s->frame.data[0])
- s->avctx->release_buffer(avctx, &s->frame);
- return 0;
-}
-
AVCodec ff_eatgq_decoder = {
.name = "eatgq",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_TGQ,
.priv_data_size = sizeof(TgqContext),
.init = tgq_decode_init,
- .close = tgq_decode_end,
.decode = tgq_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Electronic Arts TGQ video"),
#include "avcodec.h"
#define BITSTREAM_READER_LE
#include "get_bits.h"
+#include "internal.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
typedef struct TgvContext {
AVCodecContext *avctx;
- AVFrame frame;
AVFrame last_frame;
+ uint8_t *frame_buffer;
int width,height;
uint32_t palette[AVPALETTE_COUNT];
* Decode inter-frame
* @return 0 on success, -1 on critical buffer underflow
*/
-static int tgv_decode_inter(TgvContext *s, const uint8_t *buf,
- const uint8_t *buf_end)
+static int tgv_decode_inter(TgvContext *s, AVFrame *frame,
+ const uint8_t *buf, const uint8_t *buf_end)
{
int num_mvs;
int num_blocks_raw;
for (j = 0; j < 4; j++)
for (i = 0; i < 4; i++)
- s->frame.data[0][(y * 4 + j) * s->frame.linesize[0] + (x * 4 + i)] =
+ frame->data[0][(y * 4 + j) * frame->linesize[0] + (x * 4 + i)] =
src[j * src_stride + i];
}
return 0;
}
-/** release AVFrame buffers if allocated */
-static void cond_release_buffer(AVFrame *pic)
-{
- if (pic->data[0]) {
- av_freep(&pic->data[0]);
- av_free(pic->data[1]);
- }
-}
-
static int tgv_decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt)
int buf_size = avpkt->size;
TgvContext *s = avctx->priv_data;
const uint8_t *buf_end = buf + buf_size;
+ AVFrame *frame = data;
int chunk_type, ret;
chunk_type = AV_RL32(&buf[0]);
s->height = AV_RL16(&buf[2]);
if (s->avctx->width != s->width || s->avctx->height != s->height) {
avcodec_set_dimensions(s->avctx, s->width, s->height);
- cond_release_buffer(&s->frame);
- cond_release_buffer(&s->last_frame);
+ av_freep(&s->frame_buffer);
+ av_frame_unref(&s->last_frame);
}
pal_count = AV_RL16(&buf[6]);
if ((ret = av_image_check_size(s->width, s->height, 0, avctx)) < 0)
return ret;
- /* shuffle */
- FFSWAP(AVFrame, s->frame, s->last_frame);
- if (!s->frame.data[0]) {
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
- s->frame.linesize[0] = s->width;
+ if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
+ return ret;
- s->frame.data[0] = av_malloc(s->width * s->height);
- if (!s->frame.data[0])
- return AVERROR(ENOMEM);
- s->frame.data[1] = av_malloc(AVPALETTE_SIZE);
- if (!s->frame.data[1]) {
- av_freep(&s->frame.data[0]);
- return AVERROR(ENOMEM);
- }
- }
- memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
+ memcpy(frame->data[1], s->palette, AVPALETTE_SIZE);
if (chunk_type == kVGT_TAG) {
- s->frame.key_frame = 1;
- s->frame.pict_type = AV_PICTURE_TYPE_I;
- if (unpack(buf, buf_end, s->frame.data[0], s->avctx->width, s->avctx->height) < 0) {
+ int y;
+ frame->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
+
+ if (!s->frame_buffer &&
+ !(s->frame_buffer = av_malloc(s->width * s->height)))
+ return AVERROR(ENOMEM);
+
+ if (unpack(buf, buf_end, s->frame_buffer, s->avctx->width, s->avctx->height) < 0) {
av_log(avctx, AV_LOG_WARNING, "truncated intra frame\n");
return AVERROR_INVALIDDATA;
}
+ for (y = 0; y < s->height; y++)
+ memcpy(frame->data[0] + y * frame->linesize[0],
+ s->frame_buffer + y * s->width,
+ s->width);
} else {
if (!s->last_frame.data[0]) {
av_log(avctx, AV_LOG_WARNING, "inter frame without corresponding intra frame\n");
return buf_size;
}
- s->frame.key_frame = 0;
- s->frame.pict_type = AV_PICTURE_TYPE_P;
- if (tgv_decode_inter(s, buf, buf_end) < 0) {
+ frame->key_frame = 0;
+ frame->pict_type = AV_PICTURE_TYPE_P;
+ if (tgv_decode_inter(s, frame, buf, buf_end) < 0) {
av_log(avctx, AV_LOG_WARNING, "truncated inter frame\n");
return AVERROR_INVALIDDATA;
}
}
+ av_frame_unref(&s->last_frame);
+ if ((ret = av_frame_ref(&s->last_frame, frame)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = s->frame;
return buf_size;
}
static av_cold int tgv_decode_end(AVCodecContext *avctx)
{
TgvContext *s = avctx->priv_data;
- cond_release_buffer(&s->frame);
- cond_release_buffer(&s->last_frame);
+ av_frame_unref(&s->last_frame);
+ av_freep(&s->frame_buffer);
av_free(s->mv_codebook);
av_free(s->block_codebook);
return 0;
.close = tgv_decode_end,
.decode = tgv_decode_frame,
.long_name = NULL_IF_CONFIG_SMALL("Electronic Arts TGV video"),
+ .capabilities = CODEC_CAP_DR1,
};
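TGV used to write into av_malloc()ed planes with linesize forced to the picture width, and unpack() still assumes that packed layout; intra frames therefore go through a scratch buffer and a per-row copy into the (possibly padded) frame from ff_get_buffer(). That indirection is what makes CODEC_CAP_DR1 safe to advertise:

/* Packed scratch buffer into padded frame, as in the TGV intra path above. */
for (y = 0; y < s->height; y++)
    memcpy(frame->data[0] + y * frame->linesize[0],   /* frame stride  */
           s->frame_buffer + y * s->width,            /* packed stride */
           s->width);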
typedef struct TqiContext {
MpegEncContext s;
- AVFrame frame;
void *bitstream_buf;
unsigned int bitstream_buf_size;
DECLARE_ALIGNED(16, int16_t, block)[6][64];
return 0;
}
-static inline void tqi_idct_put(TqiContext *t, int16_t (*block)[64])
+static inline void tqi_idct_put(TqiContext *t, AVFrame *frame, int16_t (*block)[64])
{
MpegEncContext *s = &t->s;
- int linesize= t->frame.linesize[0];
- uint8_t *dest_y = t->frame.data[0] + (s->mb_y * 16* linesize ) + s->mb_x * 16;
- uint8_t *dest_cb = t->frame.data[1] + (s->mb_y * 8 * t->frame.linesize[1]) + s->mb_x * 8;
- uint8_t *dest_cr = t->frame.data[2] + (s->mb_y * 8 * t->frame.linesize[2]) + s->mb_x * 8;
+ int linesize = frame->linesize[0];
+ uint8_t *dest_y = frame->data[0] + (s->mb_y * 16* linesize ) + s->mb_x * 16;
+ uint8_t *dest_cb = frame->data[1] + (s->mb_y * 8 * frame->linesize[1]) + s->mb_x * 8;
+ uint8_t *dest_cr = frame->data[2] + (s->mb_y * 8 * frame->linesize[2]) + s->mb_x * 8;
ff_ea_idct_put_c(dest_y , linesize, block[0]);
ff_ea_idct_put_c(dest_y + 8, linesize, block[1]);
ff_ea_idct_put_c(dest_y + 8*linesize , linesize, block[2]);
ff_ea_idct_put_c(dest_y + 8*linesize + 8, linesize, block[3]);
if(!(s->avctx->flags&CODEC_FLAG_GRAY)) {
- ff_ea_idct_put_c(dest_cb, t->frame.linesize[1], block[4]);
- ff_ea_idct_put_c(dest_cr, t->frame.linesize[2], block[5]);
+ ff_ea_idct_put_c(dest_cb, frame->linesize[1], block[4]);
+ ff_ea_idct_put_c(dest_cr, frame->linesize[2], block[5]);
}
}
const uint8_t *buf_end = buf+buf_size;
TqiContext *t = avctx->priv_data;
MpegEncContext *s = &t->s;
+ AVFrame *frame = data;
+ int ret;
s->width = AV_RL16(&buf[0]);
s->height = AV_RL16(&buf[2]);
tqi_calculate_qtable(s, buf[4]);
buf += 8;
- if (t->frame.data[0])
- avctx->release_buffer(avctx, &t->frame);
-
if (s->avctx->width!=s->width || s->avctx->height!=s->height)
avcodec_set_dimensions(s->avctx, s->width, s->height);
- if(ff_get_buffer(avctx, &t->frame) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return -1;
+ return ret;
}
av_fast_padded_malloc(&t->bitstream_buf, &t->bitstream_buf_size,
{
if (tqi_decode_mb(s, t->block) < 0)
break;
- tqi_idct_put(t, t->block);
+ tqi_idct_put(t, frame, t->block);
}
*got_frame = 1;
- *(AVFrame*)data = t->frame;
return buf_size;
}
static av_cold int tqi_decode_end(AVCodecContext *avctx)
{
TqiContext *t = avctx->priv_data;
- if(t->frame.data[0])
- avctx->release_buffer(avctx, &t->frame);
av_free(t->bitstream_buf);
return 0;
}
mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
error = s->error_status_table[mb_index];
- if (IS_INTER(s->cur_pic->f.mb_type[mb_index]))
+ if (IS_INTER(s->cur_pic->mb_type[mb_index]))
continue; // inter
if (!(error & ER_DC_ERROR))
continue; // dc-ok
for (j = b_x + 1; j < w; j++) {
int mb_index_j = (j >> is_luma) + (b_y >> is_luma) * s->mb_stride;
int error_j = s->error_status_table[mb_index_j];
- int intra_j = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]);
+ int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
color[0] = dc[j + b_y * stride];
distance[0] = j - b_x;
for (j = b_x - 1; j >= 0; j--) {
int mb_index_j = (j >> is_luma) + (b_y >> is_luma) * s->mb_stride;
int error_j = s->error_status_table[mb_index_j];
- int intra_j = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]);
+ int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
color[1] = dc[j + b_y * stride];
distance[1] = b_x - j;
for (j = b_y + 1; j < h; j++) {
int mb_index_j = (b_x >> is_luma) + (j >> is_luma) * s->mb_stride;
int error_j = s->error_status_table[mb_index_j];
- int intra_j = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]);
+ int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
color[2] = dc[b_x + j * stride];
for (j = b_y - 1; j >= 0; j--) {
int mb_index_j = (b_x >> is_luma) + (j >> is_luma) * s->mb_stride;
int error_j = s->error_status_table[mb_index_j];
- int intra_j = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]);
+ int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
color[3] = dc[b_x + j * stride];
distance[3] = b_y - j;
int y;
int left_status = s->error_status_table[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride];
int right_status = s->error_status_table[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride];
- int left_intra = IS_INTRA(s->cur_pic->f.mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
- int right_intra = IS_INTRA(s->cur_pic->f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
+ int left_intra = IS_INTRA(s->cur_pic->mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
+ int right_intra = IS_INTRA(s->cur_pic->mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
int left_damage = left_status & ER_MB_ERROR;
int right_damage = right_status & ER_MB_ERROR;
int offset = b_x * 8 + b_y * stride * 8;
- int16_t *left_mv = s->cur_pic->f.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
- int16_t *right_mv = s->cur_pic->f.motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];
+ int16_t *left_mv = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
+ int16_t *right_mv = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];
if (!(left_damage || right_damage))
continue; // both undamaged
if ((!left_intra) && (!right_intra) &&
int x;
int top_status = s->error_status_table[(b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride];
int bottom_status = s->error_status_table[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride];
- int top_intra = IS_INTRA(s->cur_pic->f.mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]);
- int bottom_intra = IS_INTRA(s->cur_pic->f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
+ int top_intra = IS_INTRA(s->cur_pic->mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]);
+ int bottom_intra = IS_INTRA(s->cur_pic->mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
int top_damage = top_status & ER_MB_ERROR;
int bottom_damage = bottom_status & ER_MB_ERROR;
int offset = b_x * 8 + b_y * stride * 8;
- int16_t *top_mv = s->cur_pic->f.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
- int16_t *bottom_mv = s->cur_pic->f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
+ int16_t *top_mv = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
+ int16_t *bottom_mv = s->cur_pic->motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
if (!(top_damage || bottom_damage))
continue; // both undamaged
int f = 0;
int error = s->error_status_table[mb_xy];
- if (IS_INTRA(s->cur_pic->f.mb_type[mb_xy]))
+ if (IS_INTRA(s->cur_pic->mb_type[mb_xy]))
f = MV_FROZEN; // intra // FIXME check
if (!(error & ER_MV_ERROR))
f = MV_FROZEN; // inter with undamaged MV
const int mb_xy = mb_x + mb_y * s->mb_stride;
int mv_dir = (s->last_pic && s->last_pic->f.data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
- if (IS_INTRA(s->cur_pic->f.mb_type[mb_xy]))
+ if (IS_INTRA(s->cur_pic->mb_type[mb_xy]))
continue;
if (!(s->error_status_table[mb_xy] & ER_MV_ERROR))
continue;
if (fixed[mb_xy] == MV_FROZEN)
continue;
- assert(!IS_INTRA(s->cur_pic->f.mb_type[mb_xy]));
+ assert(!IS_INTRA(s->cur_pic->mb_type[mb_xy]));
assert(s->last_pic && s->last_pic->f.data[0]);
j = 0;
if (mb_x > 0 && fixed[mb_xy - 1]) {
mv_predictor[pred_count][0] =
- s->cur_pic->f.motion_val[0][mot_index - mot_step][0];
+ s->cur_pic->motion_val[0][mot_index - mot_step][0];
mv_predictor[pred_count][1] =
- s->cur_pic->f.motion_val[0][mot_index - mot_step][1];
+ s->cur_pic->motion_val[0][mot_index - mot_step][1];
ref[pred_count] =
- s->cur_pic->f.ref_index[0][4 * (mb_xy - 1)];
+ s->cur_pic->ref_index[0][4 * (mb_xy - 1)];
pred_count++;
}
if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
mv_predictor[pred_count][0] =
- s->cur_pic->f.motion_val[0][mot_index + mot_step][0];
+ s->cur_pic->motion_val[0][mot_index + mot_step][0];
mv_predictor[pred_count][1] =
- s->cur_pic->f.motion_val[0][mot_index + mot_step][1];
+ s->cur_pic->motion_val[0][mot_index + mot_step][1];
ref[pred_count] =
- s->cur_pic->f.ref_index[0][4 * (mb_xy + 1)];
+ s->cur_pic->ref_index[0][4 * (mb_xy + 1)];
pred_count++;
}
if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
mv_predictor[pred_count][0] =
- s->cur_pic->f.motion_val[0][mot_index - mot_stride * mot_step][0];
+ s->cur_pic->motion_val[0][mot_index - mot_stride * mot_step][0];
mv_predictor[pred_count][1] =
- s->cur_pic->f.motion_val[0][mot_index - mot_stride * mot_step][1];
+ s->cur_pic->motion_val[0][mot_index - mot_stride * mot_step][1];
ref[pred_count] =
- s->cur_pic->f.ref_index[0][4 * (mb_xy - s->mb_stride)];
+ s->cur_pic->ref_index[0][4 * (mb_xy - s->mb_stride)];
pred_count++;
}
if (mb_y + 1<mb_height && fixed[mb_xy + mb_stride]) {
mv_predictor[pred_count][0] =
- s->cur_pic->f.motion_val[0][mot_index + mot_stride * mot_step][0];
+ s->cur_pic->motion_val[0][mot_index + mot_stride * mot_step][0];
mv_predictor[pred_count][1] =
- s->cur_pic->f.motion_val[0][mot_index + mot_stride * mot_step][1];
+ s->cur_pic->motion_val[0][mot_index + mot_stride * mot_step][1];
ref[pred_count] =
- s->cur_pic->f.ref_index[0][4 * (mb_xy + s->mb_stride)];
+ s->cur_pic->ref_index[0][4 * (mb_xy + s->mb_stride)];
pred_count++;
}
if (pred_count == 0)
if (s->avctx->codec_id == AV_CODEC_ID_H264) {
// FIXME
} else {
- ff_thread_await_progress(&s->last_pic->f,
+ ff_thread_await_progress(&s->last_pic->tf,
mb_y, 0);
}
- if (!s->last_pic->f.motion_val[0] ||
- !s->last_pic->f.ref_index[0])
+ if (!s->last_pic->motion_val[0] ||
+ !s->last_pic->ref_index[0])
goto skip_last_mv;
- prev_x = s->last_pic->f.motion_val[0][mot_index][0];
- prev_y = s->last_pic->f.motion_val[0][mot_index][1];
- prev_ref = s->last_pic->f.ref_index[0][4 * mb_xy];
+ prev_x = s->last_pic->motion_val[0][mot_index][0];
+ prev_y = s->last_pic->motion_val[0][mot_index][1];
+ prev_ref = s->last_pic->ref_index[0][4 * mb_xy];
} else {
- prev_x = s->cur_pic->f.motion_val[0][mot_index][0];
- prev_y = s->cur_pic->f.motion_val[0][mot_index][1];
- prev_ref = s->cur_pic->f.ref_index[0][4 * mb_xy];
+ prev_x = s->cur_pic->motion_val[0][mot_index][0];
+ prev_y = s->cur_pic->motion_val[0][mot_index][1];
+ prev_ref = s->cur_pic->ref_index[0][4 * mb_xy];
}
/* last MV */
uint8_t *src = s->cur_pic->f.data[0] +
mb_x * 16 + mb_y * 16 * linesize[0];
- s->cur_pic->f.motion_val[0][mot_index][0] =
+ s->cur_pic->motion_val[0][mot_index][0] =
s->mv[0][0][0] = mv_predictor[j][0];
- s->cur_pic->f.motion_val[0][mot_index][1] =
+ s->cur_pic->motion_val[0][mot_index][1] =
s->mv[0][0][1] = mv_predictor[j][1];
// predictor intra or otherwise not available
for (i = 0; i < mot_step; i++)
for (j = 0; j < mot_step; j++) {
- s->cur_pic->f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
- s->cur_pic->f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
+ s->cur_pic->motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
+ s->cur_pic->motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
}
s->decode_mb(s->opaque, ref[best_pred], MV_DIR_FORWARD,
if (s->avctx->codec_id == AV_CODEC_ID_H264) {
// FIXME
} else {
- ff_thread_await_progress(&s->last_pic->f, mb_y, 0);
+ ff_thread_await_progress(&s->last_pic->tf, mb_y, 0);
}
is_intra_likely += s->dsp->sad[0](NULL, last_mb_ptr, mb_ptr,
linesize[0], 16);
last_mb_ptr + linesize[0] * 16,
linesize[0], 16);
} else {
- if (IS_INTRA(s->cur_pic->f.mb_type[mb_xy]))
+ if (IS_INTRA(s->cur_pic->mb_type[mb_xy]))
is_intra_likely++;
else
is_intra_likely--;
return;
};
- if (s->cur_pic->f.motion_val[0] == NULL) {
+ if (s->cur_pic->motion_val[0] == NULL) {
av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
for (i = 0; i < 2; i++) {
- s->cur_pic->f.ref_index[i] = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
- s->cur_pic->motion_val_base[i] = av_mallocz((size + 4) * 2 * sizeof(uint16_t));
- s->cur_pic->f.motion_val[i] = s->cur_pic->motion_val_base[i] + 4;
+ s->cur_pic->ref_index_buf[i] = av_buffer_allocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
+ s->cur_pic->motion_val_buf[i] = av_buffer_allocz((size + 4) * 2 * sizeof(uint16_t));
+ if (!s->cur_pic->ref_index_buf[i] || !s->cur_pic->motion_val_buf[i])
+ break;
+ s->cur_pic->ref_index[i] = s->cur_pic->ref_index_buf[i]->data;
+ s->cur_pic->motion_val[i] = (int16_t (*)[2])s->cur_pic->motion_val_buf[i]->data + 4;
+ }
+ if (i < 2) {
+ for (i = 0; i < 2; i++) {
+ av_buffer_unref(&s->cur_pic->ref_index_buf[i]);
+ av_buffer_unref(&s->cur_pic->motion_val_buf[i]);
+ s->cur_pic->ref_index[i] = NULL;
+ s->cur_pic->motion_val[i] = NULL;
+ }
+ return;
}
s->cur_pic->f.motion_subsample_log2 = 3;
}
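The fallback tables are now AVBufferRef-backed: the raw pointers are views into refcounted buffers, so teardown is a single av_buffer_unref() per table with no separate free of the data. A trimmed sketch of the allocate/release pair (motion_val shown; ref_index is analogous, and the real code above unwinds both on failure):

/* Allocate: the buffer owns the memory, the table pointer is a view. */
pic->motion_val_buf[i] = av_buffer_allocz((size + 4) * 2 * sizeof(uint16_t));
if (!pic->motion_val_buf[i])
    return AVERROR(ENOMEM);
pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;

/* Release: unref frees the data once the last reference is gone. */
av_buffer_unref(&pic->motion_val_buf[i]);
pic->motion_val[i] = NULL;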
continue;
if (is_intra_likely)
- s->cur_pic->f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
+ s->cur_pic->mb_type[mb_xy] = MB_TYPE_INTRA4x4;
else
- s->cur_pic->f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
+ s->cur_pic->mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
}
// change inter to intra blocks if no reference frames are available
!(s->next_pic && s->next_pic->f.data[0]))
for (i = 0; i < s->mb_num; i++) {
const int mb_xy = s->mb_index2xy[i];
- if (!IS_INTRA(s->cur_pic->f.mb_type[mb_xy]))
- s->cur_pic->f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
+ if (!IS_INTRA(s->cur_pic->mb_type[mb_xy]))
+ s->cur_pic->mb_type[mb_xy] = MB_TYPE_INTRA4x4;
}
/* handle inter blocks with damaged AC */
for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
const int mb_xy = mb_x + mb_y * s->mb_stride;
- const int mb_type = s->cur_pic->f.mb_type[mb_xy];
+ const int mb_type = s->cur_pic->mb_type[mb_xy];
const int dir = !(s->last_pic && s->last_pic->f.data[0]);
const int mv_dir = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
int mv_type;
int j;
mv_type = MV_TYPE_8X8;
for (j = 0; j < 4; j++) {
- s->mv[0][j][0] = s->cur_pic->f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
- s->mv[0][j][1] = s->cur_pic->f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
+ s->mv[0][j][0] = s->cur_pic->motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
+ s->mv[0][j][1] = s->cur_pic->motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
}
} else {
mv_type = MV_TYPE_16X16;
- s->mv[0][0][0] = s->cur_pic->f.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
- s->mv[0][0][1] = s->cur_pic->f.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
+ s->mv[0][0][0] = s->cur_pic->motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
+ s->mv[0][0][1] = s->cur_pic->motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
}
s->decode_mb(s->opaque, 0 /* FIXME h264 partitioned slices need this set */,
for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
int xy = mb_x * 2 + mb_y * 2 * s->b8_stride;
const int mb_xy = mb_x + mb_y * s->mb_stride;
- const int mb_type = s->cur_pic->f.mb_type[mb_xy];
+ const int mb_type = s->cur_pic->mb_type[mb_xy];
int mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
error = s->error_status_table[mb_xy];
int time_pp = s->pp_time;
int time_pb = s->pb_time;
- ff_thread_await_progress(&s->next_pic->f, mb_y, 0);
+ ff_thread_await_progress(&s->next_pic->tf, mb_y, 0);
- s->mv[0][0][0] = s->next_pic->f.motion_val[0][xy][0] * time_pb / time_pp;
- s->mv[0][0][1] = s->next_pic->f.motion_val[0][xy][1] * time_pb / time_pp;
- s->mv[1][0][0] = s->next_pic->f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
- s->mv[1][0][1] = s->next_pic->f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
+ s->mv[0][0][0] = s->next_pic->motion_val[0][xy][0] * time_pb / time_pp;
+ s->mv[0][0][1] = s->next_pic->motion_val[0][xy][1] * time_pb / time_pp;
+ s->mv[1][0][0] = s->next_pic->motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
+ s->mv[1][0][1] = s->next_pic->motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
} else {
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
int16_t *dc_ptr;
uint8_t *dest_y, *dest_cb, *dest_cr;
const int mb_xy = mb_x + mb_y * s->mb_stride;
- const int mb_type = s->cur_pic->f.mb_type[mb_xy];
+ const int mb_type = s->cur_pic->mb_type[mb_xy];
error = s->error_status_table[mb_xy];
for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
uint8_t *dest_y, *dest_cb, *dest_cr;
const int mb_xy = mb_x + mb_y * s->mb_stride;
- const int mb_type = s->cur_pic->f.mb_type[mb_xy];
+ const int mb_type = s->cur_pic->mb_type[mb_xy];
error = s->error_status_table[mb_xy];
for (i = 0; i < 3; i++)
av_free(s->codebooks[i].blocks);
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
return 0;
}
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
Escape124Context *s = avctx->priv_data;
+ AVFrame *frame = data;
GetBitContext gb;
unsigned frame_flags, frame_size;
uint16_t* old_frame_data, *new_frame_data;
unsigned old_stride, new_stride;
-
- AVFrame new_frame = { { 0 } };
+ int ret;
init_get_bits(&gb, buf, buf_size * 8);
// Leave last frame unchanged
// FIXME: Is this necessary? I haven't seen it in any real samples
if (!(frame_flags & 0x114) || !(frame_flags & 0x7800000)) {
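+ /* a skip frame just repeats the previous one, so a previous frame must exist */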
+ if (!s->frame.data[0])
+ return AVERROR_INVALIDDATA;
+
av_log(NULL, AV_LOG_DEBUG, "Skipping frame\n");
*got_frame = 1;
- *(AVFrame*)data = s->frame;
+ if ((ret = av_frame_ref(frame, &s->frame)) < 0)
+ return ret;
return frame_size;
}
}
}
- new_frame.reference = 3;
- if (ff_get_buffer(avctx, &new_frame)) {
+ if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return -1;
+ return ret;
}
- new_frame_data = (uint16_t*)new_frame.data[0];
- new_stride = new_frame.linesize[0] / 2;
+ new_frame_data = (uint16_t*)frame->data[0];
+ new_stride = frame->linesize[0] / 2;
old_frame_data = (uint16_t*)s->frame.data[0];
old_stride = s->frame.linesize[0] / 2;
"Escape sizes: %i, %i, %i\n",
frame_size, buf_size, get_bits_count(&gb) / 8);
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
+ if ((ret = av_frame_ref(&s->frame, frame)) < 0)
+ return ret;
- *(AVFrame*)data = s->frame = new_frame;
*got_frame = 1;
return frame_size;
FFV1Context *s = avctx->priv_data;
int i, j;
- if (avctx->codec->decode && s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
- if (avctx->codec->decode && s->last_picture.data[0])
- avctx->release_buffer(avctx, &s->last_picture);
+ av_frame_unref(&s->last_picture);
for (j = 0; j < s->slice_count; j++) {
FFV1Context *fs = s->slice_context[j];
int flags;
int picture_number;
AVFrame picture, last_picture;
+
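+ // frame currently being decoded; points at the caller's output frame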
+ AVFrame *cur;
int plane_count;
int ac; // 1 = range coder <-> 0 = golomb rice
int ac_byte_count; // number of bytes used for AC coding
ps = get_symbol(c, state, 0);
if (ps == 1) {
- f->picture.interlaced_frame = 1;
- f->picture.top_field_first = 1;
+ f->cur->interlaced_frame = 1;
+ f->cur->top_field_first = 1;
} else if (ps == 2) {
- f->picture.interlaced_frame = 1;
- f->picture.top_field_first = 0;
+ f->cur->interlaced_frame = 1;
+ f->cur->top_field_first = 0;
} else if (ps == 3) {
- f->picture.interlaced_frame = 0;
+ f->cur->interlaced_frame = 0;
}
- f->picture.sample_aspect_ratio.num = get_symbol(c, state, 0);
- f->picture.sample_aspect_ratio.den = get_symbol(c, state, 0);
+ f->cur->sample_aspect_ratio.num = get_symbol(c, state, 0);
+ f->cur->sample_aspect_ratio.den = get_symbol(c, state, 0);
return 0;
}
const int ps = (av_pix_fmt_desc_get(c->pix_fmt)->flags & PIX_FMT_PLANAR)
? (c->bits_per_raw_sample > 8) + 1
: 4;
- AVFrame *const p = &f->picture;
+ AVFrame *const p = f->cur;
if (f->version > 2) {
if (decode_slice_header(f, fs) < 0) {
}
if ((ret = ffv1_init_slice_state(f, fs)) < 0)
return ret;
- if (f->picture.key_frame)
+ if (f->cur->key_frame)
ffv1_clear_slice_state(f, fs);
width = fs->slice_width;
height = fs->slice_height;
int buf_size = avpkt->size;
FFV1Context *f = avctx->priv_data;
RangeCoder *const c = &f->slice_context[0]->c;
- AVFrame *const p = &f->picture;
int i, ret;
uint8_t keystate = 128;
const uint8_t *buf_p;
- AVFrame *picture = data;
-
- /* release previously stored data */
- if (p->data[0])
- avctx->release_buffer(avctx, p);
+ AVFrame *const p = data;
+
+ f->cur = p;
ff_init_range_decoder(c, buf, buf_size);
ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
p->key_frame = 0;
}
- p->reference = 3; //for error concealment
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
ff_init_range_decoder(&fs->c, buf_p, v);
} else
fs->c.bytestream_end = (uint8_t *)(buf_p + v);
+
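+ /* every slice context decodes into the same output frame */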
+ fs->cur = p;
}
avctx->execute(avctx, decode_slice, &f->slice_context[0], NULL,
for (j = 0; j < 4; j++) {
int sh = (j == 1 || j == 2) ? f->chroma_h_shift : 0;
int sv = (j == 1 || j == 2) ? f->chroma_v_shift : 0;
- dst[j] = f->picture.data[j] + f->picture.linesize[j] *
+ dst[j] = p->data[j] + p->linesize[j] *
(fs->slice_y >> sv) + (fs->slice_x >> sh);
src[j] = f->last_picture.data[j] +
f->last_picture.linesize[j] *
(fs->slice_y >> sv) + (fs->slice_x >> sh);
}
- av_image_copy(dst, f->picture.linesize, (const uint8_t **)src,
+ av_image_copy(dst, p->linesize, (const uint8_t **)src,
f->last_picture.linesize,
avctx->pix_fmt, fs->slice_width,
fs->slice_height);
f->picture_number++;
- *picture = *p;
- *got_frame = 1;
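+ /* keep a reference to the finished frame; damaged slices of the next frame are copied from it */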
+ av_frame_unref(&f->last_picture);
+ if ((ret = av_frame_ref(&f->last_picture, p)) < 0)
+ return ret;
+ f->cur = NULL;
- FFSWAP(AVFrame, f->picture, f->last_picture);
+ *got_frame = 1;
return buf_size;
}
/* get output buffer */
frame->nb_samples = s->blocksize;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "avcodec.h"
#include "bytestream.h"
#include "get_bits.h"
+#include "internal.h"
typedef struct BlockInfo {
uint8_t *pos;
{
int buf_size = avpkt->size;
FlashSVContext *s = avctx->priv_data;
- int h_blocks, v_blocks, h_part, v_part, i, j;
+ int h_blocks, v_blocks, h_part, v_part, i, j, ret;
GetBitContext gb;
/* no supplementary picture */
s->image_width, s->image_height, s->block_width, s->block_height,
h_blocks, v_blocks, h_part, v_part);
- s->frame.reference = 3;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
- if (avctx->reget_buffer(avctx, &s->frame) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
- return -1;
+ return ret;
}
/* loop over all block columns */
s->diff_height = cur_blk_height;
if (8 * size > get_bits_left(&gb)) {
- avctx->release_buffer(avctx, &s->frame);
- s->frame.data[0] = NULL;
+ av_frame_unref(&s->frame);
return AVERROR_INVALIDDATA;
}
memcpy(s->keyframe, s->frame.data[0], s->frame.linesize[0] * avctx->height);
}
+ if ((ret = av_frame_ref(data, &s->frame)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = s->frame;
if ((get_bits_count(&gb) / 8) != buf_size)
av_log(avctx, AV_LOG_ERROR, "buffer not fully consumed (%d != %d)\n",
FlashSVContext *s = avctx->priv_data;
inflateEnd(&s->zstream);
/* release the frame if needed */
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
/* free the tmpblock */
av_free(s->tmpblock);
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"
+#include "internal.h"
#include "mathops.h"
#define FLI_256_COLOR 4
bytestream2_init(&g2, buf, buf_size);
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
s->new_palette = 0;
}
+ if ((ret = av_frame_ref(data, &s->frame)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = s->frame;
return buf_size;
}
bytestream2_init(&g2, buf, buf_size);
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
"and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2));
+ if ((ret = av_frame_ref(data, &s->frame)) < 0)
+ return ret;
*got_frame = 1;
- *(AVFrame*)data = s->frame;
return buf_size;
}
{
FlicDecodeContext *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
return 0;
}
#include "huffman.h"
#include "bytestream.h"
#include "dsputil.h"
+#include "internal.h"
#define FPS_TAG MKTAG('F', 'P', 'S', 'x')
{
FrapsContext * const s = avctx->priv_data;
- avctx->coded_frame = &s->frame;
avctx->pix_fmt = AV_PIX_FMT_NONE; /* set in decode_frame */
s->avctx = avctx;
pix_fmt = version & 1 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_YUVJ420P;
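+ /* a pixel format change invalidates the previously stored frame */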
if (avctx->pix_fmt != pix_fmt && f->data[0]) {
- avctx->release_buffer(avctx, f);
+ av_frame_unref(f);
}
avctx->pix_fmt = pix_fmt;
return AVERROR_INVALIDDATA;
}
- f->reference = 1;
- f->buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, f)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, f)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
return AVERROR_INVALIDDATA;
}
- f->reference = 1;
- f->buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, f)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, f)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
* Fraps v4 is virtually the same
*/
planes = 3;
- f->reference = 1;
- f->buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, f)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, f)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
case 5:
/* Virtually the same as version 4, but is for RGB24 */
planes = 3;
- f->reference = 1;
- f->buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, f)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, f)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
break;
}
- *frame = *f;
+ if ((ret = av_frame_ref(frame, f)) < 0)
+ return ret;
*got_frame = 1;
return buf_size;
{
FrapsContext *s = (FrapsContext*)avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
av_freep(&s->tmpbuf);
return 0;
}
avctx->pix_fmt = AV_PIX_FMT_UYVY422;
- avctx->coded_frame = avcodec_alloc_frame();
- if (!avctx->coded_frame)
- return AVERROR(ENOMEM);
-
return 0;
}
AVPacket *avpkt)
{
int field, ret;
- AVFrame *pic = avctx->coded_frame;
+ AVFrame *pic = data;
const uint8_t *buf = avpkt->data;
const uint8_t *buf_end = buf + avpkt->size;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
-
if (avpkt->size < avctx->width * 2 * avctx->height + 4 + 2*8) {
av_log(avctx, AV_LOG_ERROR, "Packet is too small.\n");
return AVERROR_INVALIDDATA;
return AVERROR_INVALIDDATA;
}
- pic->reference = 0;
- if ((ret = ff_get_buffer(avctx, pic)) < 0) {
+ if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
}
*got_frame = 1;
- *(AVFrame*)data = *pic;
return avpkt->size;
}
-static av_cold int decode_close(AVCodecContext *avctx)
-{
- AVFrame *pic = avctx->coded_frame;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
- av_freep(&avctx->coded_frame);
-
- return 0;
-}
-
AVCodec ff_frwu_decoder = {
.name = "frwu",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_FRWU,
.init = decode_init,
- .close = decode_close,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Forward Uncompressed"),
/* get output buffer */
frame->nb_samples = avpkt->size * 2;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
}
frame->nb_samples = FRAME_LEN;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = out_samples;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#define GCE_DISPOSAL_RESTORE 3
typedef struct GifState {
- AVFrame picture;
int screen_width;
int screen_height;
int bits_per_pixel;
static const uint8_t gif87a_sig[6] = "GIF87a";
static const uint8_t gif89a_sig[6] = "GIF89a";
-static int gif_read_image(GifState *s)
+static int gif_read_image(GifState *s, AVFrame *frame)
{
int left, top, width, height, bits_per_pixel, code_size, flags;
int is_interleaved, has_local_palette, y, pass, y1, linesize, n, i;
s->bytestream_end - s->bytestream, FF_LZW_GIF);
/* read all the image */
- linesize = s->picture.linesize[0];
- ptr1 = s->picture.data[0] + top * linesize + left;
+ linesize = frame->linesize[0];
+ ptr1 = frame->data[0] + top * linesize + left;
ptr = ptr1;
pass = 0;
y1 = 0;
return 0;
}
-static int gif_parse_next_image(GifState *s)
+static int gif_parse_next_image(GifState *s, AVFrame *frame)
{
while (s->bytestream < s->bytestream_end) {
int code = bytestream_get_byte(&s->bytestream);
switch (code) {
case ',':
- return gif_read_image(s);
+ return gif_read_image(s, frame);
case '!':
if ((ret = gif_read_extension(s)) < 0)
return ret;
s->avctx = avctx;
- avcodec_get_frame_defaults(&s->picture);
- avctx->coded_frame= &s->picture;
- s->picture.data[0] = NULL;
ff_lzw_decode_open(&s->lzw);
return 0;
}
return ret;
avcodec_set_dimensions(avctx, s->screen_width, s->screen_height);
- if (s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
- if ((ret = ff_get_buffer(avctx, &s->picture)) < 0) {
+ if ((ret = ff_get_buffer(avctx, picture, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- s->image_palette = (uint32_t *)s->picture.data[1];
- ret = gif_parse_next_image(s);
+ s->image_palette = (uint32_t *)picture->data[1];
+ ret = gif_parse_next_image(s, picture);
if (ret < 0)
return ret;
- *picture = s->picture;
*got_frame = 1;
return s->bytestream - buf;
}
GifState *s = avctx->priv_data;
ff_lzw_decode_close(&s->lzw);
- if(s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
return 0;
}
/* get output buffer */
frame->nb_samples = avctx->frame_size;
- if ((res = ff_get_buffer(avctx, frame)) < 0) {
+ if ((res = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return res;
}
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
- s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
s->mb_skipped = 1;
}
if(s->mb_intra){
- s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
+ s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
goto intra;
}
//set motion vectors
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
- s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = h->current_mv_x * 2;//gets divided by 2 in motion compensation
s->mv[0][0][1] = h->current_mv_y * 2;
assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
assert(s->current_picture.f.pict_type == s->pict_type);
- *pict = s->current_picture_ptr->f;
- ff_print_debug_info(s, pict);
+ if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+ return ret;
+ ff_print_debug_info(s, s->current_picture_ptr);
*got_frame = 1;
const int wrap = s->b8_stride;
const int xy = s->block_index[0];
- s->current_picture.f.mbskip_table[mb_xy] = s->mb_skipped;
+ s->current_picture.mbskip_table[mb_xy] = s->mb_skipped;
if(s->mv_type != MV_TYPE_8X8){
int motion_x, motion_y;
s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
}
- s->current_picture.f.ref_index[0][4*mb_xy ] =
- s->current_picture.f.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
- s->current_picture.f.ref_index[0][4*mb_xy + 2] =
- s->current_picture.f.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
+ s->current_picture.ref_index[0][4*mb_xy ] =
+ s->current_picture.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
+ s->current_picture.ref_index[0][4*mb_xy + 2] =
+ s->current_picture.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
}
/* no update if 8X8 because it has been done during parsing */
- s->current_picture.f.motion_val[0][xy][0] = motion_x;
- s->current_picture.f.motion_val[0][xy][1] = motion_y;
- s->current_picture.f.motion_val[0][xy + 1][0] = motion_x;
- s->current_picture.f.motion_val[0][xy + 1][1] = motion_y;
- s->current_picture.f.motion_val[0][xy + wrap][0] = motion_x;
- s->current_picture.f.motion_val[0][xy + wrap][1] = motion_y;
- s->current_picture.f.motion_val[0][xy + 1 + wrap][0] = motion_x;
- s->current_picture.f.motion_val[0][xy + 1 + wrap][1] = motion_y;
+ s->current_picture.motion_val[0][xy][0] = motion_x;
+ s->current_picture.motion_val[0][xy][1] = motion_y;
+ s->current_picture.motion_val[0][xy + 1][0] = motion_x;
+ s->current_picture.motion_val[0][xy + 1][1] = motion_y;
+ s->current_picture.motion_val[0][xy + wrap][0] = motion_x;
+ s->current_picture.motion_val[0][xy + wrap][1] = motion_y;
+ s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
+ s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
}
if(s->encoding){ //FIXME encoding MUST be cleaned up
if (s->mv_type == MV_TYPE_8X8)
- s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
+ s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
else if(s->mb_intra)
- s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA;
+ s->current_picture.mb_type[mb_xy] = MB_TYPE_INTRA;
else
- s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
+ s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
}
}
Diag Top
Left Center
*/
- if (!IS_SKIP(s->current_picture.f.mb_type[xy])) {
+ if (!IS_SKIP(s->current_picture.mb_type[xy])) {
qp_c= s->qscale;
s->dsp.h263_v_loop_filter(dest_y+8*linesize , linesize, qp_c);
s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
if(s->mb_y){
int qp_dt, qp_tt, qp_tc;
- if (IS_SKIP(s->current_picture.f.mb_type[xy - s->mb_stride]))
+ if (IS_SKIP(s->current_picture.mb_type[xy - s->mb_stride]))
qp_tt=0;
else
- qp_tt = s->current_picture.f.qscale_table[xy - s->mb_stride];
+ qp_tt = s->current_picture.qscale_table[xy - s->mb_stride];
if(qp_c)
qp_tc= qp_c;
s->dsp.h263_h_loop_filter(dest_y-8*linesize+8 , linesize, qp_tt);
if(s->mb_x){
- if (qp_tt || IS_SKIP(s->current_picture.f.mb_type[xy - 1 - s->mb_stride]))
+ if (qp_tt || IS_SKIP(s->current_picture.mb_type[xy - 1 - s->mb_stride]))
qp_dt= qp_tt;
else
- qp_dt = s->current_picture.f.qscale_table[xy - 1 - s->mb_stride];
+ qp_dt = s->current_picture.qscale_table[xy - 1 - s->mb_stride];
if(qp_dt){
const int chroma_qp= s->chroma_qscale_table[qp_dt];
if(s->mb_x){
int qp_lc;
- if (qp_c || IS_SKIP(s->current_picture.f.mb_type[xy - 1]))
+ if (qp_c || IS_SKIP(s->current_picture.mb_type[xy - 1]))
qp_lc= qp_c;
else
- qp_lc = s->current_picture.f.qscale_table[xy - 1];
+ qp_lc = s->current_picture.qscale_table[xy - 1];
if(qp_lc){
s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
static const int off[4]= {2, 1, 1, -1};
wrap = s->b8_stride;
- mot_val = s->current_picture.f.motion_val[dir] + s->block_index[block];
+ mot_val = s->current_picture.motion_val[dir] + s->block_index[block];
A = mot_val[ - 1];
/* special case for first (slice) line */
if (buf_size == 0) {
/* special case for last picture */
if (s->low_delay==0 && s->next_picture_ptr) {
- *pict = s->next_picture_ptr->f;
+ if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
+ return ret;
s->next_picture_ptr= NULL;
*got_frame = 1;
assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
assert(s->current_picture.f.pict_type == s->pict_type);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
- *pict = s->current_picture_ptr->f;
+ if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+ return ret;
+ ff_print_debug_info(s, s->current_picture_ptr);
} else if (s->last_picture_ptr != NULL) {
- *pict = s->last_picture_ptr->f;
+ if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
+ return ret;
+ ff_print_debug_info(s, s->last_picture_ptr);
}
if(s->last_picture_ptr || s->low_delay){
*got_frame = 1;
- ff_print_debug_info(s, pict);
}
#ifdef PRINT_FRAME_TIME
* practice then correct remapping should be added. */
if (ref >= h->ref_count[0])
ref = 0;
- fill_rectangle(&h->cur_pic.f.ref_index[0][4 * h->mb_xy],
+ fill_rectangle(&h->cur_pic.ref_index[0][4 * h->mb_xy],
2, 2, 2, ref, 1);
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8,
}
}
-static void free_frame_buffer(H264Context *h, Picture *pic)
-{
- ff_thread_release_buffer(h->avctx, &pic->f);
- av_freep(&pic->f.hwaccel_picture_private);
-}
-
-static void free_picture(H264Context *h, Picture *pic)
+static void unref_picture(H264Context *h, Picture *pic)
{
+ int off = offsetof(Picture, tf) + sizeof(pic->tf);
int i;
- if (pic->f.data[0])
- free_frame_buffer(h, pic);
+ if (!pic->f.data[0])
+ return;
+
+ ff_thread_release_buffer(h->avctx, &pic->tf);
+ av_buffer_unref(&pic->hwaccel_priv_buf);
- av_freep(&pic->qscale_table_base);
- pic->f.qscale_table = NULL;
- av_freep(&pic->mb_type_base);
- pic->f.mb_type = NULL;
+ av_buffer_unref(&pic->qscale_table_buf);
+ av_buffer_unref(&pic->mb_type_buf);
for (i = 0; i < 2; i++) {
- av_freep(&pic->motion_val_base[i]);
- av_freep(&pic->f.ref_index[i]);
- pic->f.motion_val[i] = NULL;
+ av_buffer_unref(&pic->motion_val_buf[i]);
+ av_buffer_unref(&pic->ref_index_buf[i]);
}
+
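+ /* clear all per-picture state that follows tf in the Picture struct */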
+ memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
}
static void release_unused_pictures(H264Context *h, int remove_current)
int i;
/* release non reference frames */
- for (i = 0; i < h->picture_count; i++) {
- if (h->DPB[i].f.data[0] && !h->DPB[i].f.reference &&
- (!h->DPB[i].owner2 || h->DPB[i].owner2 == h) &&
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+ if (h->DPB[i].f.data[0] && !h->DPB[i].reference &&
(remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
- free_frame_buffer(h, &h->DPB[i]);
+ unref_picture(h, &h->DPB[i]);
}
}
}
+static int ref_picture(H264Context *h, Picture *dst, Picture *src)
+{
+ int ret, i;
+
+ av_assert0(!dst->f.buf[0]);
+ av_assert0(src->f.buf[0]);
+
+ src->tf.f = &src->f;
+ dst->tf.f = &dst->f;
+ ret = ff_thread_ref_frame(&dst->tf, &src->tf);
+ if (ret < 0)
+ goto fail;
+
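+ /* the per-picture tables are shared with the source via buffer references, not copied */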
+ dst->qscale_table_buf = av_buffer_ref(src->qscale_table_buf);
+ dst->mb_type_buf = av_buffer_ref(src->mb_type_buf);
+ if (!dst->qscale_table_buf || !dst->mb_type_buf)
+ goto fail;
+ dst->qscale_table = src->qscale_table;
+ dst->mb_type = src->mb_type;
+
+ for (i = 0; i < 2; i ++) {
+ dst->motion_val_buf[i] = av_buffer_ref(src->motion_val_buf[i]);
+ dst->ref_index_buf[i] = av_buffer_ref(src->ref_index_buf[i]);
+ if (!dst->motion_val_buf[i] || !dst->ref_index_buf[i])
+ goto fail;
+ dst->motion_val[i] = src->motion_val[i];
+ dst->ref_index[i] = src->ref_index[i];
+ }
+
+ if (src->hwaccel_picture_private) {
+ dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
+ if (!dst->hwaccel_priv_buf)
+ goto fail;
+ dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
+ }
+
+ for (i = 0; i < 2; i++)
+ dst->field_poc[i] = src->field_poc[i];
+
+ memcpy(dst->ref_poc, src->ref_poc, sizeof(src->ref_poc));
+ memcpy(dst->ref_count, src->ref_count, sizeof(src->ref_count));
+
+ dst->poc = src->poc;
+ dst->frame_num = src->frame_num;
+ dst->mmco_reset = src->mmco_reset;
+ dst->pic_id = src->pic_id;
+ dst->long_ref = src->long_ref;
+ dst->mbaff = src->mbaff;
+ dst->field_picture = src->field_picture;
+ dst->needs_realloc = src->needs_realloc;
+ dst->reference = src->reference;
+
+ return 0;
+fail:
+ unref_picture(h, dst);
+ return ret;
+}
+
static int alloc_scratch_buffers(H264Context *h, int linesize)
{
int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
return 0;
}
-static int alloc_picture(H264Context *h, Picture *pic)
+static int init_table_pools(H264Context *h)
{
const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
const int mb_array_size = h->mb_stride * h->mb_height;
const int b4_stride = h->mb_width * 4 + 1;
const int b4_array_size = b4_stride * h->mb_height * 4;
+
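+ /* one buffer pool per table type; pool entries are recycled between pictures */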
+ h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
+ av_buffer_allocz);
+ h->mb_type_pool = av_buffer_pool_init((big_mb_num + h->mb_stride) *
+ sizeof(uint32_t), av_buffer_allocz);
+ h->motion_val_pool = av_buffer_pool_init(2 * (b4_array_size + 4) *
+ sizeof(int16_t), av_buffer_allocz);
+ h->ref_index_pool = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);
+
+ if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
+ !h->ref_index_pool) {
+ av_buffer_pool_uninit(&h->qscale_table_pool);
+ av_buffer_pool_uninit(&h->mb_type_pool);
+ av_buffer_pool_uninit(&h->motion_val_pool);
+ av_buffer_pool_uninit(&h->ref_index_pool);
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+}
+
+static int alloc_picture(H264Context *h, Picture *pic)
+{
int i, ret = 0;
av_assert0(!pic->f.data[0]);
if (h->avctx->hwaccel) {
const AVHWAccel *hwaccel = h->avctx->hwaccel;
- av_assert0(!pic->f.hwaccel_picture_private);
+ av_assert0(!pic->hwaccel_picture_private);
if (hwaccel->priv_data_size) {
- pic->f.hwaccel_picture_private = av_mallocz(hwaccel->priv_data_size);
- if (!pic->f.hwaccel_picture_private)
+ pic->hwaccel_priv_buf = av_buffer_allocz(hwaccel->priv_data_size);
+ if (!pic->hwaccel_priv_buf)
return AVERROR(ENOMEM);
+ pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
}
}
- ret = ff_thread_get_buffer(h->avctx, &pic->f);
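+ /* wrap the AVFrame in a ThreadFrame; frames kept as references request AV_GET_BUFFER_FLAG_REF */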
+ pic->tf.f = &pic->f;
+ ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ?
+ AV_GET_BUFFER_FLAG_REF : 0);
if (ret < 0)
goto fail;
h->linesize = pic->f.linesize[0];
h->uvlinesize = pic->f.linesize[1];
- if (pic->f.qscale_table == NULL) {
- FF_ALLOCZ_OR_GOTO(h->avctx, pic->qscale_table_base,
- (big_mb_num + h->mb_stride) * sizeof(uint8_t),
- fail)
- FF_ALLOCZ_OR_GOTO(h->avctx, pic->mb_type_base,
- (big_mb_num + h->mb_stride) * sizeof(uint32_t),
- fail)
- pic->f.mb_type = pic->mb_type_base + 2 * h->mb_stride + 1;
- pic->f.qscale_table = pic->qscale_table_base + 2 * h->mb_stride + 1;
+ if (!h->qscale_table_pool) {
+ ret = init_table_pools(h);
+ if (ret < 0)
+ goto fail;
+ }
- for (i = 0; i < 2; i++) {
- FF_ALLOCZ_OR_GOTO(h->avctx, pic->motion_val_base[i],
- 2 * (b4_array_size + 4) * sizeof(int16_t),
- fail)
- pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
- FF_ALLOCZ_OR_GOTO(h->avctx, pic->f.ref_index[i],
- 4 * mb_array_size * sizeof(uint8_t), fail)
- }
- pic->f.motion_subsample_log2 = 2;
+ pic->qscale_table_buf = av_buffer_pool_get(h->qscale_table_pool);
+ pic->mb_type_buf = av_buffer_pool_get(h->mb_type_pool);
+ if (!pic->qscale_table_buf || !pic->mb_type_buf)
+ goto fail;
- pic->f.qstride = h->mb_stride;
- }
+ pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
+ pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;
+
+ for (i = 0; i < 2; i++) {
+ pic->motion_val_buf[i] = av_buffer_pool_get(h->motion_val_pool);
+ pic->ref_index_buf[i] = av_buffer_pool_get(h->ref_index_pool);
+ if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
+ goto fail;
- pic->owner2 = h;
+ pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
+ pic->ref_index[i] = pic->ref_index_buf[i]->data;
+ }
+ pic->f.motion_subsample_log2 = 2;
return 0;
fail:
- free_frame_buffer(h, pic);
+ unref_picture(h, pic);
return (ret < 0) ? ret : AVERROR(ENOMEM);
}
{
if (pic->f.data[0] == NULL)
return 1;
- if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
- if (!pic->owner2 || pic->owner2 == h)
- return 1;
+ if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
+ return 1;
return 0;
}
{
int i;
- for (i = h->picture_range_start; i < h->picture_range_end; i++) {
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
if (pic_is_unused(h, &h->DPB[i]))
break;
}
- if (i == h->picture_range_end)
+ if (i == MAX_PICTURE_COUNT)
return AVERROR_INVALIDDATA;
if (h->DPB[i].needs_realloc) {
h->DPB[i].needs_realloc = 0;
- free_picture(h, &h->DPB[i]);
- avcodec_get_frame_defaults(&h->DPB[i].f);
+ unref_picture(h, &h->DPB[i]);
}
return i;
// Error resilience puts the current picture in the ref list.
// Don't try to wait on these as it will cause a deadlock.
// Fields can wait on each other, though.
- if (ref->f.thread_opaque != h->cur_pic.f.thread_opaque ||
- (ref->f.reference & 3) != h->picture_structure) {
+ if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
+ (ref->reference & 3) != h->picture_structure) {
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0);
if (refs[0][ref_n] < 0)
nrefs[0] += 1;
int ref_n = h->ref_cache[1][scan8[n]];
Picture *ref = &h->ref_list[1][ref_n];
- if (ref->f.thread_opaque != h->cur_pic.f.thread_opaque ||
- (ref->f.reference & 3) != h->picture_structure) {
+ if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
+ (ref->reference & 3) != h->picture_structure) {
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1);
if (refs[1][ref_n] < 0)
nrefs[1] += 1;
static void await_references(H264Context *h)
{
const int mb_xy = h->mb_xy;
- const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+ const int mb_type = h->cur_pic.mb_type[mb_xy];
int refs[2][48];
int nrefs[2] = { 0 };
int ref, list;
int row = refs[list][ref];
if (row >= 0) {
Picture *ref_pic = &h->ref_list[list][ref];
- int ref_field = ref_pic->f.reference - 1;
+ int ref_field = ref_pic->reference - 1;
int ref_field_picture = ref_pic->field_picture;
int pic_height = 16 * h->mb_height >> ref_field_picture;
nrefs[list]--;
if (!FIELD_PICTURE && ref_field_picture) { // frame referencing two fields
- ff_thread_await_progress(&ref_pic->f,
+ ff_thread_await_progress(&ref_pic->tf,
FFMIN((row >> 1) - !(row & 1),
pic_height - 1),
1);
- ff_thread_await_progress(&ref_pic->f,
+ ff_thread_await_progress(&ref_pic->tf,
FFMIN((row >> 1), pic_height - 1),
0);
} else if (FIELD_PICTURE && !ref_field_picture) { // field referencing one field of a frame
- ff_thread_await_progress(&ref_pic->f,
+ ff_thread_await_progress(&ref_pic->tf,
FFMIN(row * 2 + ref_field,
pic_height - 1),
0);
} else if (FIELD_PICTURE) {
- ff_thread_await_progress(&ref_pic->f,
+ ff_thread_await_progress(&ref_pic->tf,
FFMIN(row, pic_height - 1),
ref_field);
} else {
- ff_thread_await_progress(&ref_pic->f,
+ ff_thread_await_progress(&ref_pic->tf,
FFMIN(row, pic_height - 1),
0);
}
ysh = 3 - (chroma_idc == 2 /* yuv422 */);
if (chroma_idc == 1 /* yuv420 */ && MB_FIELD) {
// chroma offset when predicting from a field of opposite parity
- my += 2 * ((h->mb_y & 1) - (pic->f.reference - 1));
+ my += 2 * ((h->mb_y & 1) - (pic->reference - 1));
emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
}
av_freep(&h->mb2b_xy);
av_freep(&h->mb2br_xy);
- if (free_rbsp) {
- for (i = 0; i < h->picture_count && !h->avctx->internal->is_copy; i++)
- free_picture(h, &h->DPB[i]);
+ av_buffer_pool_uninit(&h->qscale_table_pool);
+ av_buffer_pool_uninit(&h->mb_type_pool);
+ av_buffer_pool_uninit(&h->motion_val_pool);
+ av_buffer_pool_uninit(&h->ref_index_pool);
+
+ if (free_rbsp && h->DPB) {
+ for (i = 0; i < MAX_PICTURE_COUNT; i++)
+ unref_picture(h, &h->DPB[i]);
av_freep(&h->DPB);
- h->picture_count = 0;
} else if (h->DPB) {
- for (i = 0; i < h->picture_count; i++)
+ for (i = 0; i < MAX_PICTURE_COUNT; i++)
h->DPB[i].needs_realloc = 1;
}
init_dequant_tables(h);
if (!h->DPB) {
- h->picture_count = MAX_PICTURE_COUNT * FFMAX(1, h->avctx->thread_count);
- h->DPB = av_mallocz_array(h->picture_count, sizeof(*h->DPB));
+ h->DPB = av_mallocz_array(MAX_PICTURE_COUNT, sizeof(*h->DPB));
if (!h->DPB)
return AVERROR(ENOMEM);
- for (i = 0; i < h->picture_count; i++)
+ for (i = 0; i < MAX_PICTURE_COUNT; i++)
avcodec_get_frame_defaults(&h->DPB[i].f);
avcodec_get_frame_defaults(&h->cur_pic.f);
}
common_init(h);
h->picture_structure = PICT_FRAME;
- h->picture_range_start = 0;
- h->picture_range_end = MAX_PICTURE_COUNT;
h->slice_context_count = 1;
h->workaround_bugs = avctx->workaround_bugs;
h->flags = avctx->flags;
h->low_delay = 0;
}
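+ /* progress state is needed for ff_thread_report/await_progress() */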
+ avctx->internal->allocate_progress = 1;
+
return 0;
}
#undef REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx) \
((pic && pic >= old_ctx->DPB && \
- pic < old_ctx->DPB + old_ctx->picture_count) ? \
+ pic < old_ctx->DPB + MAX_PICTURE_COUNT) ? \
&new_ctx->DPB[pic - old_ctx->DPB] : NULL)
static void copy_picture_range(Picture **to, Picture **from, int count,
for (i = 0; i < count; i++) {
assert((IN_RANGE(from[i], old_base, sizeof(*old_base)) ||
IN_RANGE(from[i], old_base->DPB,
- sizeof(Picture) * old_base->picture_count) ||
+ sizeof(Picture) * MAX_PICTURE_COUNT) ||
!from[i]));
to[i] = REBASE_PICTURE(from[i], new_base, old_base);
}
H264Context *h = dst->priv_data, *h1 = src->priv_data;
int inited = h->context_initialized, err = 0;
int context_reinitialized = 0;
- int i;
+ int i, ret;
if (dst == src || !h1->context_initialized)
return 0;
memset(&h->me, 0, sizeof(h->me));
h->context_initialized = 0;
- h->picture_range_start += MAX_PICTURE_COUNT;
- h->picture_range_end += MAX_PICTURE_COUNT;
+ memset(&h->cur_pic, 0, sizeof(h->cur_pic));
+ avcodec_get_frame_defaults(&h->cur_pic.f);
+ h->cur_pic.tf.f = &h->cur_pic.f;
h->avctx = dst;
h->DPB = NULL;
- h->cur_pic.f.extended_data = h->cur_pic.f.data;
+ h->qscale_table_pool = NULL;
+ h->mb_type_pool = NULL;
+ h->ref_index_pool = NULL;
+ h->motion_val_pool = NULL;
if (ff_h264_alloc_tables(h) < 0) {
av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n");
h->data_partitioning = h1->data_partitioning;
h->low_delay = h1->low_delay;
- memcpy(h->DPB, h1->DPB, h1->picture_count * sizeof(*h1->DPB));
-
- // reset s->picture[].f.extended_data to s->picture[].f.data
- for (i = 0; i < h->picture_count; i++)
- h->DPB[i].f.extended_data = h->DPB[i].f.data;
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+ unref_picture(h, &h->DPB[i]);
+ if (h1->DPB[i].f.data[0] &&
+ (ret = ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0)
+ return ret;
+ }
h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
- h->cur_pic = h1->cur_pic;
- h->cur_pic.f.extended_data = h->cur_pic.f.data;
+ unref_picture(h, &h->cur_pic);
+ if ((ret = ref_picture(h, &h->cur_pic, &h1->cur_pic)) < 0)
+ return ret;
h->workaround_bugs = h1->workaround_bugs;
h->low_delay = h1->low_delay;
}
pic = &h->DPB[i];
- pic->f.reference = h->droppable ? 0 : h->picture_structure;
+ pic->reference = h->droppable ? 0 : h->picture_structure;
pic->f.coded_picture_number = h->coded_picture_number++;
pic->field_picture = h->picture_structure != PICT_FRAME;
/*
return ret;
h->cur_pic_ptr = pic;
- h->cur_pic = *h->cur_pic_ptr;
- h->cur_pic.f.extended_data = h->cur_pic.f.data;
+ unref_picture(h, &h->cur_pic);
+ if ((ret = ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
+ return ret;
ff_er_frame_start(&h->er);
* get released even with set reference, besides SVQ3 and others do not
* mark frames as reference later "naturally". */
if (h->avctx->codec_id != AV_CODEC_ID_SVQ3)
- h->cur_pic_ptr->f.reference = 0;
+ h->cur_pic_ptr->reference = 0;
h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
int i, pics, out_of_order, out_idx;
int invalid = 0, cnt = 0;
- h->cur_pic_ptr->f.qscale_type = FF_QSCALE_TYPE_H264;
h->cur_pic_ptr->f.pict_type = h->pict_type;
if (h->next_output_pic)
assert(pics <= MAX_DELAYED_PIC_COUNT);
h->delayed_pic[pics++] = cur;
- if (cur->f.reference == 0)
- cur->f.reference = DELAYED_PIC_REF;
+ if (cur->reference == 0)
+ cur->reference = DELAYED_PIC_REF;
/* Frame reordering. This code takes pictures from coding order and sorts
* them by their incremental POC value into display order. It supports POC
}
if (pics > h->avctx->has_b_frames) {
- out->f.reference &= ~DELAYED_PIC_REF;
+ out->reference &= ~DELAYED_PIC_REF;
- // for frame threading, the owner must be the second field's thread or
- // else the first thread can release the picture and reuse it unsafely
- out->owner2 = h;
for (i = out_idx; h->delayed_pic[i]; i++)
h->delayed_pic[i] = h->delayed_pic[i + 1];
}
void ff_h264_hl_decode_mb(H264Context *h)
{
const int mb_xy = h->mb_xy;
- const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+ const int mb_type = h->cur_pic.mb_type[mb_xy];
int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || h->qscale == 0;
if (CHROMA444) {
h->prev_interlaced_frame = 1;
idr(h);
if (h->cur_pic_ptr)
- h->cur_pic_ptr->f.reference = 0;
+ h->cur_pic_ptr->reference = 0;
h->first_field = 0;
memset(h->ref_list[0], 0, sizeof(h->ref_list[0]));
memset(h->ref_list[1], 0, sizeof(h->ref_list[1]));
for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
if (h->delayed_pic[i])
- h->delayed_pic[i]->f.reference = 0;
+ h->delayed_pic[i]->reference = 0;
h->delayed_pic[i] = NULL;
}
flush_change(h);
- for (i = 0; i < h->picture_count; i++) {
- if (h->DPB[i].f.data[0])
- free_frame_buffer(h, &h->DPB[i]);
- }
+ for (i = 0; i < MAX_PICTURE_COUNT; i++)
+ unref_picture(h, &h->DPB[i]);
h->cur_pic_ptr = NULL;
+ unref_picture(h, &h->cur_pic);
h->mb_x = h->mb_y = 0;
h->mb_y = 0;
if (!in_setup && !h->droppable)
- ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX,
+ ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
h->picture_structure == PICT_BOTTOM_FIELD);
if (CONFIG_H264_VDPAU_DECODER &&
h0->current_slice = 0;
if (!h0->first_field) {
- if (h->cur_pic_ptr && !h->droppable &&
- h->cur_pic_ptr->owner2 == h) {
- ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX,
+ if (h->cur_pic_ptr && !h->droppable) {
+ ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
h->picture_structure == PICT_BOTTOM_FIELD);
}
h->cur_pic_ptr = NULL;
if (h0->first_field) {
assert(h0->cur_pic_ptr);
assert(h0->cur_pic_ptr->f.data[0]);
- assert(h0->cur_pic_ptr->f.reference != DELAYED_PIC_REF);
-
- /* Mark old field/frame as completed */
- if (!last_pic_droppable && h0->cur_pic_ptr->owner2 == h0) {
- ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
- last_pic_structure == PICT_BOTTOM_FIELD);
- }
+ assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
/* figure out if we have a complementary field pair */
if (!FIELD_PICTURE || h->picture_structure == last_pic_structure) {
/* Previous field is unmatched. Don't display it, but let it
* remain for reference if marked as such. */
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
- ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
+ ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
last_pic_structure == PICT_TOP_FIELD);
}
} else {