6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
};
- static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int left){
+ static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
- uint8_t *src, int w, int left)
++ const uint8_t *src, int w, int left)
+ {
int i;
- if(w<32){
- for(i=0; i<w; i++){
- const int temp= src[i];
- dst[i]= temp - left;
- left= temp;
+ if (w < 32) {
+ for (i = 0; i < w; i++) {
+ const int temp = src[i];
+ dst[i] = temp - left;
+ left = temp;
}
return left;
- }else{
- for(i=0; i<16; i++){
- const int temp= src[i];
- dst[i]= temp - left;
- left= temp;
+ } else {
+ for (i = 0; i < 16; i++) {
+ const int temp = src[i];
+ dst[i] = temp - left;
+ left = temp;
}
- s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
+ s->dsp.diff_bytes(dst + 16, src + 16, src + 15, w - 16);
return src[w-1];
}
}
- static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha){
+ static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
- uint8_t *src, int w,
- int *red, int *green, int *blue)
++ const uint8_t *src, int w,
++ int *red, int *green, int *blue, int *alpha)
+ {
int i;
- int r,g,b;
+ int r, g, b, a;
- r= *red;
- g= *green;
- b= *blue;
- a= *alpha;
- for(i=0; i<FFMIN(w,4); i++){
- const int rt= src[i*4+R];
- const int gt= src[i*4+G];
- const int bt= src[i*4+B];
- const int at= src[i*4+A];
- dst[i*4+R]= rt - r;
- dst[i*4+G]= gt - g;
- dst[i*4+B]= bt - b;
- dst[i*4+A]= at - a;
+ r = *red;
+ g = *green;
+ b = *blue;
-
++ a = *alpha;
+ for (i = 0; i < FFMIN(w, 4); i++) {
+ const int rt = src[i * 4 + R];
+ const int gt = src[i * 4 + G];
+ const int bt = src[i * 4 + B];
++ const int at = src[i * 4 + A];
+ dst[i * 4 + R] = rt - r;
+ dst[i * 4 + G] = gt - g;
+ dst[i * 4 + B] = bt - b;
++ dst[i * 4 + A] = at - a;
r = rt;
g = gt;
b = bt;
+ a = at;
}
- s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
- *red= src[(w-1)*4+R];
- *green= src[(w-1)*4+G];
- *blue= src[(w-1)*4+B];
- *alpha= src[(w-1)*4+A];
+
+ s->dsp.diff_bytes(dst + 16, src + 16, src + 12, w * 4 - 16);
+
+ *red = src[(w - 1) * 4 + R];
+ *green = src[(w - 1) * 4 + G];
+ *blue = src[(w - 1) * 4 + B];
++ *alpha = src[(w - 1) * 4 + A];
+}
+
+static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst,
+ const uint8_t *src, int w,
+ int *red, int *green, int *blue)
+{
+ int i;
+ int r, g, b;
- r= *red;
- g= *green;
- b= *blue;
- for(i=0; i<FFMIN(w,16); i++){
- const int rt= src[i*3+0];
- const int gt= src[i*3+1];
- const int bt= src[i*3+2];
- dst[i*3+0]= rt - r;
- dst[i*3+1]= gt - g;
- dst[i*3+2]= bt - b;
++ r = *red;
++ g = *green;
++ b = *blue;
++ for (i = 0; i < FFMIN(w, 16); i++) {
++ const int rt = src[i*3 + 0];
++ const int gt = src[i*3 + 1];
++ const int bt = src[i*3 + 2];
++ dst[i*3 + 0] = rt - r;
++ dst[i*3 + 1] = gt - g;
++ dst[i*3 + 2] = bt - b;
+ r = rt;
+ g = gt;
+ b = bt;
+ }
- s->dsp.diff_bytes(dst+48, src+48, src+48-3, w*3-48);
- *red= src[(w-1)*3+0];
- *green= src[(w-1)*3+1];
- *blue= src[(w-1)*3+2];
++
++ s->dsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w*3 - 48);
++
++ *red = src[(w - 1)*3 + 0];
++ *green = src[(w - 1)*3 + 1];
++ *blue = src[(w - 1)*3 + 2];
}
- static int read_len_table(uint8_t *dst, GetBitContext *gb){
+ static int read_len_table(uint8_t *dst, GetBitContext *gb)
+ {
int i, val, repeat;
- for(i=0; i<256;){
- repeat= get_bits(gb, 3);
- val = get_bits(gb, 5);
- if(repeat==0)
- repeat= get_bits(gb, 8);
- //printf("%d %d\n", val, repeat);
- if(i+repeat > 256 || get_bits_left(gb) < 0) {
+ for (i = 0; i < 256;) {
+ repeat = get_bits(gb, 3);
+ val = get_bits(gb, 5);
+ if (repeat == 0)
+ repeat = get_bits(gb, 8);
+ if (i + repeat > 256 || get_bits_left(gb) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
return -1;
}
return 0;
}
- static void generate_joint_tables(HYuvContext *s){
- uint16_t symbols[1<<VLC_BITS];
- uint16_t bits[1<<VLC_BITS];
- uint8_t len[1<<VLC_BITS];
- if(s->bitstream_bpp < 24){
-#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
-typedef struct {
- uint64_t val;
- int name;
-} HeapElem;
-
-static void heap_sift(HeapElem *h, int root, int size)
-{
- while (root * 2 + 1 < size) {
- int child = root * 2 + 1;
- if (child < size - 1 && h[child].val > h[child + 1].val)
- child++;
- if (h[root].val > h[child].val) {
- FFSWAP(HeapElem, h[root], h[child]);
- root = child;
- } else
- break;
- }
-}
-
-static void generate_len_table(uint8_t *dst, const uint64_t *stats)
-{
- HeapElem h[256];
- int up[2*256];
- int len[2*256];
- int offset, i, next;
- int size = 256;
-
- for (offset = 1; ; offset <<= 1) {
- for (i = 0; i < size; i++) {
- h[i].name = i;
- h[i].val = (stats[i] << 8) + offset;
- }
- for (i = size / 2 - 1; i >= 0; i--)
- heap_sift(h, i, size);
-
- for (next = size; next < size * 2 - 1; next++) {
- // merge the two smallest entries, and put it back in the heap
- uint64_t min1v = h[0].val;
- up[h[0].name] = next;
- h[0].val = INT64_MAX;
- heap_sift(h, 0, size);
- up[h[0].name] = next;
- h[0].name = next;
- h[0].val += min1v;
- heap_sift(h, 0, size);
- }
-
- len[2 * size - 2] = 0;
- for (i = 2 * size - 3; i >= size; i--)
- len[i] = len[up[i]] + 1;
- for (i = 0; i < size; i++) {
- dst[i] = len[up[i]] + 1;
- if (dst[i] >= 32) break;
- }
- if (i==size) break;
- }
-}
-#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
-
+ static void generate_joint_tables(HYuvContext *s)
+ {
+ uint16_t symbols[1 << VLC_BITS];
+ uint16_t bits[1 << VLC_BITS];
+ uint8_t len[1 << VLC_BITS];
+ if (s->bitstream_bpp < 24) {
int p, i, y, u;
- for(p=0; p<3; p++){
- for(i=y=0; y<256; y++){
+ for (p = 0; p < 3; p++) {
+ for (i = y = 0; y < 256; y++) {
int len0 = s->len[0][y];
int limit = VLC_BITS - len0;
if(limit <= 0)
generate_joint_tables(s);
- return (get_bits_count(&gb)+7)/8;
+ return (get_bits_count(&gb) + 7) / 8;
}
- static int read_old_huffman_tables(HYuvContext *s){
+ static int read_old_huffman_tables(HYuvContext *s)
+ {
-#if 1
GetBitContext gb;
int i;
generate_joint_tables(s);
return 0;
-#else
- av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
- return -1;
-#endif
}
- static av_cold void alloc_temp(HYuvContext *s){
+ static av_cold void alloc_temp(HYuvContext *s)
+ {
int i;
- if(s->bitstream_bpp<24){
- for(i=0; i<3; i++){
+ if (s->bitstream_bpp < 24) {
+ for (i = 0; i < 3; i++) {
s->temp[i]= av_malloc(s->width + 16);
}
- }else{
+ } else {
s->temp[0]= av_mallocz(4*s->width + 16);
}
}
ff_dsputil_init(&s->dsp, avctx);
- s->width= avctx->width;
- s->height= avctx->height;
- av_assert1(s->width>0 && s->height>0);
+ s->width = avctx->width;
+ s->height = avctx->height;
- assert(s->width>0 && s->height>0);
++ av_assert1(s->width > 0 && s->height > 0);
return 0;
}
HYuvContext *s = avctx->priv_data;
common_init(avctx);
- memset(s->vlc, 0, 3*sizeof(VLC));
+ memset(s->vlc, 0, 3 * sizeof(VLC));
- avctx->coded_frame= &s->picture;
+ avctx->coded_frame = &s->picture;
+ avcodec_get_frame_defaults(&s->picture);
- s->interlaced= s->height > 288;
-
- s->bgr32=1;
- //if(avctx->extradata)
- // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
- if(avctx->extradata_size){
- if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
- s->version=1; // do such files exist at all?
+ s->interlaced = s->height > 288;
+
+ s->bgr32 = 1;
+
+ if (avctx->extradata_size) {
+ if ((avctx->bits_per_coded_sample & 7) &&
+ avctx->bits_per_coded_sample != 12)
+ s->version = 1; // do such files exist at all?
else
- s->version=2;
- }else
- s->version=0;
+ s->version = 2;
+ } else
+ s->version = 0;
- if(s->version==2){
+ if (s->version == 2) {
int method, interlace;
if (avctx->extradata_size < 4)
return AVERROR_INVALIDDATA;
}
+ if ((avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P) && avctx->width & 1) {
+ av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
+ return AVERROR_INVALIDDATA;
+ }
+
alloc_temp(s);
- // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
-
return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
- static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
+ static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
+ {
int i;
- int index= 0;
+ int index = 0;
- for(i=0; i<256;){
- int val= len[i];
- int repeat=0;
+ for (i = 0; i < 256;) {
+ int val = len[i];
+ int repeat = 0;
- for(; i<256 && len[i]==val && repeat<255; i++)
+ for (; i < 256 && len[i] == val && repeat < 255; i++)
repeat++;
- assert(val < 32 && val >0 && repeat<256 && repeat>0);
- if ( repeat > 7) {
+ av_assert0(val < 32 && val > 0 && repeat < 256 && repeat > 0);
- if(repeat>7){
- buf[index++]= val;
- buf[index++]= repeat;
- }else{
- buf[index++]= val | (repeat<<5);
++ if (repeat > 7) {
+ buf[index++] = val;
+ buf[index++] = repeat;
+ } else {
+ buf[index++] = val | (repeat << 5);
}
}
common_init(avctx);
- avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
- avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
- s->version=2;
+ avctx->extradata = av_mallocz(1024*30); // 256*3+4 == 772
+ avctx->stats_out = av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
+ s->version = 2;
- avctx->coded_frame= &s->picture;
+ avctx->coded_frame = &s->picture;
- switch(avctx->pix_fmt){
+ switch (avctx->pix_fmt) {
case PIX_FMT_YUV420P:
- s->bitstream_bpp = 12;
- break;
case PIX_FMT_YUV422P:
- s->bitstream_bpp = 16;
+ if (s->width & 1) {
+ av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
+ return AVERROR(EINVAL);
+ }
+ s->bitstream_bpp = avctx->pix_fmt == PIX_FMT_YUV420P ? 12 : 16;
break;
case PIX_FMT_RGB32:
- s->bitstream_bpp= 32;
++ s->bitstream_bpp = 32;
+ break;
+ case PIX_FMT_RGB24:
- s->bitstream_bpp= 24;
+ s->bitstream_bpp = 24;
break;
default:
av_log(avctx, AV_LOG_ERROR, "format not supported\n");
}
}
- for(i=0; i<3; i++){
+ for (i = 0; i < 3; i++) {
- generate_len_table(s->len[i], s->stats[i]);
+ ff_generate_len_table(s->len[i], s->stats[i]);
- if(generate_bits_table(s->bits[i], s->len[i])<0){
+ if (generate_bits_table(s->bits[i], s->len[i]) < 0) {
return -1;
}
}
}
- static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes){
-static int encode_bgr_bitstream(HYuvContext *s, int count)
++static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
+ {
int i;
- if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*planes*count){
- if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 3 * 4 * count) {
++ if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * planes * count) {
av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
return -1;
}
#define LOAD3\
- int g= s->temp[0][planes==3 ? 3*i+1 : 4*i+G];\
- int b= (s->temp[0][planes==3 ? 3*i+2 : 4*i+B] - g) & 0xff;\
- int r= (s->temp[0][planes==3 ? 3*i+0 : 4*i+R] - g) & 0xff;\
- int a= s->temp[0][planes*i+A];
- int g = s->temp[0][4 * i + G];\
- int b = (s->temp[0][4 * i + B] - g) & 0xff;\
- int r = (s->temp[0][4 * i + R] - g) & 0xff;
++ int g = s->temp[0][planes==3 ? 3*i + 1 : 4*i + G];\
++ int b = (s->temp[0][planes==3 ? 3*i + 2 : 4*i + B] - g) & 0xff;\
++ int r = (s->temp[0][planes==3 ? 3*i + 0 : 4*i + R] - g) & 0xff;\
++ int a = s->temp[0][planes*i + A];
#define STAT3\
s->stats[0][b]++;\
s->stats[1][g]++;\
#define WRITE3\
put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
- put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
+ put_bits(&s->pb, s->len[2][r], s->bits[2][r]);\
+ if (planes == 4) put_bits(&s->pb, s->len[2][a], s->bits[2][a]);
- if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
- for(i=0; i<count; i++){
+ if ((s->flags & CODEC_FLAG_PASS1) &&
+ (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
+ for (i = 0; i < count; i++) {
LOAD3;
STAT3;
}
const AVFrame *pict, int *got_packet)
{
HYuvContext *s = avctx->priv_data;
- const int width= s->width;
- const int width2= s->width>>1;
- const int height= s->height;
- const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
- const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
- const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
- AVFrame * const p= &s->picture;
+ const int width = s->width;
+ const int width2 = s->width>>1;
+ const int height = s->height;
+ const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
+ const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
+ const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
+ AVFrame * const p = &s->picture;
int i, j, size = 0, ret;
- if (!pkt->data &&
- (ret = av_new_packet(pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0) {
- av_log(avctx, AV_LOG_ERROR, "Error allocating output packet.\n");
+ if ((ret = ff_alloc_packet2(avctx, pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0)
return ret;
- }
*p = *pict;
- p->pict_type= AV_PICTURE_TYPE_I;
- p->key_frame= 1;
+ p->pict_type = AV_PICTURE_TYPE_I;
+ p->key_frame = 1;
- if(s->context){
- for(i=0; i<3; i++){
+ if (s->context) {
+ for (i = 0; i < 3; i++) {
- generate_len_table(s->len[i], s->stats[i]);
+ ff_generate_len_table(s->len[i], s->stats[i]);
- if(generate_bits_table(s->bits[i], s->len[i])<0)
+ if (generate_bits_table(s->bits[i], s->len[i]) < 0)
return -1;
size += store_table(s, s->len[i], &pkt->data[size]);
}
const int stride = -p->linesize[0];
const int fake_stride = -fake_ystride;
int y;
- int leftr, leftg, leftb;
+ int leftr, leftg, leftb, lefta;
- put_bits(&s->pb, 8, lefta= data[A]);
- put_bits(&s->pb, 8, leftr= data[R]);
- put_bits(&s->pb, 8, leftg= data[G]);
- put_bits(&s->pb, 8, leftb= data[B]);
++ put_bits(&s->pb, 8, lefta = data[A]);
+ put_bits(&s->pb, 8, leftr = data[R]);
+ put_bits(&s->pb, 8, leftg = data[G]);
+ put_bits(&s->pb, 8, leftb = data[B]);
- put_bits(&s->pb, 8, 0);
- sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb, &lefta);
- encode_bgra_bitstream(s, width-1, 4);
- sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1, &leftr, &leftg, &leftb);
- encode_bgr_bitstream(s, width - 1);
++ sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1, &leftr, &leftg, &leftb, &lefta);
++ encode_bgra_bitstream(s, width - 1, 4);
- for(y=1; y<s->height; y++){
+ for (y = 1; y < s->height; y++) {
uint8_t *dst = data + y*stride;
- if(s->predictor == PLANE && s->interlaced < y){
- s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
+ if (s->predictor == PLANE && s->interlaced < y) {
+ s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
- sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
+ sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb, &lefta);
- }else{
+ } else {
- sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
+ sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb, &lefta);
+ }
+ encode_bgra_bitstream(s, width, 4);
+ }
+ } else if (avctx->pix_fmt == PIX_FMT_RGB24) {
+ uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
+ const int stride = -p->linesize[0];
+ const int fake_stride = -fake_ystride;
+ int y;
+ int leftr, leftg, leftb;
+
+ put_bits(&s->pb, 8, leftr = data[0]);
+ put_bits(&s->pb, 8, leftg = data[1]);
+ put_bits(&s->pb, 8, leftb = data[2]);
+ put_bits(&s->pb, 8, 0);
+
+ sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1, &leftr, &leftg, &leftb);
+ encode_bgra_bitstream(s, width - 1, 3);
+
+ for (y = 1; y < s->height; y++) {
+ uint8_t *dst = data + y*stride;
+ if (s->predictor == PLANE && s->interlaced < y) {
+ s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 3);
+ sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
+ } else {
+ sub_left_prediction_rgb24(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
}
- encode_bgr_bitstream(s, width);
+ encode_bgra_bitstream(s, width, 3);
}
- }else{
+ } else {
av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
}
emms_c();