/*
 * huffyuv codec for libavcodec
 *
 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
 * the algorithm used
 */

/**
 * @file huffyuv.c
 * huffyuv codec for libavcodec.
 */

#include "common.h"
#include "bitstream.h"
#include "avcodec.h"
#include "dsputil.h"

#define VLC_BITS 11

#ifdef WORDS_BIGENDIAN
#define B 3
#define G 2
#define R 1
#else
#define B 0
#define G 1
#define R 2
#endif

typedef enum Predictor{
    LEFT= 0,
    PLANE,
    MEDIAN,
} Predictor;

typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;
    GetBitContext gb;
    PutBitContext pb;
    int interlaced;
    int decorrelate;
    int bitstream_bpp;
    int version;
    int yuy2;                               //use yuy2 instead of 422P
    int bgr32;                              //use bgr32 instead of bgr24
    int width, height;
    int flags;
    int context;
    int picture_number;
    int last_slice_end;
    uint8_t *temp[3];
    uint64_t stats[3][256];
    uint8_t len[3][256];
    uint32_t bits[3][256];
    VLC vlc[3];
    AVFrame picture;
    uint8_t *bitstream_buffer;
    unsigned int bitstream_buffer_size;
    DSPContext dsp;
}HYuvContext;

static const unsigned char classic_shift_luma[] = {
  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
  69,68, 0
};

static const unsigned char classic_shift_chroma[] = {
  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
};

static const unsigned char classic_add_luma[256] = {
    3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
   73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
   68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
   35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
   37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
   35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
   27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
   15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
   12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
   12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
   18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
   28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
   28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
   62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
   54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
   46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
};

static const unsigned char classic_add_chroma[256] = {
    3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
    7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
   11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
   43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
   80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
   17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
  112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
    0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
  135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
   52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
   19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
    7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
   83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
   14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
    6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
};

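/* Reverse left prediction: dst[i] = acc += src[i]. The first loop handles two
 * samples per iteration; the return value is the running accumulator so the
 * caller can continue prediction in the next call. */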
static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
    int i;

    for(i=0; i<w-1; i++){
        acc+= src[i];
        dst[i]= acc;
        i++;
        acc+= src[i];
        dst[i]= acc;
    }

    for(; i<w; i++){
        acc+= src[i];
        dst[i]= acc;
    }

    return acc;
}

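/* Reverse median prediction: the predictor is the median of the left sample,
 * the sample above (src1[i]) and left+above-aboveleft; the decoded residual
 * diff[i] is added on top. *left and *left_top carry state between calls. */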
static inline void add_median_prediction(uint8_t *dst, uint8_t *src1, uint8_t *diff, int w, int *left, int *left_top){
    int i;
    uint8_t l, lt;

    l= *left;
    lt= *left_top;

    for(i=0; i<w; i++){
        l= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF) + diff[i];
        lt= src1[i];
        dst[i]= l;
    }

    *left= l;
    *left_top= lt;
}

static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    int i;
    int r,g,b;
    r= *red;
    g= *green;
    b= *blue;

    for(i=0; i<w; i++){
        b+= src[4*i+B];
        g+= src[4*i+G];
        r+= src[4*i+R];

        dst[4*i+B]= b;
        dst[4*i+G]= g;
        dst[4*i+R]= r;
    }

    *red= r;
    *green= g;
    *blue= b;
}

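/* Forward left prediction for the encoder: dst[i] = src[i] - previous sample.
 * Long rows hand the bulk of the work to dsp.diff_bytes(); the return value is
 * the last source sample so prediction can continue across calls. */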
static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
    int i;
    if(w<32){
        for(i=0; i<w; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        return left;
    }else{
        for(i=0; i<16; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
        return src[w-1];
    }
}

static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    int i;
    int r,g,b;
    r= *red;
    g= *green;
    b= *blue;
    for(i=0; i<FFMIN(w,4); i++){
        const int rt= src[i*4+R];
        const int gt= src[i*4+G];
        const int bt= src[i*4+B];
        dst[i*4+R]= rt - r;
        dst[i*4+G]= gt - g;
        dst[i*4+B]= bt - b;
        r = rt;
        g = gt;
        b = bt;
    }
    s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
    *red=   src[(w-1)*4+R];
    *green= src[(w-1)*4+G];
    *blue=  src[(w-1)*4+B];
}

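/* Code-length tables are run-length coded in the bitstream: 3 bits of repeat
 * count (0 means an explicit 8-bit count follows) and 5 bits of length. */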
static void read_len_table(uint8_t *dst, GetBitContext *gb){
    int i, val, repeat;

    for(i=0; i<256;){
        repeat= get_bits(gb, 3);
        val   = get_bits(gb, 5);
        if(repeat==0)
            repeat= get_bits(gb, 8);
//printf("%d %d\n", val, repeat);
        while (repeat--)
            dst[i++] = val;
    }
}

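/* Assign canonical codes from the code lengths, starting with the longest
 * codes; after each length the running code value must be even, otherwise the
 * lengths do not describe a valid prefix tree. */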
static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
    int len, index;
    uint32_t bits=0;

    for(len=32; len>0; len--){
        for(index=0; index<256; index++){
            if(len_table[index]==len)
                dst[index]= bits++;
        }
        if(bits & 1){
            av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
            return -1;
        }
        bits >>= 1;
    }
    return 0;
}

#ifdef CONFIG_ENCODERS
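/* Build code lengths by Huffman construction over the symbol counts; if any
 * resulting length would need 32 bits or more, the counts are flattened by
 * adding (offset-1) and the construction is repeated with a larger offset. */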
static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
    uint64_t counts[2*size];
    int up[2*size];
    int offset, i, next;

    for(offset=1; ; offset<<=1){
        for(i=0; i<size; i++){
            counts[i]= stats[i] + offset - 1;
        }

        for(next=size; next<size*2; next++){
            uint64_t min1, min2;
            int min1_i, min2_i;

            min1=min2= INT64_MAX;
            min1_i= min2_i=-1;

            for(i=0; i<next; i++){
                if(min2 > counts[i]){
                    if(min1 > counts[i]){
                        min2= min1;
                        min2_i= min1_i;
                        min1= counts[i];
                        min1_i= i;
                    }else{
                        min2= counts[i];
                        min2_i= i;
                    }
                }
            }

            if(min2==INT64_MAX) break;

            counts[next]= min1 + min2;
            counts[min1_i]=
            counts[min2_i]= INT64_MAX;
            up[min1_i]=
            up[min2_i]= next;
            up[next]= -1;
        }

        for(i=0; i<size; i++){
            int len;
            int index=i;

            for(len=0; up[index] != -1; len++)
                index= up[index];

            if(len >= 32) break;

            dst[i]= len;
        }
        if(i==size) break;
    }
}
#endif /* CONFIG_ENCODERS */

static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
    GetBitContext gb;
    int i;

    init_get_bits(&gb, src, length*8);

    for(i=0; i<3; i++){
        read_len_table(s->len[i], &gb);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }
#if 0
for(j=0; j<256; j++){
printf("%6X, %2d,  %3d\n", s->bits[i][j], s->len[i][j], j);
}
#endif
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    return (get_bits_count(&gb)+7)/8;
}

static int read_old_huffman_tables(HYuvContext *s){
#if 1
    GetBitContext gb;
    int i;

    init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
    read_len_table(s->len[0], &gb);
    init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
    read_len_table(s->len[1], &gb);

    for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma  [i];
    for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];

    if(s->bitstream_bpp >= 24){
        memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
        memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
    }
    memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
    memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));

    for(i=0; i<3; i++){
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    return 0;
#else
    av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
    return -1;
#endif
}

static void alloc_temp(HYuvContext *s){
    int i;

    if(s->bitstream_bpp<24){
        for(i=0; i<3; i++){
            s->temp[i]= av_malloc(s->width + 16);
        }
    }else{
        for(i=0; i<2; i++){
            s->temp[i]= av_malloc(4*s->width + 16);
        }
    }
}

static int common_init(AVCodecContext *avctx){
    HYuvContext *s = avctx->priv_data;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx);

    s->width= avctx->width;
    s->height= avctx->height;
    assert(s->width>0 && s->height>0);

    return 0;
}

#ifdef CONFIG_DECODERS
static int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_init(avctx);
    memset(s->vlc, 0, 3*sizeof(VLC));

    avctx->coded_frame= &s->picture;
    s->interlaced= s->height > 288;

s->bgr32=1;
//if(avctx->extradata)
//  printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
    if(avctx->extradata_size){
        if((avctx->bits_per_sample&7) && avctx->bits_per_sample != 12)
            s->version=1; // do such files exist at all?
        else
            s->version=2;
    }else
        s->version=0;

    if(s->version==2){
        int method, interlace;

        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_sample&~7;
        interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
        s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
            return -1;
    }else{
        switch(avctx->bits_per_sample&7){
        case 1:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        case 2:
            s->predictor= LEFT;
            s->decorrelate= 1;
            break;
        case 3:
            s->predictor= PLANE;
            s->decorrelate= avctx->bits_per_sample >= 24;
            break;
        case 4:
            s->predictor= MEDIAN;
            s->decorrelate= 0;
            break;
        default:
            s->predictor= LEFT; //OLD
            s->decorrelate= 0;
            break;
        }
        s->bitstream_bpp= avctx->bits_per_sample & ~7;
        s->context= 0;

        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    switch(s->bitstream_bpp){
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        break;
    case 16:
        if(s->yuy2){
            avctx->pix_fmt = PIX_FMT_YUYV422;
        }else{
            avctx->pix_fmt = PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if(s->bgr32){
            avctx->pix_fmt = PIX_FMT_RGB32;
        }else{
            avctx->pix_fmt = PIX_FMT_BGR24;
        }
        break;
    default:
        assert(0);
    }

    alloc_temp(s);

//    av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced);

    return 0;
}
#endif

#ifdef CONFIG_ENCODERS
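/* Store one code-length table into extradata with the same run-length scheme
 * that read_len_table() parses: val|(repeat<<5), or val followed by an 8-bit
 * repeat count for runs longer than 7. */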
static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
    int i;
    int index= 0;

    for(i=0; i<256;){
        int val= len[i];
        int repeat=0;

        for(; i<256 && len[i]==val && repeat<255; i++)
            repeat++;

        assert(val < 32 && val >0 && repeat<256 && repeat>0);
        if(repeat>7){
            buf[index++]= val;
            buf[index++]= repeat;
        }else{
            buf[index++]= val | (repeat<<5);
        }
    }

    return index;
}

static int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    common_init(avctx);

    avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
    avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    s->version=2;

    avctx->coded_frame= &s->picture;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        s->bitstream_bpp= 12;
        break;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    case PIX_FMT_RGB32:
        s->bitstream_bpp= 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avctx->bits_per_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;
    s->predictor= avctx->prediction_method;
    s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if(avctx->context_model==1){
        s->context= avctx->context_model;
        if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
            av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
            return -1;
        }
    }else s->context= 0;

    if(avctx->codec->id==CODEC_ID_HUFFYUV){
        if(avctx->pix_fmt==PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if(avctx->context_model){
            av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if(s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
        av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
        return -1;
    }

    ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
    if(s->context)
        ((uint8_t*)avctx->extradata)[2]|= 0x40;
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;
                    p=next;
                }
            }
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i], 256);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        s->avctx->extradata_size+=
        store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if(s->context){
        for(i=0; i<3; i++){
            int pels = s->width*s->height / (i?40:10);
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);
                s->stats[i][j]= pels/(d+1);
            }
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 0;
    }

//    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced);

    alloc_temp(s);

    s->picture_number=0;

    return 0;
}
#endif /* CONFIG_ENCODERS */

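/* Decode one row of 4:2:2 samples into the temp buffers in Y U Y V order;
 * count is the number of luma samples in the row. */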
static void decode_422_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    for(i=0; i<count; i++){
        s->temp[0][2*i  ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
        s->temp[1][  i  ]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
        s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
        s->temp[2][  i  ]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
    }
}

static void decode_gray_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    for(i=0; i<count; i++){
        s->temp[0][2*i  ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
        s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
    }
}

#ifdef CONFIG_ENCODERS
static int encode_422_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            s->stats[0][ s->temp[0][2*i  ] ]++;
            s->stats[1][ s->temp[1][  i  ] ]++;
            s->stats[0][ s->temp[0][2*i+1] ]++;
            s->stats[2][ s->temp[2][  i  ] ]++;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if(s->context){
        for(i=0; i<count; i++){
            s->stats[0][ s->temp[0][2*i  ] ]++;
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i  ] ], s->bits[0][ s->temp[0][2*i  ] ]);
            s->stats[1][ s->temp[1][  i  ] ]++;
            put_bits(&s->pb, s->len[1][ s->temp[1][  i  ] ], s->bits[1][ s->temp[1][  i  ] ]);
            s->stats[0][ s->temp[0][2*i+1] ]++;
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
            s->stats[2][ s->temp[2][  i  ] ]++;
            put_bits(&s->pb, s->len[2][ s->temp[2][  i  ] ], s->bits[2][ s->temp[2][  i  ] ]);
        }
    }else{
        for(i=0; i<count; i++){
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i  ] ], s->bits[0][ s->temp[0][2*i  ] ]);
            put_bits(&s->pb, s->len[1][ s->temp[1][  i  ] ], s->bits[1][ s->temp[1][  i  ] ]);
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
            put_bits(&s->pb, s->len[2][ s->temp[2][  i  ] ], s->bits[2][ s->temp[2][  i  ] ]);
        }
    }
    return 0;
}

static int encode_gray_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            s->stats[0][ s->temp[0][2*i  ] ]++;
            s->stats[0][ s->temp[0][2*i+1] ]++;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if(s->context){
        for(i=0; i<count; i++){
            s->stats[0][ s->temp[0][2*i  ] ]++;
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i  ] ], s->bits[0][ s->temp[0][2*i  ] ]);
            s->stats[0][ s->temp[0][2*i+1] ]++;
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
        }
    }else{
        for(i=0; i<count; i++){
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i  ] ], s->bits[0][ s->temp[0][2*i  ] ]);
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
        }
    }
    return 0;
}
#endif /* CONFIG_ENCODERS */

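/* RGB rows: with decorrelation enabled green is coded directly and blue/red
 * are coded as differences against green; 32 bpp streams carry one extra
 * coded value per pixel which is read and discarded. */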
static void decode_bgr_bitstream(HYuvContext *s, int count){
    int i;

    if(s->decorrelate){
        if(s->bitstream_bpp==24){
            for(i=0; i<count; i++){
                s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
                s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
                s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            }
        }else{
            for(i=0; i<count; i++){
                s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
                s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
                s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
                                   get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
            }
        }
    }else{
        if(s->bitstream_bpp==24){
            for(i=0; i<count; i++){
                s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
                s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
                s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
            }
        }else{
            for(i=0; i<count; i++){
                s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
                s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
                s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
                                   get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
            }
        }
    }
}

#ifdef CONFIG_ENCODERS
static int encode_bgr_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
        for(i=0; i<count; i++){
            int g= s->temp[0][4*i+G];
            int b= (s->temp[0][4*i+B] - g) & 0xff;
            int r= (s->temp[0][4*i+R] - g) & 0xff;
            s->stats[0][b]++;
            s->stats[1][g]++;
            s->stats[2][r]++;
        }
    }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
        for(i=0; i<count; i++){
            int g= s->temp[0][4*i+G];
            int b= (s->temp[0][4*i+B] - g) & 0xff;
            int r= (s->temp[0][4*i+R] - g) & 0xff;
            s->stats[0][b]++;
            s->stats[1][g]++;
            s->stats[2][r]++;
            put_bits(&s->pb, s->len[1][g], s->bits[1][g]);
            put_bits(&s->pb, s->len[0][b], s->bits[0][b]);
            put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
        }
    }else{
        for(i=0; i<count; i++){
            int g= s->temp[0][4*i+G];
            int b= (s->temp[0][4*i+B] - g) & 0xff;
            int r= (s->temp[0][4*i+R] - g) & 0xff;
            put_bits(&s->pb, s->len[1][g], s->bits[1][g]);
            put_bits(&s->pb, s->len[0][b], s->bits[0][b]);
            put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
        }
    }
    return 0;
}
#endif /* CONFIG_ENCODERS */

#ifdef CONFIG_DECODERS
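/* Pass finished rows to the application via draw_horiz_band(); for 4:2:0
 * streams the chroma offsets advance at half the luma rate. */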
static void draw_slice(HYuvContext *s, int y){
    int h, cy;
    int offset[4];

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    y -= h;

    if(s->bitstream_bpp==12){
        cy= y>>1;
    }else{
        cy= y;
    }

    offset[0] = s->picture.linesize[0]*y;
    offset[1] = s->picture.linesize[1]*cy;
    offset[2] = s->picture.linesize[2]*cy;
    offset[3] = 0;
    emms_c();

    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);

    s->last_slice_end= y + h;
}

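/* The input is byte-swapped 32 bits at a time into bitstream_buffer so it can
 * be read with the MSB-first bit reader; when context modelling is enabled a
 * per-frame set of Huffman tables precedes the picture data. */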
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size){
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    int fake_ystride, fake_ustride, fake_vstride;
    AVFrame * const p= &s->picture;
    int table_size= 0;

    AVFrame *picture = data;

    s->bitstream_buffer= av_fast_realloc(s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);

    s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (uint32_t*)buf, buf_size/4);

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if(s->context){
        table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
        if(table_size < 0)
            return -1;
    }

    if((unsigned)(buf_size-table_size) >= INT_MAX/8)
        return -1;

    init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);

    fake_ystride= s->interlaced ? p->linesize[0]*2  : p->linesize[0];
    fake_ustride= s->interlaced ? p->linesize[1]*2  : p->linesize[1];
    fake_vstride= s->interlaced ? p->linesize[2]*2  : p->linesize[2];

    s->last_slice_end= 0;

    if(s->bitstream_bpp<24){
        int y, cy;
        int lefty, leftu, leftv;
        int lefttopy, lefttopu, lefttopv;

        if(s->yuy2){
            p->data[0][3]= get_bits(&s->gb, 8);
            p->data[0][2]= get_bits(&s->gb, 8);
            p->data[0][1]= get_bits(&s->gb, 8);
            p->data[0][0]= get_bits(&s->gb, 8);

            av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
            return -1;
        }else{

            leftv= p->data[2][0]= get_bits(&s->gb, 8);
            lefty= p->data[0][1]= get_bits(&s->gb, 8);
            leftu= p->data[1][0]= get_bits(&s->gb, 8);
                   p->data[0][0]= get_bits(&s->gb, 8);

            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                for(cy=y=1; y<s->height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        decode_gray_bitstream(s, width);

                        ydst= p->data[0] + p->linesize[0]*y;

                        lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                        if(s->predictor == PLANE){
                            if(y>s->interlaced)
                                s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                        }
                        y++;
                        if(y>=s->height) break;
                    }

                    draw_slice(s, y);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
                        leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
                    }
                    if(s->predictor == PLANE){
                        if(cy>s->interlaced){
                            s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                            if(!(s->flags&CODEC_FLAG_GRAY)){
                                s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                                s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
                            }
                        }
                    }
                }
                draw_slice(s, height);

                break;
            case MEDIAN:
                /* first line except first 2 pixels is left predicted */
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                cy=y=1;

                /* second line is left predicted for interlaced case */
                if(s->interlaced){
                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
                        leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
                    }
                    y++; cy++;
                }

                /* next 4 pixels are left predicted too */
                decode_422_bitstream(s, 4);
                lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
                    leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
                }

                /* next line except the first 4 pixels is median predicted */
                lefttopy= p->data[0][3];
                decode_422_bitstream(s, width-4);
                add_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    lefttopu= p->data[1][1];
                    lefttopv= p->data[2][1];
                    add_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
                    add_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
                }
                y++; cy++;

                for(; y<height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        while(2*cy > y){
                            decode_gray_bitstream(s, width);
                            ydst= p->data[0] + p->linesize[0]*y;
                            add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                            y++;
                        }
                        if(y>=height) break;
                    }
                    draw_slice(s, y);

                    decode_422_bitstream(s, width);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        add_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                        add_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
                    }
                }

                draw_slice(s, height);
                break;
            }
        }
    }else{
        int y;
        int leftr, leftg, leftb;
        const int last_line= (height-1)*p->linesize[0];

        if(s->bitstream_bpp==32){
            skip_bits(&s->gb, 8);
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
        }else{
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
            skip_bits(&s->gb, 8);
        }

        if(s->bgr32){
            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_bgr_bitstream(s, width-1);
                add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);

                for(y=s->height-2; y>=0; y--){ //yes, it's stored upside down
                    decode_bgr_bitstream(s, width);

                    add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
                    if(s->predictor == PLANE){
                        if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
                            s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
                                             p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
                        }
                    }
                }
                draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
            }
        }else{

            av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
            return -1;
        }
    }
    emms_c();

    *picture= *p;
    *data_size = sizeof(AVFrame);

    return (get_bits_count(&s->gb)+31)/32*4 + table_size;
}
#endif /* CONFIG_DECODERS */

static int common_end(HYuvContext *s){
    int i;

    for(i=0; i<3; i++){
        av_freep(&s->temp[i]);
    }
    return 0;
}

#ifdef CONFIG_DECODERS
static int decode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i;

    common_end(s);
    av_freep(&s->bitstream_buffer);

    for(i=0; i<3; i++){
        free_vlc(&s->vlc[i]);
    }

    return 0;
}
#endif

#ifdef CONFIG_ENCODERS
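/* Encode one frame: with context modelling the per-frame Huffman tables are
 * stored first, then the picture is coded with the selected predictor; the
 * finished bitstream is byte-swapped 32 bits at a time before output. */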
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    HYuvContext *s = avctx->priv_data;
    AVFrame *pict = data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    const int fake_ystride= s->interlaced ? pict->linesize[0]*2  : pict->linesize[0];
    const int fake_ustride= s->interlaced ? pict->linesize[1]*2  : pict->linesize[1];
    const int fake_vstride= s->interlaced ? pict->linesize[2]*2  : pict->linesize[2];
    AVFrame * const p= &s->picture;
    int i, j, size=0;

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    if(s->context){
        for(i=0; i<3; i++){
            generate_len_table(s->len[i], s->stats[i], 256);
            if(generate_bits_table(s->bits[i], s->len[i])<0)
                return -1;
            size+= store_table(s, s->len[i], &buf[size]);
        }

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, buf+size, buf_size-size);

    if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
        int lefty, leftu, leftv, y, cy;

        put_bits(&s->pb, 8, leftv= p->data[2][0]);
        put_bits(&s->pb, 8, lefty= p->data[0][1]);
        put_bits(&s->pb, 8, leftu= p->data[1][0]);
        put_bits(&s->pb, 8,        p->data[0][0]);

        lefty= sub_left_prediction(s, s->temp[0], p->data[0]+2, width-2 , lefty);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1]+1, width2-1, leftu);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2]+1, width2-1, leftv);

        encode_422_bitstream(s, width-2);

        if(s->predictor==MEDIAN){
            int lefttopy, lefttopu, lefttopv;
            cy=y=1;
            if(s->interlaced){
                lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
                leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
                leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);

                encode_422_bitstream(s, width);
                y++; cy++;
            }

            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);

            encode_422_bitstream(s, 4);

            lefttopy= p->data[0][3];
            lefttopu= p->data[1][1];
            lefttopv= p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
            encode_422_bitstream(s, width-4);
            y++; cy++;

            for(; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                if(s->bitstream_bpp==12){
                    while(2*cy > y){
                        ydst= p->data[0] + p->linesize[0]*y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if(y>=height) break;
                }
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, width);
            }
        }else{
            for(cy=y=1; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if(s->bitstream_bpp==12){
                    ydst= p->data[0] + p->linesize[0]*y;

                    if(s->predictor == PLANE && s->interlaced < y){
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    }else{
                        lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if(y>=height) break;
                }

                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                if(s->predictor == PLANE && s->interlaced < cy){
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                }else{
                    lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, width);
            }
        }
    }else if(avctx->pix_fmt == PIX_FMT_RGB32){
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr= data[R]);
        put_bits(&s->pb, 8, leftg= data[G]);
        put_bits(&s->pb, 8, leftb= data[B]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
        encode_bgr_bitstream(s, width-1);

        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            }else{
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgr_bitstream(s, width);
        }
    }else{
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    size+= (put_bits_count(&s->pb)+31)/8;
    size/= 4;

    if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
        int j;
        char *p= avctx->stats_out;
        char *end= p + 1024*30;
        for(i=0; i<3; i++){
            for(j=0; j<256; j++){
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p+= strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    }else
        avctx->stats_out[0] = '\0';
    if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
        flush_put_bits(&s->pb);
        s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
    }

    s->picture_number++;

    return size*4;
}

static int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_end(s);

    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);

    return 0;
}
#endif /* CONFIG_ENCODERS */

#ifdef CONFIG_DECODERS
AVCodec huffyuv_decoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL
};

AVCodec ffvhuff_decoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL
};
#endif

#ifdef CONFIG_ENCODERS

AVCodec huffyuv_encoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, -1},
};

AVCodec ffvhuff_encoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, -1},
};

#endif //CONFIG_ENCODERS