2 * Copyright (C) 2003-2004 the ffmpeg project
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2 of the License, or (at your option) any later version.
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with this library; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * On2 VP3 Video Decoder
24 * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
25 * For more information about the VP3 coding process, visit:
26 * http://multimedia.cx/
28 * Theora decoder by Alex Beregszaszi
39 #include "mpegvideo.h"
43 #define FRAGMENT_PIXELS 8
48 * Define one or more of the following compile-time variables to 1 to obtain
49 * elaborate information about certain aspects of the decoding process.
51 * KEYFRAMES_ONLY: set this to 1 to only see keyframes (VP3 slideshow mode)
52 * DEBUG_VP3: high-level decoding flow
53 * DEBUG_INIT: initialization parameters
 * DEBUG_DEQUANTIZERS: display how the dequantization tables are built
55 * DEBUG_BLOCK_CODING: unpacking the superblock/macroblock/fragment coding
56 * DEBUG_MODES: unpacking the coding modes for individual fragments
57 * DEBUG_VECTORS: display the motion vectors
58 * DEBUG_TOKEN: display exhaustive information about each DCT token
59 * DEBUG_VLC: display the VLCs as they are extracted from the stream
60 * DEBUG_DC_PRED: display the process of reversing DC prediction
61 * DEBUG_IDCT: show every detail of the IDCT process
64 #define KEYFRAMES_ONLY 0
68 #define DEBUG_DEQUANTIZERS 0
69 #define DEBUG_BLOCK_CODING 0
71 #define DEBUG_VECTORS 0
74 #define DEBUG_DC_PRED 0
78 #define debug_vp3 printf
80 static inline void debug_vp3(const char *format, ...) { }
84 #define debug_init printf
86 static inline void debug_init(const char *format, ...) { }
89 #if DEBUG_DEQUANTIZERS
90 #define debug_dequantizers printf
92 static inline void debug_dequantizers(const char *format, ...) { }
95 #if DEBUG_BLOCK_CODING
96 #define debug_block_coding printf
98 static inline void debug_block_coding(const char *format, ...) { }
102 #define debug_modes printf
104 static inline void debug_modes(const char *format, ...) { }
108 #define debug_vectors printf
110 static inline void debug_vectors(const char *format, ...) { }
114 #define debug_token printf
116 static inline void debug_token(const char *format, ...) { }
120 #define debug_vlc printf
122 static inline void debug_vlc(const char *format, ...) { }
126 #define debug_dc_pred printf
128 static inline void debug_dc_pred(const char *format, ...) { }
132 #define debug_idct printf
134 static inline void debug_idct(const char *format, ...) { }
137 typedef struct Coeff {
143 //FIXME split things out into their own arrays
144 typedef struct Vp3Fragment {
146 /* address of first pixel taking into account which plane the fragment
147 * lives on as well as the plane stride */
149 /* this is the macroblock that the fragment belongs to */
151 uint8_t coding_method;
157 #define SB_NOT_CODED 0
158 #define SB_PARTIALLY_CODED 1
159 #define SB_FULLY_CODED 2
161 #define MODE_INTER_NO_MV 0
163 #define MODE_INTER_PLUS_MV 2
164 #define MODE_INTER_LAST_MV 3
165 #define MODE_INTER_PRIOR_LAST 4
166 #define MODE_USING_GOLDEN 5
167 #define MODE_GOLDEN_MV 6
168 #define MODE_INTER_FOURMV 7
169 #define CODING_MODE_COUNT 8
171 /* special internal mode */
174 /* There are 6 preset schemes, plus a free-form scheme */
175 static int ModeAlphabet[7][CODING_MODE_COUNT] =
177 /* this is the custom scheme */
178 { 0, 0, 0, 0, 0, 0, 0, 0 },
180 /* scheme 1: Last motion vector dominates */
181 { MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST,
182 MODE_INTER_PLUS_MV, MODE_INTER_NO_MV,
183 MODE_INTRA, MODE_USING_GOLDEN,
184 MODE_GOLDEN_MV, MODE_INTER_FOURMV },
187 { MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST,
188 MODE_INTER_NO_MV, MODE_INTER_PLUS_MV,
189 MODE_INTRA, MODE_USING_GOLDEN,
190 MODE_GOLDEN_MV, MODE_INTER_FOURMV },
193 { MODE_INTER_LAST_MV, MODE_INTER_PLUS_MV,
194 MODE_INTER_PRIOR_LAST, MODE_INTER_NO_MV,
195 MODE_INTRA, MODE_USING_GOLDEN,
196 MODE_GOLDEN_MV, MODE_INTER_FOURMV },
199 { MODE_INTER_LAST_MV, MODE_INTER_PLUS_MV,
200 MODE_INTER_NO_MV, MODE_INTER_PRIOR_LAST,
201 MODE_INTRA, MODE_USING_GOLDEN,
202 MODE_GOLDEN_MV, MODE_INTER_FOURMV },
204 /* scheme 5: No motion vector dominates */
205 { MODE_INTER_NO_MV, MODE_INTER_LAST_MV,
206 MODE_INTER_PRIOR_LAST, MODE_INTER_PLUS_MV,
207 MODE_INTRA, MODE_USING_GOLDEN,
208 MODE_GOLDEN_MV, MODE_INTER_FOURMV },
211 { MODE_INTER_NO_MV, MODE_USING_GOLDEN,
212 MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST,
213 MODE_INTER_PLUS_MV, MODE_INTRA,
214 MODE_GOLDEN_MV, MODE_INTER_FOURMV },
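/* Illustrative sketch (not part of the original source): after the 3-bit
 * scheme number has been read from the frame header, each macroblock's
 * coding mode is a simple table lookup; e.g. with scheme 1 a decoded rank
 * of 0 maps to MODE_INTER_LAST_MV and a rank of 4 maps to MODE_INTRA.
 * The helper below is hypothetical. */
#if 0
static int lookup_coding_mode(int scheme, int rank)
{
    /* rank is the VLC-decoded index 0..7; scheme 0 is the custom
     * alphabet transmitted in the bitstream */
    return ModeAlphabet[scheme][rank];
}
#endif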
218 #define MIN_DEQUANT_VAL 2
220 typedef struct Vp3DecodeContext {
221 AVCodecContext *avctx;
222 int theora, theora_tables;
225 AVFrame golden_frame;
227 AVFrame current_frame;
233 int last_quality_index;
235 int superblock_count;
236 int superblock_width;
237 int superblock_height;
238 int y_superblock_width;
239 int y_superblock_height;
240 int c_superblock_width;
241 int c_superblock_height;
242 int u_superblock_start;
243 int v_superblock_start;
244 unsigned char *superblock_coding;
246 int macroblock_count;
247 int macroblock_width;
248 int macroblock_height;
254 Vp3Fragment *all_fragments;
257 int u_fragment_start;
258 int v_fragment_start;
263 uint16_t coded_dc_scale_factor[64];
264 uint32_t coded_ac_scale_factor[64];
265 uint16_t coded_intra_y_dequant[64];
266 uint16_t coded_intra_c_dequant[64];
267 uint16_t coded_inter_dequant[64];
269 /* this is a list of indices into the all_fragments array indicating
270 * which of the fragments are coded */
271 int *coded_fragment_list;
272 int coded_fragment_list_index;
273 int pixel_addresses_inited;
281 VLC superblock_run_length_vlc;
282 VLC fragment_run_length_vlc;
284 VLC motion_vector_vlc;
/* these arrays need to be on 16-byte boundaries since SSE2 operations
 * are used on them */
288 int16_t __align16 intra_y_dequant[64];
289 int16_t __align16 intra_c_dequant[64];
290 int16_t __align16 inter_dequant[64];
292 /* This table contains superblock_count * 16 entries. Each set of 16
293 * numbers corresponds to the fragment indices 0..15 of the superblock.
 * An entry will be -1 to indicate that no fragment corresponds to
 * that position. */
296 int *superblock_fragments;
298 /* This table contains superblock_count * 4 entries. Each set of 4
299 * numbers corresponds to the macroblock indices 0..3 of the superblock.
 * An entry will be -1 to indicate that no macroblock corresponds to
 * that position. */
302 int *superblock_macroblocks;
304 /* This table contains macroblock_count * 6 entries. Each set of 6
305 * numbers corresponds to the fragment indices 0..5 which comprise
306 * the macroblock (4 Y fragments and 2 C fragments). */
307 int *macroblock_fragments;
/* This is an array that indicates how a particular macroblock is coded */
310 unsigned char *macroblock_coding;
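/* Illustrative sizes (hypothetical 320x240 frame, not from the source):
 * fragments are 8x8 pixels, so the Y plane holds 40x30 = 1200 fragments
 * and each chroma plane 20x15 = 300, for a fragment count of 1800.
 * Superblocks span 4x4 fragments (80 Y + 40 C = 120) and macroblocks
 * span 2x2 Y fragments (20x15 = 300), so the three mapping tables above
 * would hold 120*16 = 1920, 120*4 = 480 and 300*6 = 1800 entries. */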
312 int first_coded_y_fragment;
313 int first_coded_c_fragment;
314 int last_coded_y_fragment;
315 int last_coded_c_fragment;
317 uint8_t edge_emu_buffer[9*2048]; //FIXME dynamic alloc
318 uint8_t qscale_table[2048]; //FIXME dynamic alloc (width+15)/16
321 static int theora_decode_comments(AVCodecContext *avctx, GetBitContext gb);
322 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext gb);
324 /************************************************************************
325 * VP3 specific functions
326 ************************************************************************/
 * This function sets up all of the various block mappings:
330 * superblocks <-> fragments, macroblocks <-> fragments,
331 * superblocks <-> macroblocks
 * Returns 0 if successful; returns 1 if *anything* went wrong.
335 static int init_block_mapping(Vp3DecodeContext *s)
338 signed int hilbert_walk_y[16];
339 signed int hilbert_walk_c[16];
340 signed int hilbert_walk_mb[4];
342 int current_fragment = 0;
343 int current_width = 0;
344 int current_height = 0;
347 int superblock_row_inc = 0;
349 int mapping_index = 0;
351 int current_macroblock;
354 signed char travel_width[16] = {
361 signed char travel_height[16] = {
368 signed char travel_width_mb[4] = {
372 signed char travel_height_mb[4] = {
376 debug_vp3(" vp3: initialize block mapping tables\n");
378 /* figure out hilbert pattern per these frame dimensions */
379 hilbert_walk_y[0] = 1;
380 hilbert_walk_y[1] = 1;
381 hilbert_walk_y[2] = s->fragment_width;
382 hilbert_walk_y[3] = -1;
383 hilbert_walk_y[4] = s->fragment_width;
384 hilbert_walk_y[5] = s->fragment_width;
385 hilbert_walk_y[6] = 1;
386 hilbert_walk_y[7] = -s->fragment_width;
387 hilbert_walk_y[8] = 1;
388 hilbert_walk_y[9] = s->fragment_width;
389 hilbert_walk_y[10] = 1;
390 hilbert_walk_y[11] = -s->fragment_width;
391 hilbert_walk_y[12] = -s->fragment_width;
392 hilbert_walk_y[13] = -1;
393 hilbert_walk_y[14] = -s->fragment_width;
394 hilbert_walk_y[15] = 1;
396 hilbert_walk_c[0] = 1;
397 hilbert_walk_c[1] = 1;
398 hilbert_walk_c[2] = s->fragment_width / 2;
399 hilbert_walk_c[3] = -1;
400 hilbert_walk_c[4] = s->fragment_width / 2;
401 hilbert_walk_c[5] = s->fragment_width / 2;
402 hilbert_walk_c[6] = 1;
403 hilbert_walk_c[7] = -s->fragment_width / 2;
404 hilbert_walk_c[8] = 1;
405 hilbert_walk_c[9] = s->fragment_width / 2;
406 hilbert_walk_c[10] = 1;
407 hilbert_walk_c[11] = -s->fragment_width / 2;
408 hilbert_walk_c[12] = -s->fragment_width / 2;
409 hilbert_walk_c[13] = -1;
410 hilbert_walk_c[14] = -s->fragment_width / 2;
411 hilbert_walk_c[15] = 1;
413 hilbert_walk_mb[0] = 1;
414 hilbert_walk_mb[1] = s->macroblock_width;
415 hilbert_walk_mb[2] = 1;
416 hilbert_walk_mb[3] = -s->macroblock_width;
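/* Illustrative note, derived from the offset tables above: within one
 * 4x4-fragment superblock the walk visits fragments in the following
 * order (value of j at each fragment position, row 0 on top):
 *
 *    0  1 14 15
 *    3  2 13 12
 *    4  7  8 11
 *    5  6  9 10
 */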
418 /* iterate through each superblock (all planes) and map the fragments */
419 for (i = 0; i < s->superblock_count; i++) {
420 debug_init(" superblock %d (u starts @ %d, v starts @ %d)\n",
421 i, s->u_superblock_start, s->v_superblock_start);
423 /* time to re-assign the limits? */
426 /* start of Y superblocks */
427 right_edge = s->fragment_width;
428 bottom_edge = s->fragment_height;
431 superblock_row_inc = 3 * s->fragment_width -
432 (s->y_superblock_width * 4 - s->fragment_width);
433 hilbert = hilbert_walk_y;
435 /* the first operation for this variable is to advance by 1 */
436 current_fragment = -1;
438 } else if (i == s->u_superblock_start) {
440 /* start of U superblocks */
441 right_edge = s->fragment_width / 2;
442 bottom_edge = s->fragment_height / 2;
445 superblock_row_inc = 3 * (s->fragment_width / 2) -
446 (s->c_superblock_width * 4 - s->fragment_width / 2);
447 hilbert = hilbert_walk_c;
449 /* the first operation for this variable is to advance by 1 */
450 current_fragment = s->u_fragment_start - 1;
452 } else if (i == s->v_superblock_start) {
454 /* start of V superblocks */
455 right_edge = s->fragment_width / 2;
456 bottom_edge = s->fragment_height / 2;
459 superblock_row_inc = 3 * (s->fragment_width / 2) -
460 (s->c_superblock_width * 4 - s->fragment_width / 2);
461 hilbert = hilbert_walk_c;
463 /* the first operation for this variable is to advance by 1 */
464 current_fragment = s->v_fragment_start - 1;
468 if (current_width >= right_edge - 1) {
469 /* reset width and move to next superblock row */
473 /* fragment is now at the start of a new superblock row */
474 current_fragment += superblock_row_inc;
477 /* iterate through all 16 fragments in a superblock */
478 for (j = 0; j < 16; j++) {
479 current_fragment += hilbert[j];
480 current_width += travel_width[j];
481 current_height += travel_height[j];
483 /* check if the fragment is in bounds */
484 if ((current_width < right_edge) &&
485 (current_height < bottom_edge)) {
486 s->superblock_fragments[mapping_index] = current_fragment;
487 debug_init(" mapping fragment %d to superblock %d, position %d (%d/%d x %d/%d)\n",
488 s->superblock_fragments[mapping_index], i, j,
489 current_width, right_edge, current_height, bottom_edge);
491 s->superblock_fragments[mapping_index] = -1;
492 debug_init(" superblock %d, position %d has no fragment (%d/%d x %d/%d)\n",
494 current_width, right_edge, current_height, bottom_edge);
501 /* initialize the superblock <-> macroblock mapping; iterate through
502 * all of the Y plane superblocks to build this mapping */
503 right_edge = s->macroblock_width;
504 bottom_edge = s->macroblock_height;
507 superblock_row_inc = s->macroblock_width -
(s->y_superblock_width * 2 - s->macroblock_width);
509 hilbert = hilbert_walk_mb;
511 current_macroblock = -1;
512 for (i = 0; i < s->u_superblock_start; i++) {
514 if (current_width >= right_edge - 1) {
515 /* reset width and move to next superblock row */
519 /* macroblock is now at the start of a new superblock row */
520 current_macroblock += superblock_row_inc;
523 /* iterate through each potential macroblock in the superblock */
524 for (j = 0; j < 4; j++) {
525 current_macroblock += hilbert_walk_mb[j];
526 current_width += travel_width_mb[j];
527 current_height += travel_height_mb[j];
529 /* check if the macroblock is in bounds */
530 if ((current_width < right_edge) &&
531 (current_height < bottom_edge)) {
532 s->superblock_macroblocks[mapping_index] = current_macroblock;
533 debug_init(" mapping macroblock %d to superblock %d, position %d (%d/%d x %d/%d)\n",
534 s->superblock_macroblocks[mapping_index], i, j,
535 current_width, right_edge, current_height, bottom_edge);
537 s->superblock_macroblocks[mapping_index] = -1;
538 debug_init(" superblock %d, position %d has no macroblock (%d/%d x %d/%d)\n",
540 current_width, right_edge, current_height, bottom_edge);
547 /* initialize the macroblock <-> fragment mapping */
548 current_fragment = 0;
549 current_macroblock = 0;
551 for (i = 0; i < s->fragment_height; i += 2) {
553 for (j = 0; j < s->fragment_width; j += 2) {
555 debug_init(" macroblock %d contains fragments: ", current_macroblock);
556 s->all_fragments[current_fragment].macroblock = current_macroblock;
557 s->macroblock_fragments[mapping_index++] = current_fragment;
558 debug_init("%d ", current_fragment);
560 if (j + 1 < s->fragment_width) {
561 s->all_fragments[current_fragment + 1].macroblock = current_macroblock;
562 s->macroblock_fragments[mapping_index++] = current_fragment + 1;
563 debug_init("%d ", current_fragment + 1);
565 s->macroblock_fragments[mapping_index++] = -1;
567 if (i + 1 < s->fragment_height) {
568 s->all_fragments[current_fragment + s->fragment_width].macroblock =
570 s->macroblock_fragments[mapping_index++] =
571 current_fragment + s->fragment_width;
572 debug_init("%d ", current_fragment + s->fragment_width);
574 s->macroblock_fragments[mapping_index++] = -1;
576 if ((j + 1 < s->fragment_width) && (i + 1 < s->fragment_height)) {
577 s->all_fragments[current_fragment + s->fragment_width + 1].macroblock =
579 s->macroblock_fragments[mapping_index++] =
580 current_fragment + s->fragment_width + 1;
581 debug_init("%d ", current_fragment + s->fragment_width + 1);
583 s->macroblock_fragments[mapping_index++] = -1;
586 c_fragment = s->u_fragment_start +
587 (i * s->fragment_width / 4) + (j / 2);
588 s->all_fragments[c_fragment].macroblock = s->macroblock_count;
589 s->macroblock_fragments[mapping_index++] = c_fragment;
590 debug_init("%d ", c_fragment);
592 c_fragment = s->v_fragment_start +
593 (i * s->fragment_width / 4) + (j / 2);
594 s->all_fragments[c_fragment].macroblock = s->macroblock_count;
595 s->macroblock_fragments[mapping_index++] = c_fragment;
596 debug_init("%d ", c_fragment);
600 if (j + 2 <= s->fragment_width)
601 current_fragment += 2;
604 current_macroblock++;
607 current_fragment += s->fragment_width;
610 return 0; /* successful path out */
614 * This function unpacks a single token (which should be in the range 0..31)
615 * and returns a zero run (number of zero coefficients in current DCT matrix
616 * before next non-zero coefficient), the next DCT coefficient, and the
617 * number of consecutive, non-EOB'd DCT blocks to EOB.
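 *
 * Illustrative example: a DCT_VAL_CATEGORY4 token reads one sign bit and
 * two magnitude bits, producing a coefficient of +/-(9..12), a zero run
 * of 0 and no EOB run.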
619 static void unpack_token(GetBitContext *gb, int token, int *zero_run,
620 DCTELEM *coeff, int *eob_run)
628 debug_token(" vp3 token %d: ", token);
632 debug_token("DCT_EOB_TOKEN, EOB next block\n");
637 debug_token("DCT_EOB_PAIR_TOKEN, EOB next 2 blocks\n");
642 debug_token("DCT_EOB_TRIPLE_TOKEN, EOB next 3 blocks\n");
647 debug_token("DCT_REPEAT_RUN_TOKEN, ");
648 *eob_run = get_bits(gb, 2) + 4;
649 debug_token("EOB the next %d blocks\n", *eob_run);
653 debug_token("DCT_REPEAT_RUN2_TOKEN, ");
654 *eob_run = get_bits(gb, 3) + 8;
655 debug_token("EOB the next %d blocks\n", *eob_run);
659 debug_token("DCT_REPEAT_RUN3_TOKEN, ");
660 *eob_run = get_bits(gb, 4) + 16;
661 debug_token("EOB the next %d blocks\n", *eob_run);
665 debug_token("DCT_REPEAT_RUN4_TOKEN, ");
666 *eob_run = get_bits(gb, 12);
667 debug_token("EOB the next %d blocks\n", *eob_run);
671 debug_token("DCT_SHORT_ZRL_TOKEN, ");
/* this token indicates that (3 extra bits) + 1 zeros should be output;
 * it is coded as a zero run of (3 extra bits) followed by a
 * coefficient of 0. */
675 *zero_run = get_bits(gb, 3);
677 debug_token("skip the next %d positions in output matrix\n", *zero_run + 1);
681 debug_token("DCT_ZRL_TOKEN, ");
/* this token indicates that (6 extra bits) + 1 zeros should be output;
 * it is coded as a zero run of (6 extra bits) followed by a
 * coefficient of 0. */
685 *zero_run = get_bits(gb, 6);
687 debug_token("skip the next %d positions in output matrix\n", *zero_run + 1);
691 debug_token("ONE_TOKEN, output 1\n");
696 debug_token("MINUS_ONE_TOKEN, output -1\n");
701 debug_token("TWO_TOKEN, output 2\n");
706 debug_token("MINUS_TWO_TOKEN, output -2\n");
714 debug_token("LOW_VAL_TOKENS, ");
716 *coeff = -(3 + (token - 13));
718 *coeff = 3 + (token - 13);
719 debug_token("output %d\n", *coeff);
723 debug_token("DCT_VAL_CATEGORY3, ");
724 sign = get_bits(gb, 1);
725 *coeff = 7 + get_bits(gb, 1);
728 debug_token("output %d\n", *coeff);
732 debug_token("DCT_VAL_CATEGORY4, ");
733 sign = get_bits(gb, 1);
734 *coeff = 9 + get_bits(gb, 2);
737 debug_token("output %d\n", *coeff);
741 debug_token("DCT_VAL_CATEGORY5, ");
742 sign = get_bits(gb, 1);
743 *coeff = 13 + get_bits(gb, 3);
746 debug_token("output %d\n", *coeff);
750 debug_token("DCT_VAL_CATEGORY6, ");
751 sign = get_bits(gb, 1);
752 *coeff = 21 + get_bits(gb, 4);
755 debug_token("output %d\n", *coeff);
759 debug_token("DCT_VAL_CATEGORY7, ");
760 sign = get_bits(gb, 1);
761 *coeff = 37 + get_bits(gb, 5);
764 debug_token("output %d\n", *coeff);
768 debug_token("DCT_VAL_CATEGORY8, ");
769 sign = get_bits(gb, 1);
770 *coeff = 69 + get_bits(gb, 9);
773 debug_token("output %d\n", *coeff);
781 debug_token("DCT_RUN_CATEGORY1, ");
782 *zero_run = token - 22;
787 debug_token("output %d 0s, then %d\n", *zero_run, *coeff);
791 debug_token("DCT_RUN_CATEGORY1B, ");
796 *zero_run = 6 + get_bits(gb, 2);
797 debug_token("output %d 0s, then %d\n", *zero_run, *coeff);
801 debug_token("DCT_RUN_CATEGORY1C, ");
806 *zero_run = 10 + get_bits(gb, 3);
807 debug_token("output %d 0s, then %d\n", *zero_run, *coeff);
811 debug_token("DCT_RUN_CATEGORY2, ");
812 sign = get_bits(gb, 1);
813 *coeff = 2 + get_bits(gb, 1);
817 debug_token("output %d 0s, then %d\n", *zero_run, *coeff);
821 debug_token("DCT_RUN_CATEGORY2, ");
822 sign = get_bits(gb, 1);
823 *coeff = 2 + get_bits(gb, 1);
826 *zero_run = 2 + get_bits(gb, 1);
827 debug_token("output %d 0s, then %d\n", *zero_run, *coeff);
831 av_log(NULL, AV_LOG_ERROR, " vp3: help! Got a bad token: %d > 31\n", token);
838 * This function wipes out all of the fragment data.
840 static void init_frame(Vp3DecodeContext *s, GetBitContext *gb)
844 /* zero out all of the fragment information */
845 s->coded_fragment_list_index = 0;
846 for (i = 0; i < s->fragment_count; i++) {
847 s->all_fragments[i].coeff_count = 0;
848 s->all_fragments[i].motion_x = 0xbeef;
849 s->all_fragments[i].motion_y = 0xbeef;
850 s->all_fragments[i].next_coeff= NULL;
852 s->coeffs[i].coeff=0;
853 s->coeffs[i].next= NULL;
 * This function sets up the dequantization tables used for a particular frame.
861 static void init_dequantizer(Vp3DecodeContext *s)
864 int ac_scale_factor = s->coded_ac_scale_factor[s->quality_index];
865 int dc_scale_factor = s->coded_dc_scale_factor[s->quality_index];
868 debug_vp3(" vp3: initializing dequantization tables\n");
 * Scale dequantizers:
 *
 *   quantizer * sf
 *   --------------
 *        100
877 * where sf = dc_scale_factor for DC quantizer
878 * or ac_scale_factor for AC quantizer
880 * Then, saturate the result to a lower limit of MIN_DEQUANT_VAL.
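 *
 * For example, a coded AC quantizer of 40 combined with an
 * ac_scale_factor of 150 yields 40 * 150 / 100 = 60 before the
 * MIN_DEQUANT_VAL clamp and the final multiply by SCALER.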
884 /* scale DC quantizers */
885 s->intra_y_dequant[0] = s->coded_intra_y_dequant[0] * dc_scale_factor / 100;
886 if (s->intra_y_dequant[0] < MIN_DEQUANT_VAL * 2)
887 s->intra_y_dequant[0] = MIN_DEQUANT_VAL * 2;
888 s->intra_y_dequant[0] *= SCALER;
890 s->intra_c_dequant[0] = s->coded_intra_c_dequant[0] * dc_scale_factor / 100;
891 if (s->intra_c_dequant[0] < MIN_DEQUANT_VAL * 2)
892 s->intra_c_dequant[0] = MIN_DEQUANT_VAL * 2;
893 s->intra_c_dequant[0] *= SCALER;
895 s->inter_dequant[0] = s->coded_inter_dequant[0] * dc_scale_factor / 100;
896 if (s->inter_dequant[0] < MIN_DEQUANT_VAL * 4)
897 s->inter_dequant[0] = MIN_DEQUANT_VAL * 4;
898 s->inter_dequant[0] *= SCALER;
900 /* scale AC quantizers, zigzag at the same time in preparation for
901 * the dequantization phase */
902 for (i = 1; i < 64; i++) {
903 int k= s->scantable.scantable[i];
904 j = s->scantable.permutated[i];
906 s->intra_y_dequant[j] = s->coded_intra_y_dequant[k] * ac_scale_factor / 100;
907 if (s->intra_y_dequant[j] < MIN_DEQUANT_VAL)
908 s->intra_y_dequant[j] = MIN_DEQUANT_VAL;
909 s->intra_y_dequant[j] *= SCALER;
911 s->intra_c_dequant[j] = s->coded_intra_c_dequant[k] * ac_scale_factor / 100;
912 if (s->intra_c_dequant[j] < MIN_DEQUANT_VAL)
913 s->intra_c_dequant[j] = MIN_DEQUANT_VAL;
914 s->intra_c_dequant[j] *= SCALER;
916 s->inter_dequant[j] = s->coded_inter_dequant[k] * ac_scale_factor / 100;
917 if (s->inter_dequant[j] < MIN_DEQUANT_VAL * 2)
918 s->inter_dequant[j] = MIN_DEQUANT_VAL * 2;
919 s->inter_dequant[j] *= SCALER;
922 memset(s->qscale_table, (FFMAX(s->intra_y_dequant[1], s->intra_c_dequant[1])+8)/16, 512); //FIXME finetune
924 /* print debug information as requested */
925 debug_dequantizers("intra Y dequantizers:\n");
926 for (i = 0; i < 8; i++) {
927 for (j = i * 8; j < i * 8 + 8; j++) {
928 debug_dequantizers(" %4d,", s->intra_y_dequant[j]);
930 debug_dequantizers("\n");
932 debug_dequantizers("\n");
934 debug_dequantizers("intra C dequantizers:\n");
935 for (i = 0; i < 8; i++) {
936 for (j = i * 8; j < i * 8 + 8; j++) {
937 debug_dequantizers(" %4d,", s->intra_c_dequant[j]);
939 debug_dequantizers("\n");
941 debug_dequantizers("\n");
943 debug_dequantizers("interframe dequantizers:\n");
944 for (i = 0; i < 8; i++) {
945 for (j = i * 8; j < i * 8 + 8; j++) {
946 debug_dequantizers(" %4d,", s->inter_dequant[j]);
948 debug_dequantizers("\n");
950 debug_dequantizers("\n");
954 * This function is used to fetch runs of 1s or 0s from the bitstream for
955 * use in determining which superblocks are fully and partially coded.
 *   Codeword                 RunLength
 *   0                        1
 *   10x                      2-3
 *   110x                     4-5
 *   1110xx                   6-9
 *   11110xxx                 10-17
 *   111110xxxx               18-33
 *   111111xxxxxxxxxxxx       34-4129
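 *
 * Illustrative example: the prefix 1110 followed by the two extra bits
 * 10 decodes to a run of 6 + 2 = 8 superblocks.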
966 static int get_superblock_run_length(GetBitContext *gb)
969 if (get_bits(gb, 1) == 0)
972 else if (get_bits(gb, 1) == 0)
973 return (2 + get_bits(gb, 1));
975 else if (get_bits(gb, 1) == 0)
976 return (4 + get_bits(gb, 1));
978 else if (get_bits(gb, 1) == 0)
979 return (6 + get_bits(gb, 2));
981 else if (get_bits(gb, 1) == 0)
982 return (10 + get_bits(gb, 3));
984 else if (get_bits(gb, 1) == 0)
985 return (18 + get_bits(gb, 4));
988 return (34 + get_bits(gb, 12));
993 * This function is used to fetch runs of 1s or 0s from the bitstream for
994 * use in determining which particular fragments are coded.
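 *
 * Run-length codes, derived from the branches below (same layout as the
 * superblock table above):
 *
 *   Codeword        RunLength
 *   0x              1-2
 *   10x             3-4
 *   110x            5-6
 *   1110xx          7-10
 *   11110xx         11-14
 *   11111xxxx       15-30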
1004 static int get_fragment_run_length(GetBitContext *gb)
1007 if (get_bits(gb, 1) == 0)
1008 return (1 + get_bits(gb, 1));
1010 else if (get_bits(gb, 1) == 0)
1011 return (3 + get_bits(gb, 1));
1013 else if (get_bits(gb, 1) == 0)
1014 return (5 + get_bits(gb, 1));
1016 else if (get_bits(gb, 1) == 0)
1017 return (7 + get_bits(gb, 2));
1019 else if (get_bits(gb, 1) == 0)
1020 return (11 + get_bits(gb, 2));
1023 return (15 + get_bits(gb, 4));
1028 * This function decodes a VLC from the bitstream and returns a number
 * that ranges from 0..7. The number indicates which of the 8 coding
 * modes to use.
1043 static int get_mode_code(GetBitContext *gb)
1046 if (get_bits(gb, 1) == 0)
1049 else if (get_bits(gb, 1) == 0)
1052 else if (get_bits(gb, 1) == 0)
1055 else if (get_bits(gb, 1) == 0)
1058 else if (get_bits(gb, 1) == 0)
1061 else if (get_bits(gb, 1) == 0)
1064 else if (get_bits(gb, 1) == 0)
1073 * This function extracts a motion vector from the bitstream using a VLC
1074 * scheme. 3 bits are fetched from the bitstream and 1 of 8 actions is
 * taken depending on the value of those 3 bits:
 *
 *   0: return 0
 *   1: return 1
 *   2: return -1
1080 * 3: if (next bit is 1) return -2, else return 2
1081 * 4: if (next bit is 1) return -3, else return 3
1082 * 5: return 4 + (next 2 bits), next bit is sign
1083 * 6: return 8 + (next 3 bits), next bit is sign
1084 * 7: return 16 + (next 4 bits), next bit is sign
1086 static int get_motion_vector_vlc(GetBitContext *gb)
1090 bits = get_bits(gb, 3);
1107 if (get_bits(gb, 1) == 0)
1114 if (get_bits(gb, 1) == 0)
1121 bits = 4 + get_bits(gb, 2);
1122 if (get_bits(gb, 1) == 1)
1127 bits = 8 + get_bits(gb, 3);
1128 if (get_bits(gb, 1) == 1)
1133 bits = 16 + get_bits(gb, 4);
1134 if (get_bits(gb, 1) == 1)
1144 * This function fetches a 5-bit number from the stream followed by
1145 * a sign and calls it a motion vector.
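 *
 * Illustrative example: magnitude bits 00101 (= 5) followed by a sign
 * bit of 1 would yield a vector component of -5.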
1147 static int get_motion_vector_fixed(GetBitContext *gb)
1152 bits = get_bits(gb, 5);
1154 if (get_bits(gb, 1) == 1)
1161 * This function unpacks all of the superblock/macroblock/fragment coding
1162 * information from the bitstream.
1164 static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
1167 int current_superblock = 0;
1168 int current_run = 0;
1169 int decode_fully_flags = 0;
1170 int decode_partial_blocks = 0;
1171 int first_c_fragment_seen;
1174 int current_fragment;
1176 debug_vp3(" vp3: unpacking superblock coding\n");
1180 debug_vp3(" keyframe-- all superblocks are fully coded\n");
1181 memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);
1185 /* unpack the list of partially-coded superblocks */
1186 bit = get_bits(gb, 1);
1187 /* toggle the bit because as soon as the first run length is
1188 * fetched the bit will be toggled again */
1190 while (current_superblock < s->superblock_count) {
1191 if (current_run-- == 0) {
1194 current_run = get_vlc2(gb,
1195 s->superblock_run_length_vlc.table, 6, 2);
1196 if (current_run == 33)
1197 current_run += get_bits(gb, 12);
1199 current_run = get_superblock_run_length(gb);
1201 debug_block_coding(" setting superblocks %d..%d to %s\n",
1203 current_superblock + current_run - 1,
1204 (bit) ? "partially coded" : "not coded");
1206 /* if any of the superblocks are not partially coded, flag
1207 * a boolean to decode the list of fully-coded superblocks */
1209 decode_fully_flags = 1;
/* make a note of the fact that there are partially coded superblocks */
1214 decode_partial_blocks = 1;
1217 s->superblock_coding[current_superblock++] = bit;
1220 /* unpack the list of fully coded superblocks if any of the blocks were
1221 * not marked as partially coded in the previous step */
1222 if (decode_fully_flags) {
1224 current_superblock = 0;
1226 bit = get_bits(gb, 1);
1227 /* toggle the bit because as soon as the first run length is
1228 * fetched the bit will be toggled again */
1230 while (current_superblock < s->superblock_count) {
1232 /* skip any superblocks already marked as partially coded */
1233 if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
1235 if (current_run-- == 0) {
1238 current_run = get_vlc2(gb,
1239 s->superblock_run_length_vlc.table, 6, 2);
1240 if (current_run == 33)
1241 current_run += get_bits(gb, 12);
1243 current_run = get_superblock_run_length(gb);
1247 debug_block_coding(" setting superblock %d to %s\n",
1249 (bit) ? "fully coded" : "not coded");
1250 s->superblock_coding[current_superblock] = 2*bit;
1252 current_superblock++;
1256 /* if there were partial blocks, initialize bitstream for
1257 * unpacking fragment codings */
1258 if (decode_partial_blocks) {
1261 bit = get_bits(gb, 1);
1262 /* toggle the bit because as soon as the first run length is
1263 * fetched the bit will be toggled again */
1268 /* figure out which fragments are coded; iterate through each
1269 * superblock (all planes) */
1270 s->coded_fragment_list_index = 0;
1271 s->next_coeff= s->coeffs + s->fragment_count;
1272 s->first_coded_y_fragment = s->first_coded_c_fragment = 0;
1273 s->last_coded_y_fragment = s->last_coded_c_fragment = -1;
1274 first_c_fragment_seen = 0;
1275 memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
1276 for (i = 0; i < s->superblock_count; i++) {
1278 /* iterate through all 16 fragments in a superblock */
1279 for (j = 0; j < 16; j++) {
1281 /* if the fragment is in bounds, check its coding status */
1282 current_fragment = s->superblock_fragments[i * 16 + j];
1283 if (current_fragment >= s->fragment_count) {
1284 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_superblocks(): bad fragment number (%d >= %d)\n",
1285 current_fragment, s->fragment_count);
1288 if (current_fragment != -1) {
1289 if (s->superblock_coding[i] == SB_NOT_CODED) {
1291 /* copy all the fragments from the prior frame */
1292 s->all_fragments[current_fragment].coding_method =
1295 } else if (s->superblock_coding[i] == SB_PARTIALLY_CODED) {
1297 /* fragment may or may not be coded; this is the case
1298 * that cares about the fragment coding runs */
1299 if (current_run-- == 0) {
1302 current_run = get_vlc2(gb,
1303 s->fragment_run_length_vlc.table, 5, 2);
1305 current_run = get_fragment_run_length(gb);
/* default mode; actual mode will be decoded in the next phase */
1312 s->all_fragments[current_fragment].coding_method =
1314 s->all_fragments[current_fragment].next_coeff= s->coeffs + current_fragment;
1315 s->coded_fragment_list[s->coded_fragment_list_index] =
1317 if ((current_fragment >= s->u_fragment_start) &&
1318 (s->last_coded_y_fragment == -1) &&
1319 (!first_c_fragment_seen)) {
1320 s->first_coded_c_fragment = s->coded_fragment_list_index;
1321 s->last_coded_y_fragment = s->first_coded_c_fragment - 1;
1322 first_c_fragment_seen = 1;
1324 s->coded_fragment_list_index++;
1325 s->macroblock_coding[s->all_fragments[current_fragment].macroblock] = MODE_INTER_NO_MV;
1326 debug_block_coding(" superblock %d is partially coded, fragment %d is coded\n",
1327 i, current_fragment);
1329 /* not coded; copy this fragment from the prior frame */
1330 s->all_fragments[current_fragment].coding_method =
1332 debug_block_coding(" superblock %d is partially coded, fragment %d is not coded\n",
1333 i, current_fragment);
1338 /* fragments are fully coded in this superblock; actual
1339 * coding will be determined in next step */
1340 s->all_fragments[current_fragment].coding_method =
1342 s->all_fragments[current_fragment].next_coeff= s->coeffs + current_fragment;
1343 s->coded_fragment_list[s->coded_fragment_list_index] =
1345 if ((current_fragment >= s->u_fragment_start) &&
1346 (s->last_coded_y_fragment == -1) &&
1347 (!first_c_fragment_seen)) {
1348 s->first_coded_c_fragment = s->coded_fragment_list_index;
1349 s->last_coded_y_fragment = s->first_coded_c_fragment - 1;
1350 first_c_fragment_seen = 1;
1352 s->coded_fragment_list_index++;
1353 s->macroblock_coding[s->all_fragments[current_fragment].macroblock] = MODE_INTER_NO_MV;
1354 debug_block_coding(" superblock %d is fully coded, fragment %d is coded\n",
1355 i, current_fragment);
1361 if (!first_c_fragment_seen)
1362 /* only Y fragments coded in this frame */
1363 s->last_coded_y_fragment = s->coded_fragment_list_index - 1;
1365 /* end the list of coded C fragments */
1366 s->last_coded_c_fragment = s->coded_fragment_list_index - 1;
1368 debug_block_coding(" %d total coded fragments, y: %d -> %d, c: %d -> %d\n",
1369 s->coded_fragment_list_index,
1370 s->first_coded_y_fragment,
1371 s->last_coded_y_fragment,
1372 s->first_coded_c_fragment,
1373 s->last_coded_c_fragment);
1379 * This function unpacks all the coding mode data for individual macroblocks
1380 * from the bitstream.
1382 static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
1386 int current_macroblock;
1387 int current_fragment;
1390 debug_vp3(" vp3: unpacking encoding modes\n");
1393 debug_vp3(" keyframe-- all blocks are coded as INTRA\n");
1395 for (i = 0; i < s->fragment_count; i++)
1396 s->all_fragments[i].coding_method = MODE_INTRA;
1400 /* fetch the mode coding scheme for this frame */
1401 scheme = get_bits(gb, 3);
1402 debug_modes(" using mode alphabet %d\n", scheme);
1404 /* is it a custom coding scheme? */
1406 debug_modes(" custom mode alphabet ahead:\n");
1407 for (i = 0; i < 8; i++)
1408 ModeAlphabet[scheme][get_bits(gb, 3)] = i;
1411 for (i = 0; i < 8; i++)
1412 debug_modes(" mode[%d][%d] = %d\n", scheme, i,
1413 ModeAlphabet[scheme][i]);
1415 /* iterate through all of the macroblocks that contain 1 or more
1416 * coded fragments */
1417 for (i = 0; i < s->u_superblock_start; i++) {
1419 for (j = 0; j < 4; j++) {
1420 current_macroblock = s->superblock_macroblocks[i * 4 + j];
1421 if ((current_macroblock == -1) ||
1422 (s->macroblock_coding[current_macroblock] == MODE_COPY))
1424 if (current_macroblock >= s->macroblock_count) {
1425 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_modes(): bad macroblock number (%d >= %d)\n",
1426 current_macroblock, s->macroblock_count);
1430 /* mode 7 means get 3 bits for each coding mode */
1432 coding_mode = get_bits(gb, 3);
1436 coding_mode = ModeAlphabet[scheme]
1437 [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];
1439 coding_mode = ModeAlphabet[scheme][get_mode_code(gb)];
1443 s->macroblock_coding[current_macroblock] = coding_mode;
1444 for (k = 0; k < 6; k++) {
1446 s->macroblock_fragments[current_macroblock * 6 + k];
1447 if (current_fragment == -1)
1449 if (current_fragment >= s->fragment_count) {
1450 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_modes(): bad fragment number (%d >= %d)\n",
1451 current_fragment, s->fragment_count);
1454 if (s->all_fragments[current_fragment].coding_method !=
1456 s->all_fragments[current_fragment].coding_method =
1460 debug_modes(" coding method for macroblock starting @ fragment %d = %d\n",
1461 s->macroblock_fragments[current_macroblock * 6], coding_mode);
1470 * This function unpacks all the motion vectors for the individual
1471 * macroblocks from the bitstream.
1473 static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
1479 int last_motion_x = 0;
1480 int last_motion_y = 0;
1481 int prior_last_motion_x = 0;
1482 int prior_last_motion_y = 0;
1483 int current_macroblock;
1484 int current_fragment;
1486 debug_vp3(" vp3: unpacking motion vectors\n");
1489 debug_vp3(" keyframe-- there are no motion vectors\n");
1493 memset(motion_x, 0, 6 * sizeof(int));
1494 memset(motion_y, 0, 6 * sizeof(int));
1496 /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */
1497 coding_mode = get_bits(gb, 1);
1498 debug_vectors(" using %s scheme for unpacking motion vectors\n",
1499 (coding_mode == 0) ? "VLC" : "fixed-length");
1501 /* iterate through all of the macroblocks that contain 1 or more
1502 * coded fragments */
1503 for (i = 0; i < s->u_superblock_start; i++) {
1505 for (j = 0; j < 4; j++) {
1506 current_macroblock = s->superblock_macroblocks[i * 4 + j];
1507 if ((current_macroblock == -1) ||
1508 (s->macroblock_coding[current_macroblock] == MODE_COPY))
1510 if (current_macroblock >= s->macroblock_count) {
1511 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad macroblock number (%d >= %d)\n",
1512 current_macroblock, s->macroblock_count);
1516 current_fragment = s->macroblock_fragments[current_macroblock * 6];
1517 if (current_fragment >= s->fragment_count) {
1518 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad fragment number (%d >= %d\n",
1519 current_fragment, s->fragment_count);
1522 switch (s->macroblock_coding[current_macroblock]) {
1524 case MODE_INTER_PLUS_MV:
1525 case MODE_GOLDEN_MV:
1526 /* all 6 fragments use the same motion vector */
1527 if (coding_mode == 0) {
1529 motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
1530 motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
1532 motion_x[0] = get_motion_vector_vlc(gb);
1533 motion_y[0] = get_motion_vector_vlc(gb);
1536 motion_x[0] = get_motion_vector_fixed(gb);
1537 motion_y[0] = get_motion_vector_fixed(gb);
1539 for (k = 1; k < 6; k++) {
1540 motion_x[k] = motion_x[0];
1541 motion_y[k] = motion_y[0];
1544 /* vector maintenance, only on MODE_INTER_PLUS_MV */
1545 if (s->macroblock_coding[current_macroblock] ==
1546 MODE_INTER_PLUS_MV) {
1547 prior_last_motion_x = last_motion_x;
1548 prior_last_motion_y = last_motion_y;
1549 last_motion_x = motion_x[0];
1550 last_motion_y = motion_y[0];
1554 case MODE_INTER_FOURMV:
1555 /* fetch 4 vectors from the bitstream, one for each
1556 * Y fragment, then average for the C fragment vectors */
1557 motion_x[4] = motion_y[4] = 0;
1558 for (k = 0; k < 4; k++) {
1559 if (coding_mode == 0) {
1561 motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
1562 motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
1564 motion_x[k] = get_motion_vector_vlc(gb);
1565 motion_y[k] = get_motion_vector_vlc(gb);
1568 motion_x[k] = get_motion_vector_fixed(gb);
1569 motion_y[k] = get_motion_vector_fixed(gb);
1571 motion_x[4] += motion_x[k];
1572 motion_y[4] += motion_y[k];
1575 if (motion_x[4] >= 0)
1576 motion_x[4] = (motion_x[4] + 2) / 4;
1578 motion_x[4] = (motion_x[4] - 2) / 4;
1579 motion_x[5] = motion_x[4];
1581 if (motion_y[4] >= 0)
1582 motion_y[4] = (motion_y[4] + 2) / 4;
1584 motion_y[4] = (motion_y[4] - 2) / 4;
1585 motion_y[5] = motion_y[4];
1587 /* vector maintenance; vector[3] is treated as the
1588 * last vector in this case */
1589 prior_last_motion_x = last_motion_x;
1590 prior_last_motion_y = last_motion_y;
1591 last_motion_x = motion_x[3];
1592 last_motion_y = motion_y[3];
1595 case MODE_INTER_LAST_MV:
1596 /* all 6 fragments use the last motion vector */
1597 motion_x[0] = last_motion_x;
1598 motion_y[0] = last_motion_y;
1599 for (k = 1; k < 6; k++) {
1600 motion_x[k] = motion_x[0];
1601 motion_y[k] = motion_y[0];
/* no vector maintenance (last vector remains the same) */
1608 case MODE_INTER_PRIOR_LAST:
1609 /* all 6 fragments use the motion vector prior to the
1610 * last motion vector */
1611 motion_x[0] = prior_last_motion_x;
1612 motion_y[0] = prior_last_motion_y;
1613 for (k = 1; k < 6; k++) {
1614 motion_x[k] = motion_x[0];
1615 motion_y[k] = motion_y[0];
1618 /* vector maintenance */
1619 prior_last_motion_x = last_motion_x;
1620 prior_last_motion_y = last_motion_y;
1621 last_motion_x = motion_x[0];
1622 last_motion_y = motion_y[0];
1626 /* covers intra, inter without MV, golden without MV */
1627 memset(motion_x, 0, 6 * sizeof(int));
1628 memset(motion_y, 0, 6 * sizeof(int));
1630 /* no vector maintenance */
1634 /* assign the motion vectors to the correct fragments */
1635 debug_vectors(" vectors for macroblock starting @ fragment %d (coding method %d):\n",
1637 s->macroblock_coding[current_macroblock]);
1638 for (k = 0; k < 6; k++) {
1640 s->macroblock_fragments[current_macroblock * 6 + k];
1641 if (current_fragment == -1)
1643 if (current_fragment >= s->fragment_count) {
1644 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad fragment number (%d >= %d)\n",
1645 current_fragment, s->fragment_count);
1648 s->all_fragments[current_fragment].motion_x = motion_x[k];
1649 s->all_fragments[current_fragment].motion_y = motion_y[k];
1650 debug_vectors(" vector %d: fragment %d = (%d, %d)\n",
1651 k, current_fragment, motion_x[k], motion_y[k]);
1661 * This function is called by unpack_dct_coeffs() to extract the VLCs from
1662 * the bitstream. The VLCs encode tokens which are used to unpack DCT
1663 * data. This function unpacks all the VLCs for either the Y plane or both
1664 * C planes, and is called for DC coefficients or different AC coefficient
 * levels (since different coefficient types require different VLC tables).
 * This function returns a residual eob run. E.g., if a particular token gave
1668 * instructions to EOB the next 5 fragments and there were only 2 fragments
1669 * left in the current fragment range, 3 would be returned so that it could
1670 * be passed into the next call to this same function.
1672 static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
1673 VLC *table, int coeff_index,
1674 int first_fragment, int last_fragment,
1681 Vp3Fragment *fragment;
1682 uint8_t *perm= s->scantable.permutated;
1685 if ((first_fragment >= s->fragment_count) ||
1686 (last_fragment >= s->fragment_count)) {
1688 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vlcs(): bad fragment number (%d -> %d ?)\n",
1689 first_fragment, last_fragment);
1693 for (i = first_fragment; i <= last_fragment; i++) {
1695 fragment = &s->all_fragments[s->coded_fragment_list[i]];
1696 if (fragment->coeff_count > coeff_index)
1700 /* decode a VLC into a token */
1701 token = get_vlc2(gb, table->table, 5, 3);
1702 debug_vlc(" token = %2d, ", token);
1703 /* use the token to get a zero run, a coefficient, and an eob run */
1706 eob_run = eob_run_base[token];
1707 if (eob_run_get_bits[token])
1708 eob_run += get_bits(gb, eob_run_get_bits[token]);
1709 coeff = zero_run = 0;
1711 bits_to_get = coeff_get_bits[token];
1713 coeff = coeff_tables[token][0];
1715 coeff = coeff_tables[token][get_bits(gb, bits_to_get)];
1717 zero_run = zero_run_base[token];
1718 if (zero_run_get_bits[token])
1719 zero_run += get_bits(gb, zero_run_get_bits[token]);
1722 unpack_token(gb, token, &zero_run, &coeff, &eob_run);
1727 fragment->coeff_count += zero_run;
1728 if (fragment->coeff_count < 64){
1729 fragment->next_coeff->coeff= coeff;
1730 fragment->next_coeff->index= perm[fragment->coeff_count++]; //FIXME perm here already?
1731 fragment->next_coeff->next= s->next_coeff;
1732 s->next_coeff->next=NULL;
1733 fragment->next_coeff= s->next_coeff++;
1735 debug_vlc(" fragment %d coeff = %d\n",
1736 s->coded_fragment_list[i], fragment->next_coeff[coeff_index]);
1738 fragment->coeff_count |= 128;
1739 debug_vlc(" fragment %d eob with %d coefficients\n",
1740 s->coded_fragment_list[i], fragment->coeff_count&127);
1749 * This function unpacks all of the DCT coefficient data from the
1752 static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
1759 int residual_eob_run = 0;
1761 /* fetch the DC table indices */
1762 dc_y_table = get_bits(gb, 4);
1763 dc_c_table = get_bits(gb, 4);
1765 /* unpack the Y plane DC coefficients */
1766 debug_vp3(" vp3: unpacking Y plane DC coefficients using table %d\n",
1768 residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
1769 s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
1771 /* unpack the C plane DC coefficients */
1772 debug_vp3(" vp3: unpacking C plane DC coefficients using table %d\n",
1774 residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
1775 s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
1777 /* fetch the AC table indices */
1778 ac_y_table = get_bits(gb, 4);
1779 ac_c_table = get_bits(gb, 4);
1781 /* unpack the group 1 AC coefficients (coeffs 1-5) */
1782 for (i = 1; i <= 5; i++) {
1784 debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
1786 residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_1[ac_y_table], i,
1787 s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
1789 debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
1791 residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_1[ac_c_table], i,
1792 s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
1795 /* unpack the group 2 AC coefficients (coeffs 6-14) */
1796 for (i = 6; i <= 14; i++) {
1798 debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
1800 residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_2[ac_y_table], i,
1801 s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
1803 debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
1805 residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_2[ac_c_table], i,
1806 s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
1809 /* unpack the group 3 AC coefficients (coeffs 15-27) */
1810 for (i = 15; i <= 27; i++) {
1812 debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
1814 residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_3[ac_y_table], i,
1815 s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
1817 debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
1819 residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_3[ac_c_table], i,
1820 s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
1823 /* unpack the group 4 AC coefficients (coeffs 28-63) */
1824 for (i = 28; i <= 63; i++) {
1826 debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
1828 residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_4[ac_y_table], i,
1829 s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
1831 debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
1833 residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_4[ac_c_table], i,
1834 s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
1841 * This function reverses the DC prediction for each coded fragment in
 * the frame. Much of this function is adapted directly from the original
 * VP3 source code.
1845 #define COMPATIBLE_FRAME(x) \
1846 (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1847 #define FRAME_CODED(x) (s->all_fragments[x].coding_method != MODE_COPY)
#define DC_COEFF(u) (s->coeffs[u].index ? 0 : s->coeffs[u].coeff) //FIXME do something to simplify this
1849 static inline int iabs (int x) { return ((x < 0) ? -x : x); }
1851 static void reverse_dc_prediction(Vp3DecodeContext *s,
1854 int fragment_height)
1863 int i = first_fragment;
 * Fragment prediction groups, computed below as
 * (x == 0) + ((y == 0) << 1) + ((x + 1 == fragment_width) << 2):
 * 0 = interior, 1 = left column, 2 = top row, 3 = top-left corner,
 * 4 = right column, 6 = top-right corner.
 *
1874 * Note: Groups 5 and 7 do not exist as it would mean that the
1875 * fragment's x coordinate is both 0 and (width - 1) at the same time.
1877 int predictor_group;
1880 /* validity flags for the left, up-left, up, and up-right fragments */
1881 int fl, ful, fu, fur;
1883 /* DC values for the left, up-left, up, and up-right fragments */
1884 int vl, vul, vu, vur;
1886 /* indices for the left, up-left, up, and up-right fragments */
 * The 6 fields mean:
 *  0: up-left multiplier
 *  1: up multiplier
 *  2: up-right multiplier
 *  3: left multiplier
 *  4: rounding mask added to a negative weighted sum before the shift
 *  5: right bit shift divisor (e.g., 7 means >>=7, a.k.a. div by 128)
1898 int predictor_transform[16][6] = {
1899 { 0, 0, 0, 0, 0, 0 },
1900 { 0, 0, 0, 1, 0, 0 }, // PL
1901 { 0, 0, 1, 0, 0, 0 }, // PUR
1902 { 0, 0, 53, 75, 127, 7 }, // PUR|PL
1903 { 0, 1, 0, 0, 0, 0 }, // PU
1904 { 0, 1, 0, 1, 1, 1 }, // PU|PL
1905 { 0, 1, 0, 0, 0, 0 }, // PU|PUR
1906 { 0, 0, 53, 75, 127, 7 }, // PU|PUR|PL
1907 { 1, 0, 0, 0, 0, 0 }, // PUL
1908 { 0, 0, 0, 1, 0, 0 }, // PUL|PL
1909 { 1, 0, 1, 0, 1, 1 }, // PUL|PUR
1910 { 0, 0, 53, 75, 127, 7 }, // PUL|PUR|PL
1911 { 0, 1, 0, 0, 0, 0 }, // PUL|PU
1912 {-26, 29, 0, 29, 31, 5 }, // PUL|PU|PL
1913 { 3, 10, 3, 0, 15, 4 }, // PUL|PU|PUR
1914 {-26, 29, 0, 29, 31, 5 } // PUL|PU|PUR|PL
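/* Worked example (illustrative), using the PUL|PU|PL row above: the
 * weights -26, 29 and 29 sum to 32, so with vul = 10, vu = 12 and
 * vl = 14 the weighted sum is -260 + 348 + 406 = 494; the mask (31) is
 * added only when the sum is negative, and the shift by 5 gives
 * 494 >> 5 = 15 as the predicted DC. */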
1917 /* This table shows which types of blocks can use other blocks for
1918 * prediction. For example, INTRA is the only mode in this table to
1919 * have a frame number of 0. That means INTRA blocks can only predict
1920 * from other INTRA blocks. There are 2 golden frame coding types;
 * blocks encoded in these modes can only predict from other blocks
 * that were encoded with one of these two modes. */
1923 unsigned char compatible_frame[8] = {
1924 1, /* MODE_INTER_NO_MV */
1926 1, /* MODE_INTER_PLUS_MV */
1927 1, /* MODE_INTER_LAST_MV */
1, /* MODE_INTER_PRIOR_LAST */
1929 2, /* MODE_USING_GOLDEN */
1930 2, /* MODE_GOLDEN_MV */
1  /* MODE_INTER_FOURMV */
1933 int current_frame_type;
1935 /* there is a last DC predictor for each of the 3 frame types */
1940 debug_vp3(" vp3: reversing DC prediction\n");
1942 vul = vu = vur = vl = 0;
1943 last_dc[0] = last_dc[1] = last_dc[2] = 0;
1945 /* for each fragment row... */
1946 for (y = 0; y < fragment_height; y++) {
1948 /* for each fragment in a row... */
1949 for (x = 0; x < fragment_width; x++, i++) {
1951 /* reverse prediction if this block was coded */
1952 if (s->all_fragments[i].coding_method != MODE_COPY) {
1954 current_frame_type =
1955 compatible_frame[s->all_fragments[i].coding_method];
1956 predictor_group = (x == 0) + ((y == 0) << 1) +
1957 ((x + 1 == fragment_width) << 2);
1958 debug_dc_pred(" frag %d: group %d, orig DC = %d, ",
1959 i, predictor_group, DC_COEFF(i));
1961 switch (predictor_group) {
1964 /* main body of fragments; consider all 4 possible
1965 * fragments for prediction */
1967 /* calculate the indices of the predicting fragments */
1968 ul = i - fragment_width - 1;
1969 u = i - fragment_width;
1970 ur = i - fragment_width + 1;
1973 /* fetch the DC values for the predicting fragments */
1979 /* figure out which fragments are valid */
1980 ful = FRAME_CODED(ul) && COMPATIBLE_FRAME(ul);
1981 fu = FRAME_CODED(u) && COMPATIBLE_FRAME(u);
1982 fur = FRAME_CODED(ur) && COMPATIBLE_FRAME(ur);
1983 fl = FRAME_CODED(l) && COMPATIBLE_FRAME(l);
1985 /* decide which predictor transform to use */
1986 transform = (fl*PL) | (fu*PU) | (ful*PUL) | (fur*PUR);
1991 /* left column of fragments, not including top corner;
1992 * only consider up and up-right fragments */
1994 /* calculate the indices of the predicting fragments */
1995 u = i - fragment_width;
1996 ur = i - fragment_width + 1;
1998 /* fetch the DC values for the predicting fragments */
2002 /* figure out which fragments are valid */
2003 fur = FRAME_CODED(ur) && COMPATIBLE_FRAME(ur);
2004 fu = FRAME_CODED(u) && COMPATIBLE_FRAME(u);
2006 /* decide which predictor transform to use */
2007 transform = (fu*PU) | (fur*PUR);
2013 /* top row of fragments, not including top-left frag;
2014 * only consider the left fragment for prediction */
2016 /* calculate the indices of the predicting fragments */
2019 /* fetch the DC values for the predicting fragments */
2022 /* figure out which fragments are valid */
2023 fl = FRAME_CODED(l) && COMPATIBLE_FRAME(l);
2025 /* decide which predictor transform to use */
2026 transform = (fl*PL);
2031 /* top-left fragment */
2033 /* nothing to predict from in this case */
2039 /* right column of fragments, not including top corner;
2040 * consider up-left, up, and left fragments for
2043 /* calculate the indices of the predicting fragments */
2044 ul = i - fragment_width - 1;
2045 u = i - fragment_width;
2048 /* fetch the DC values for the predicting fragments */
2053 /* figure out which fragments are valid */
2054 ful = FRAME_CODED(ul) && COMPATIBLE_FRAME(ul);
2055 fu = FRAME_CODED(u) && COMPATIBLE_FRAME(u);
2056 fl = FRAME_CODED(l) && COMPATIBLE_FRAME(l);
2058 /* decide which predictor transform to use */
2059 transform = (fl*PL) | (fu*PU) | (ful*PUL);
2065 debug_dc_pred("transform = %d, ", transform);
2067 if (transform == 0) {
/* if there were no fragments to predict from, use last DC saved */
2071 predicted_dc = last_dc[current_frame_type];
2072 debug_dc_pred("from last DC (%d) = %d\n",
2073 current_frame_type, DC_COEFF(i));
2077 /* apply the appropriate predictor transform */
2079 (predictor_transform[transform][0] * vul) +
2080 (predictor_transform[transform][1] * vu) +
2081 (predictor_transform[transform][2] * vur) +
2082 (predictor_transform[transform][3] * vl);
2084 /* if there is a shift value in the transform, add
2085 * the sign bit before the shift */
2086 if (predictor_transform[transform][5] != 0) {
2087 predicted_dc += ((predicted_dc >> 15) &
2088 predictor_transform[transform][4]);
2089 predicted_dc >>= predictor_transform[transform][5];
2092 /* check for outranging on the [ul u l] and
2093 * [ul u ur l] predictors */
2094 if ((transform == 13) || (transform == 15)) {
2095 if (iabs(predicted_dc - vu) > 128)
2097 else if (iabs(predicted_dc - vl) > 128)
2099 else if (iabs(predicted_dc - vul) > 128)
2103 debug_dc_pred("from pred DC = %d\n",
2107 /* at long last, apply the predictor */
2108 if(s->coeffs[i].index){
2109 *s->next_coeff= s->coeffs[i];
2110 s->coeffs[i].index=0;
2111 s->coeffs[i].coeff=0;
2112 s->coeffs[i].next= s->next_coeff++;
2114 s->coeffs[i].coeff += predicted_dc;
2116 last_dc[current_frame_type] = DC_COEFF(i);
2117 if(DC_COEFF(i) && !(s->all_fragments[i].coeff_count&127)){
2118 s->all_fragments[i].coeff_count= 129;
2119 // s->all_fragments[i].next_coeff= s->next_coeff;
2120 s->coeffs[i].next= s->next_coeff;
2121 (s->next_coeff++)->next=NULL;
2129 * This function performs the final rendering of each fragment's data
2130 * onto the output frame.
2132 static void render_fragments(Vp3DecodeContext *s,
2136 int plane /* 0 = Y, 1 = U, 2 = V */)
2140 int i = first_fragment;
2141 int16_t *dequantizer;
2142 DCTELEM __align16 block[64];
2143 unsigned char *output_plane;
2144 unsigned char *last_plane;
2145 unsigned char *golden_plane;
2147 int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
2148 int upper_motion_limit, lower_motion_limit;
2149 int motion_halfpel_index;
2150 uint8_t *motion_source;
2152 debug_vp3(" vp3: rendering final fragments for %s\n",
2153 (plane == 0) ? "Y plane" : (plane == 1) ? "U plane" : "V plane");
2155 /* set up plane-specific parameters */
2157 output_plane = s->current_frame.data[0];
2158 last_plane = s->last_frame.data[0];
2159 golden_plane = s->golden_frame.data[0];
2160 stride = s->current_frame.linesize[0];
2161 if (!s->flipped_image) stride = -stride;
2162 upper_motion_limit = 7 * s->current_frame.linesize[0];
2163 lower_motion_limit = height * s->current_frame.linesize[0] + width - 8;
2164 } else if (plane == 1) {
2165 output_plane = s->current_frame.data[1];
2166 last_plane = s->last_frame.data[1];
2167 golden_plane = s->golden_frame.data[1];
2168 stride = s->current_frame.linesize[1];
2169 if (!s->flipped_image) stride = -stride;
2170 upper_motion_limit = 7 * s->current_frame.linesize[1];
2171 lower_motion_limit = height * s->current_frame.linesize[1] + width - 8;
2173 output_plane = s->current_frame.data[2];
2174 last_plane = s->last_frame.data[2];
2175 golden_plane = s->golden_frame.data[2];
2176 stride = s->current_frame.linesize[2];
2177 if (!s->flipped_image) stride = -stride;
2178 upper_motion_limit = 7 * s->current_frame.linesize[2];
2179 lower_motion_limit = height * s->current_frame.linesize[2] + width - 8;
2182 if(ABS(stride) > 2048)
2183 return; //various tables are fixed size
2185 /* for each fragment row... */
2186 for (y = 0; y < height; y += 8) {
2188 /* for each fragment in a row... */
2189 for (x = 0; x < width; x += 8, i++) {
2191 if ((i < 0) || (i >= s->fragment_count)) {
2192 av_log(s->avctx, AV_LOG_ERROR, " vp3:render_fragments(): bad fragment number (%d)\n", i);
2196 /* transform if this block was coded */
2197 if ((s->all_fragments[i].coding_method != MODE_COPY) &&
2198 !((s->avctx->flags & CODEC_FLAG_GRAY) && plane)) {
2200 if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
2201 (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
2202 motion_source= golden_plane;
2204 motion_source= last_plane;
2206 motion_source += s->all_fragments[i].first_pixel;
2207 motion_halfpel_index = 0;
2209 /* sort out the motion vector if this fragment is coded
2210 * using a motion vector method */
2211 if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
2212 (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
2214 motion_x = s->all_fragments[i].motion_x;
2215 motion_y = s->all_fragments[i].motion_y;
2217 motion_x= (motion_x>>1) | (motion_x&1);
2218 motion_y= (motion_y>>1) | (motion_y&1);
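/* illustrative example: a full-precision vector of 5 (2.5 pixels)
 * becomes (5 >> 1) | (5 & 1) = 3, i.e. 1.5 pixels, keeping the half-pel
 * bit; this halving presumably applies to the half-resolution chroma
 * planes */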
2221 src_x= (motion_x>>1) + x;
2222 src_y= (motion_y>>1) + y;
2223 if ((motion_x == 0xbeef) || (motion_y == 0xbeef))
2224 av_log(s->avctx, AV_LOG_ERROR, " help! got beefy vector! (%X, %X)\n", motion_x, motion_y);
2226 motion_halfpel_index = motion_x & 0x01;
2227 motion_source += (motion_x >> 1);
2229 // motion_y = -motion_y;
2230 motion_halfpel_index |= (motion_y & 0x01) << 1;
2231 motion_source += ((motion_y >> 1) * stride);
2233 if(src_x<0 || src_y<0 || src_x + 9 >= width || src_y + 9 >= height){
2234 uint8_t *temp= s->edge_emu_buffer;
2235 if(stride<0) temp -= 9*stride;
2236 else temp += 9*stride;
2238 ff_emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, width, height);
2239 motion_source= temp;
2244 /* first, take care of copying a block from either the
2245 * previous or the golden frame */
2246 if (s->all_fragments[i].coding_method != MODE_INTRA) {
2247 // Note: it is possible to implement all MC cases with put_no_rnd_pixels_l2, which would look more like the VP3 source, but this would be slower as put_no_rnd_pixels_tab is better optimized
2248 if(motion_halfpel_index != 3){
2249 s->dsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
2250 output_plane + s->all_fragments[i].first_pixel,
2251 motion_source, stride, 8);
2253 int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1
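/* both components are half-pel here (index 3): the prediction is an average
 * of two diagonally offset samples via put_no_rnd_pixels_l2(); when the two
 * components have opposite signs, d shifts the sample positions by one,
 * presumably to keep the average centred on the intended half-pel location */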
2254 s->dsp.put_no_rnd_pixels_l2[1](
2255 output_plane + s->all_fragments[i].first_pixel,
2257 motion_source + stride + 1 + d,
2260 dequantizer = s->inter_dequant;
2263 dequantizer = s->intra_y_dequant;
2265 dequantizer = s->intra_c_dequant;
2268 /* dequantize the DCT coefficients */
2269 debug_idct("fragment %d, coding mode %d, DC = %d, dequant = %d:\n",
2270 i, s->all_fragments[i].coding_method,
2271 DC_COEFF(i), dequantizer[0]);
2273 if(s->avctx->idct_algo==FF_IDCT_VP3){
2274 Coeff *coeff= s->coeffs + i;
2275 memset(block, 0, sizeof(block));
2277 block[coeff->index]= coeff->coeff * dequantizer[coeff->index];
2281 Coeff *coeff= s->coeffs + i;
2282 memset(block, 0, sizeof(block));
2284 block[coeff->index]= (coeff->coeff * dequantizer[coeff->index] + 2)>>2;
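/* the "+ 2 >> 2" is a rounded divide by 4: the VP3 dequantizers appear to be
 * scaled by a factor of 4 relative to what the generic ffmpeg IDCTs expect,
 * whereas the FF_IDCT_VP3 path above consumes the products unscaled */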
2289 /* invert DCT and place (or add) in final output */
2291 if (s->all_fragments[i].coding_method == MODE_INTRA) {
2292 if(s->avctx->idct_algo!=FF_IDCT_VP3)
2295 output_plane + s->all_fragments[i].first_pixel,
2300 output_plane + s->all_fragments[i].first_pixel,
2305 debug_idct("block after idct_%s():\n",
2306 (s->all_fragments[i].coding_method == MODE_INTRA)?
2308 for (m = 0; m < 8; m++) {
2309 for (n = 0; n < 8; n++) {
2310 debug_idct(" %3d", *(output_plane +
2311 s->all_fragments[i].first_pixel + (m * stride + n)));
2319 /* copy directly from the previous frame */
2320 s->dsp.put_pixels_tab[1][0](
2321 output_plane + s->all_fragments[i].first_pixel,
2322 last_plane + s->all_fragments[i].first_pixel,
2332 static void horizontal_filter(unsigned char *first_pixel, int stride,
2333 int *bounding_values)
2338 for (end= first_pixel + 8*stride; first_pixel < end; first_pixel += stride) {
2340 (first_pixel[-2] - first_pixel[ 1])
2341 +3*(first_pixel[ 0] - first_pixel[-1]);
2342 filter_value = bounding_values[(filter_value + 4) >> 3];
2343 first_pixel[-1] = clip_uint8(first_pixel[-1] + filter_value);
2344 first_pixel[ 0] = clip_uint8(first_pixel[ 0] - filter_value);
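/* this is the same 1-D kernel that vertical_filter() below applies across
 * horizontal edges: with p[] = first_pixel[],
 *   f = (p[-2] - p[1]) + 3*(p[0] - p[-1]);  f = bounding_values[(f + 4) >> 3];
 * and the two pixels adjacent to the edge are corrected by +f and -f.
 * e.g. edge pixels {100, 120, 60, 58} give f = (100 - 58) + 3*(60 - 120) = -138,
 * and (-138 + 4) >> 3 = -17 before the bounding_values[] clamp */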
2348 static void vertical_filter(unsigned char *first_pixel, int stride,
2349 int *bounding_values)
2353 const int nstride= -stride;
2355 for (end= first_pixel + 8; first_pixel < end; first_pixel++) {
2357 (first_pixel[2 * nstride] - first_pixel[ stride])
2358 +3*(first_pixel[0 ] - first_pixel[nstride]);
2359 filter_value = bounding_values[(filter_value + 4) >> 3];
2360 first_pixel[nstride] = clip_uint8(first_pixel[nstride] + filter_value);
2361 first_pixel[0] = clip_uint8(first_pixel[0] - filter_value);
2365 static void apply_loop_filter(Vp3DecodeContext *s)
2371 unsigned char *plane_data;
2373 int bounding_values_array[256];
2374 int *bounding_values= bounding_values_array+127;
2377 /* find the right loop limit value */
2378 for (x = 63; x >= 0; x--) {
2379 if (vp31_ac_scale_factor[x] >= s->quality_index)
2382 filter_limit = vp31_filter_limit_values[s->quality_index];
2384 /* set up the bounding values */
2385 memset(bounding_values_array, 0, 256 * sizeof(int));
2386 for (x = 0; x < filter_limit; x++) {
2387 bounding_values[-x - filter_limit] = -filter_limit + x;
2388 bounding_values[-x] = -x;
2389 bounding_values[x] = x;
2390 bounding_values[x + filter_limit] = filter_limit - x;
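/* the resulting clamp rises with slope 1 up to filter_limit, falls back to 0
 * at 2 * filter_limit, and is 0 beyond that.  e.g. if filter_limit were 4 (the
 * real value comes from vp31_filter_limit_values[]), indices -8..8 would map to
 * 0, -1, -2, -3, -4, -3, -2, -1, 0, 1, 2, 3, 4, 3, 2, 1, 0 */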
2393 for (plane = 0; plane < 3; plane++) {
2396 /* Y plane parameters */
2398 width = s->fragment_width;
2399 height = s->fragment_height;
2400 stride = s->current_frame.linesize[0];
2401 plane_data = s->current_frame.data[0];
2402 } else if (plane == 1) {
2403 /* U plane parameters */
2404 fragment = s->u_fragment_start;
2405 width = s->fragment_width / 2;
2406 height = s->fragment_height / 2;
2407 stride = s->current_frame.linesize[1];
2408 plane_data = s->current_frame.data[1];
2410 /* V plane parameters */
2411 fragment = s->v_fragment_start;
2412 width = s->fragment_width / 2;
2413 height = s->fragment_height / 2;
2414 stride = s->current_frame.linesize[2];
2415 plane_data = s->current_frame.data[2];
2418 for (y = 0; y < height; y++) {
2420 for (x = 0; x < width; x++) {
2422 /* do not perform the left edge filter for left-column fragments */
2424 (s->all_fragments[fragment].coding_method != MODE_COPY)) {
2426 plane_data + s->all_fragments[fragment].first_pixel - 7*stride,
2427 stride, bounding_values);
2430 /* do not perform top edge filter for top row fragments */
2432 (s->all_fragments[fragment].coding_method != MODE_COPY)) {
2434 plane_data + s->all_fragments[fragment].first_pixel + stride,
2435 stride, bounding_values);
2438 /* do not perform the right edge filter for right-column fragments, or
2439 * when the right neighbor is also coded in this frame (its left edge
2440 * will be filtered when that fragment is processed) */
2441 if ((x < width - 1) &&
2442 (s->all_fragments[fragment].coding_method != MODE_COPY) &&
2443 (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
2445 plane_data + s->all_fragments[fragment + 1].first_pixel - 7*stride,
2446 stride, bounding_values);
2449 /* do not perform the bottom edge filter for bottom-row fragments, or
2450 * when the bottom neighbor is also coded in this frame (its top edge
2451 * will be filtered in the next row) */
2452 if ((y < height - 1) &&
2453 (s->all_fragments[fragment].coding_method != MODE_COPY) &&
2454 (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
2456 plane_data + s->all_fragments[fragment + width].first_pixel + stride,
2457 stride, bounding_values);
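/* taken together, the unconditional left/top filtering of every coded fragment
 * plus the right/bottom filtering against uncoded (MODE_COPY) neighbours should
 * leave each internal edge that touches at least one coded fragment filtered
 * exactly once */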
2461 STOP_TIMER("loop filter")
2468 * This function computes the first pixel addresses for each of the fragments.
2469 * It must be invoked after the first frame is allocated so that it has
2470 * access to the plane strides.
2472 static void vp3_calculate_pixel_addresses(Vp3DecodeContext *s)
2477 /* figure out the first pixel addresses for each of the fragments */
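/* note that y counts down from fragment_height, so fragment row 0 is assigned
 * an address on the last line of the plane; combined with the negated stride
 * used in render_fragments() when !flipped_image, this appears to account for
 * the bottom-up orientation of native VP3 frames, while
 * theora_calculate_pixel_addresses() below covers the other orientation */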
2480 for (y = s->fragment_height; y > 0; y--) {
2481 for (x = 0; x < s->fragment_width; x++) {
2482 s->all_fragments[i++].first_pixel =
2483 s->golden_frame.linesize[0] * y * FRAGMENT_PIXELS -
2484 s->golden_frame.linesize[0] +
2485 x * FRAGMENT_PIXELS;
2486 debug_init(" fragment %d, first pixel @ %d\n",
2487 i-1, s->all_fragments[i-1].first_pixel);
2492 i = s->u_fragment_start;
2493 for (y = s->fragment_height / 2; y > 0; y--) {
2494 for (x = 0; x < s->fragment_width / 2; x++) {
2495 s->all_fragments[i++].first_pixel =
2496 s->golden_frame.linesize[1] * y * FRAGMENT_PIXELS -
2497 s->golden_frame.linesize[1] +
2498 x * FRAGMENT_PIXELS;
2499 debug_init(" fragment %d, first pixel @ %d\n",
2500 i-1, s->all_fragments[i-1].first_pixel);
2505 i = s->v_fragment_start;
2506 for (y = s->fragment_height / 2; y > 0; y--) {
2507 for (x = 0; x < s->fragment_width / 2; x++) {
2508 s->all_fragments[i++].first_pixel =
2509 s->golden_frame.linesize[2] * y * FRAGMENT_PIXELS -
2510 s->golden_frame.linesize[2] +
2511 x * FRAGMENT_PIXELS;
2512 debug_init(" fragment %d, first pixel @ %d\n",
2513 i-1, s->all_fragments[i-1].first_pixel);
2518 /* FIXME: this should be merged with the above! */
2519 static void theora_calculate_pixel_addresses(Vp3DecodeContext *s)
2524 /* figure out the first pixel addresses for each of the fragments */
2527 for (y = 1; y <= s->fragment_height; y++) {
2528 for (x = 0; x < s->fragment_width; x++) {
2529 s->all_fragments[i++].first_pixel =
2530 s->golden_frame.linesize[0] * y * FRAGMENT_PIXELS -
2531 s->golden_frame.linesize[0] +
2532 x * FRAGMENT_PIXELS;
2533 debug_init(" fragment %d, first pixel @ %d\n",
2534 i-1, s->all_fragments[i-1].first_pixel);
2539 i = s->u_fragment_start;
2540 for (y = 1; y <= s->fragment_height / 2; y++) {
2541 for (x = 0; x < s->fragment_width / 2; x++) {
2542 s->all_fragments[i++].first_pixel =
2543 s->golden_frame.linesize[1] * y * FRAGMENT_PIXELS -
2544 s->golden_frame.linesize[1] +
2545 x * FRAGMENT_PIXELS;
2546 debug_init(" fragment %d, first pixel @ %d\n",
2547 i-1, s->all_fragments[i-1].first_pixel);
2552 i = s->v_fragment_start;
2553 for (y = 1; y <= s->fragment_height / 2; y++) {
2554 for (x = 0; x < s->fragment_width / 2; x++) {
2555 s->all_fragments[i++].first_pixel =
2556 s->golden_frame.linesize[2] * y * FRAGMENT_PIXELS -
2557 s->golden_frame.linesize[2] +
2558 x * FRAGMENT_PIXELS;
2559 debug_init(" fragment %d, first pixel @ %d\n",
2560 i-1, s->all_fragments[i-1].first_pixel);
2566 * This is the ffmpeg/libavcodec API init function.
2568 static int vp3_decode_init(AVCodecContext *avctx)
2570 Vp3DecodeContext *s = avctx->priv_data;
2574 int y_superblock_count;
2575 int c_superblock_count;
2577 if (avctx->codec_tag == MKTAG('V','P','3','0'))
2583 s->width = (avctx->width + 15) & 0xFFFFFFF0;
2584 s->height = (avctx->height + 15) & 0xFFFFFFF0;
2585 avctx->pix_fmt = PIX_FMT_YUV420P;
2586 avctx->has_b_frames = 0;
2587 if(avctx->idct_algo==FF_IDCT_AUTO)
2588 avctx->idct_algo=FF_IDCT_VP3;
2589 dsputil_init(&s->dsp, avctx);
2591 ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
2593 /* initialize to an impossible value which will force a recalculation
2594 * in the first frame decode */
2595 s->quality_index = -1;
2597 s->y_superblock_width = (s->width + 31) / 32;
2598 s->y_superblock_height = (s->height + 31) / 32;
2599 y_superblock_count = s->y_superblock_width * s->y_superblock_height;
2601 /* work out the dimensions for the C planes */
2602 c_width = s->width / 2;
2603 c_height = s->height / 2;
2604 s->c_superblock_width = (c_width + 31) / 32;
2605 s->c_superblock_height = (c_height + 31) / 32;
2606 c_superblock_count = s->c_superblock_width * s->c_superblock_height;
2608 s->superblock_count = y_superblock_count + (c_superblock_count * 2);
2609 s->u_superblock_start = y_superblock_count;
2610 s->v_superblock_start = s->u_superblock_start + c_superblock_count;
2611 s->superblock_coding = av_malloc(s->superblock_count);
2613 s->macroblock_width = (s->width + 15) / 16;
2614 s->macroblock_height = (s->height + 15) / 16;
2615 s->macroblock_count = s->macroblock_width * s->macroblock_height;
2617 s->fragment_width = s->width / FRAGMENT_PIXELS;
2618 s->fragment_height = s->height / FRAGMENT_PIXELS;
2620 /* fragment count covers all 8x8 blocks for all 3 planes */
2621 s->fragment_count = s->fragment_width * s->fragment_height * 3 / 2;
2622 s->u_fragment_start = s->fragment_width * s->fragment_height;
2623 s->v_fragment_start = s->fragment_width * s->fragment_height * 5 / 4;
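/* for example (hypothetical 320x240 stream, just to illustrate the layout):
 * 10x8 = 80 Y superblocks and 5x4 = 20 per chroma plane (120 total, U starting
 * at 80, V at 100), 20x15 = 300 macroblocks, and 40x30 = 1200 Y fragments out
 * of 1800 total (U starting at 1200, V at 1500) */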
2625 debug_init(" Y plane: %d x %d\n", s->width, s->height);
2626 debug_init(" C plane: %d x %d\n", c_width, c_height);
2627 debug_init(" Y superblocks: %d x %d, %d total\n",
2628 s->y_superblock_width, s->y_superblock_height, y_superblock_count);
2629 debug_init(" C superblocks: %d x %d, %d total\n",
2630 s->c_superblock_width, s->c_superblock_height, c_superblock_count);
2631 debug_init(" total superblocks = %d, U starts @ %d, V starts @ %d\n",
2632 s->superblock_count, s->u_superblock_start, s->v_superblock_start);
2633 debug_init(" macroblocks: %d x %d, %d total\n",
2634 s->macroblock_width, s->macroblock_height, s->macroblock_count);
2635 debug_init(" %d fragments, %d x %d, u starts @ %d, v starts @ %d\n",
2639 s->u_fragment_start,
2640 s->v_fragment_start);
2642 s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment));
2643 s->coeffs = av_malloc(s->fragment_count * sizeof(Coeff) * 65);
2644 s->coded_fragment_list = av_malloc(s->fragment_count * sizeof(int));
2645 s->pixel_addresses_inited = 0;
2647 if (!s->theora_tables)
2649 for (i = 0; i < 64; i++)
2650 s->coded_dc_scale_factor[i] = vp31_dc_scale_factor[i];
2651 for (i = 0; i < 64; i++)
2652 s->coded_ac_scale_factor[i] = vp31_ac_scale_factor[i];
2653 for (i = 0; i < 64; i++)
2654 s->coded_intra_y_dequant[i] = vp31_intra_y_dequant[i];
2655 for (i = 0; i < 64; i++)
2656 s->coded_intra_c_dequant[i] = vp31_intra_c_dequant[i];
2657 for (i = 0; i < 64; i++)
2658 s->coded_inter_dequant[i] = vp31_inter_dequant[i];
2661 /* init VLC tables */
2662 for (i = 0; i < 16; i++) {
2665 init_vlc(&s->dc_vlc[i], 5, 32,
2666 &dc_bias[i][0][1], 4, 2,
2667 &dc_bias[i][0][0], 4, 2, 0);
2669 /* group 1 AC histograms */
2670 init_vlc(&s->ac_vlc_1[i], 5, 32,
2671 &ac_bias_0[i][0][1], 4, 2,
2672 &ac_bias_0[i][0][0], 4, 2, 0);
2674 /* group 2 AC histograms */
2675 init_vlc(&s->ac_vlc_2[i], 5, 32,
2676 &ac_bias_1[i][0][1], 4, 2,
2677 &ac_bias_1[i][0][0], 4, 2, 0);
2679 /* group 3 AC histograms */
2680 init_vlc(&s->ac_vlc_3[i], 5, 32,
2681 &ac_bias_2[i][0][1], 4, 2,
2682 &ac_bias_2[i][0][0], 4, 2, 0);
2684 /* group 4 AC histograms */
2685 init_vlc(&s->ac_vlc_4[i], 5, 32,
2686 &ac_bias_3[i][0][1], 4, 2,
2687 &ac_bias_3[i][0][0], 4, 2, 0);
2690 init_vlc(&s->superblock_run_length_vlc, 6, 34,
2691 &superblock_run_length_vlc_table[0][1], 4, 2,
2692 &superblock_run_length_vlc_table[0][0], 4, 2, 0);
2694 init_vlc(&s->fragment_run_length_vlc, 5, 31,
2695 &fragment_run_length_vlc_table[0][1], 4, 2,
2696 &fragment_run_length_vlc_table[0][0], 4, 2, 0);
2698 init_vlc(&s->mode_code_vlc, 3, 8,
2699 &mode_code_vlc_table[0][1], 2, 1,
2700 &mode_code_vlc_table[0][0], 2, 1, 0);
2702 init_vlc(&s->motion_vector_vlc, 6, 63,
2703 &motion_vector_vlc_table[0][1], 2, 1,
2704 &motion_vector_vlc_table[0][0], 2, 1, 0);
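/* each *_vlc_table[] entry appears to be a {code, length} pair, so the lengths
 * are passed from offset 1 and the codes from offset 0; the wrap/size arguments
 * (4,2 or 2,1) describe that entry layout to init_vlc(), and the first numeric
 * argument is the number of bits in the primary lookup table */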
2706 /* work out the block mapping tables */
2707 s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int));
2708 s->superblock_macroblocks = av_malloc(s->superblock_count * 4 * sizeof(int));
2709 s->macroblock_fragments = av_malloc(s->macroblock_count * 6 * sizeof(int));
2710 s->macroblock_coding = av_malloc(s->macroblock_count + 1);
2711 init_block_mapping(s);
2713 for (i = 0; i < 3; i++) {
2714 s->current_frame.data[i] = NULL;
2715 s->last_frame.data[i] = NULL;
2716 s->golden_frame.data[i] = NULL;
2723 * This is the ffmpeg/libavcodec API frame decode function.
2725 static int vp3_decode_frame(AVCodecContext *avctx,
2726 void *data, int *data_size,
2727 uint8_t *buf, int buf_size)
2729 Vp3DecodeContext *s = avctx->priv_data;
2731 static int counter = 0;
2733 init_get_bits(&gb, buf, buf_size * 8);
2735 if (s->theora && get_bits1(&gb))
2737 int ptype = get_bits(&gb, 7);
2739 skip_bits(&gb, 6*8); /* "theora" */
2744 theora_decode_comments(avctx, gb);
2747 theora_decode_tables(avctx, gb);
2748 init_dequantizer(s);
2751 av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype);
2756 s->keyframe = !get_bits1(&gb);
2759 s->last_quality_index = s->quality_index;
2760 s->quality_index = get_bits(&gb, 6);
2761 if (s->theora >= 0x030200)
2764 if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2765 av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
2766 s->keyframe?"key":"", counter, s->quality_index);
2769 if (s->quality_index != s->last_quality_index)
2770 init_dequantizer(s);
2775 skip_bits(&gb, 4); /* width code */
2776 skip_bits(&gb, 4); /* height code */
2779 s->version = get_bits(&gb, 5);
2781 av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
2784 if (s->version || s->theora)
2787 av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
2788 skip_bits(&gb, 2); /* reserved? */
2791 if (s->last_frame.data[0] == s->golden_frame.data[0]) {
2792 if (s->golden_frame.data[0])
2793 avctx->release_buffer(avctx, &s->golden_frame);
2794 s->last_frame= s->golden_frame; /* ensure that we catch any access to this released frame */
2796 if (s->golden_frame.data[0])
2797 avctx->release_buffer(avctx, &s->golden_frame);
2798 if (s->last_frame.data[0])
2799 avctx->release_buffer(avctx, &s->last_frame);
2802 s->golden_frame.reference = 3;
2803 if(avctx->get_buffer(avctx, &s->golden_frame) < 0) {
2804 av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
2808 /* golden frame is also the current frame */
2809 memcpy(&s->current_frame, &s->golden_frame, sizeof(AVFrame));
2811 /* time to figure out pixel addresses? */
2812 if (!s->pixel_addresses_inited)
2814 if (!s->flipped_image)
2815 vp3_calculate_pixel_addresses(s);
2817 theora_calculate_pixel_addresses(s);
2820 /* allocate a new current frame */
2821 s->current_frame.reference = 3;
2822 if(avctx->get_buffer(avctx, &s->current_frame) < 0) {
2823 av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
2828 s->current_frame.qscale_table= s->qscale_table; //FIXME allocate individual tables per AVFrame
2829 s->current_frame.qstride= 0;
2833 STOP_TIMER("init_frame")}
2838 memcpy(s->current_frame.data[0], s->golden_frame.data[0],
2839 s->current_frame.linesize[0] * s->height);
2840 memcpy(s->current_frame.data[1], s->golden_frame.data[1],
2841 s->current_frame.linesize[1] * s->height / 2);
2842 memcpy(s->current_frame.data[2], s->golden_frame.data[2],
2843 s->current_frame.linesize[2] * s->height / 2);
2849 if (unpack_superblocks(s, &gb)){
2850 av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
2853 STOP_TIMER("unpack_superblocks")}
2855 if (unpack_modes(s, &gb)){
2856 av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
2859 STOP_TIMER("unpack_modes")}
2861 if (unpack_vectors(s, &gb)){
2862 av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
2865 STOP_TIMER("unpack_vectors")}
2867 if (unpack_dct_coeffs(s, &gb)){
2868 av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
2871 STOP_TIMER("unpack_dct_coeffs")}
2874 reverse_dc_prediction(s, 0, s->fragment_width, s->fragment_height);
2875 STOP_TIMER("reverse_dc_prediction")}
2877 render_fragments(s, 0, s->width, s->height, 0);
2878 STOP_TIMER("render_fragments")}
2880 if ((avctx->flags & CODEC_FLAG_GRAY) == 0) {
2881 reverse_dc_prediction(s, s->u_fragment_start,
2882 s->fragment_width / 2, s->fragment_height / 2);
2883 reverse_dc_prediction(s, s->v_fragment_start,
2884 s->fragment_width / 2, s->fragment_height / 2);
2885 render_fragments(s, s->u_fragment_start, s->width / 2, s->height / 2, 1);
2886 render_fragments(s, s->v_fragment_start, s->width / 2, s->height / 2, 2);
2888 memset(s->current_frame.data[1], 0x80, s->width * s->height / 4);
2889 memset(s->current_frame.data[2], 0x80, s->width * s->height / 4);
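/* 0x80 is the neutral chroma value, so with CODEC_FLAG_GRAY the chroma planes
 * are simply flattened to grey instead of being predicted and rendered */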
2893 apply_loop_filter(s);
2894 STOP_TIMER("apply_loop_filter")}
2899 *data_size=sizeof(AVFrame);
2900 *(AVFrame*)data= s->current_frame;
2902 /* release the last frame, if it is allocated and if it is not the same frame as the golden frame */
2904 if ((s->last_frame.data[0]) &&
2905 (s->last_frame.data[0] != s->golden_frame.data[0]))
2906 avctx->release_buffer(avctx, &s->last_frame);
2908 /* shuffle frames (last = current) */
2909 memcpy(&s->last_frame, &s->current_frame, sizeof(AVFrame));
2910 s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */
2916 * This is the ffmpeg/libavcodec API module cleanup function.
2918 static int vp3_decode_end(AVCodecContext *avctx)
2920 Vp3DecodeContext *s = avctx->priv_data;
2922 av_free(s->all_fragments);
2924 av_free(s->coded_fragment_list);
2925 av_free(s->superblock_fragments);
2926 av_free(s->superblock_macroblocks);
2927 av_free(s->macroblock_fragments);
2928 av_free(s->macroblock_coding);
2930 /* release all frames */
2931 if (s->golden_frame.data[0] && s->golden_frame.data[0] != s->last_frame.data[0])
2932 avctx->release_buffer(avctx, &s->golden_frame);
2933 if (s->last_frame.data[0])
2934 avctx->release_buffer(avctx, &s->last_frame);
2935 /* no need to release the current_frame since it will always be pointing
2936 * to the same frame as either the golden or last frame */
2941 static int theora_decode_header(AVCodecContext *avctx, GetBitContext gb)
2943 Vp3DecodeContext *s = avctx->priv_data;
2944 int major, minor, micro;
2946 major = get_bits(&gb, 8); /* version major */
2947 minor = get_bits(&gb, 8); /* version minor */
2948 micro = get_bits(&gb, 8); /* version micro */
2949 av_log(avctx, AV_LOG_INFO, "Theora bitstream version %d.%d.%d\n",
2950 major, minor, micro);
2952 /* FIXME: endianness? */
2953 s->theora = (major << 16) | (minor << 8) | micro;
2955 /* 3.2.0 aka alpha3 has the same frame orientation as original vp3 */
2956 /* but previous versions have the image flipped relative to vp3 */
2957 if (s->theora < 0x030200)
2959 s->flipped_image = 1;
2960 av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n");
2963 s->width = get_bits(&gb, 16) << 4;
2964 s->height = get_bits(&gb, 16) << 4;
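/* presumably these 16-bit fields store the encoded (macroblock-aligned)
 * dimensions in units of 16 pixels, hence the << 4; the visible picture size
 * and offsets are carried in the 24- and 8-bit fields skipped below */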
2966 if(avcodec_check_dimensions(avctx, s->width, s->height)){
2967 s->width= s->height= 0;
2971 skip_bits(&gb, 24); /* frame width */
2972 skip_bits(&gb, 24); /* frame height */
2974 skip_bits(&gb, 8); /* offset x */
2975 skip_bits(&gb, 8); /* offset y */
2977 skip_bits(&gb, 32); /* fps numerator */
2978 skip_bits(&gb, 32); /* fps denominator */
2979 skip_bits(&gb, 24); /* aspect numerator */
2980 skip_bits(&gb, 24); /* aspect denominator */
2982 if (s->theora < 0x030200)
2983 skip_bits(&gb, 5); /* keyframe frequency force */
2984 skip_bits(&gb, 8); /* colorspace */
2985 skip_bits(&gb, 24); /* bitrate */
2987 skip_bits(&gb, 6); /* last(?) quality index */
2989 if (s->theora >= 0x030200)
2991 skip_bits(&gb, 5); /* keyframe frequency force */
2992 skip_bits(&gb, 5); /* spare bits */
2995 // align_get_bits(&gb);
2997 avctx->width = s->width;
2998 avctx->height = s->height;
3003 static int theora_decode_comments(AVCodecContext *avctx, GetBitContext gb)
3005 int nb_comments, i, tmp;
3007 tmp = get_bits_long(&gb, 32);
3008 tmp = be2me_32(tmp);
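/* the Vorbis-style comment header stores its lengths as little-endian 32-bit
 * values (vendor string length, then the comment count, then one length per
 * comment); the be2me_32() calls are apparently meant to reinterpret the value
 * read MSB-first by the bit reader, and the strings themselves are not stored */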
3012 nb_comments = get_bits_long(&gb, 32);
3013 nb_comments = be2me_32(nb_comments);
3014 for (i = 0; i < nb_comments; i++)
3016 tmp = get_bits_long(&gb, 32);
3017 tmp = be2me_32(tmp);
3025 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext gb)
3027 Vp3DecodeContext *s = avctx->priv_data;
3030 if (s->theora >= 0x030200) {
3031 n = get_bits(&gb, 3);
3032 /* loop filter table */
3033 for (i = 0; i < 64; i++)
3037 if (s->theora >= 0x030200)
3038 n = get_bits(&gb, 4) + 1;
3041 /* quality threshold table */
3042 for (i = 0; i < 64; i++)
3043 s->coded_ac_scale_factor[i] = get_bits(&gb, n);
3045 if (s->theora >= 0x030200)
3046 n = get_bits(&gb, 4) + 1;
3049 /* dc scale factor table */
3050 for (i = 0; i < 64; i++)
3051 s->coded_dc_scale_factor[i] = get_bits(&gb, n);
3053 if (s->theora >= 0x030200)
3054 n = get_bits(&gb, 9) + 1;
3058 av_log(NULL, AV_LOG_ERROR, "unsupported nbms: %d\n", n);
3062 for (i = 0; i < 64; i++)
3063 s->coded_intra_y_dequant[i] = get_bits(&gb, 8);
3066 for (i = 0; i < 64; i++)
3067 s->coded_intra_c_dequant[i] = get_bits(&gb, 8);
3070 for (i = 0; i < 64; i++)
3071 s->coded_inter_dequant[i] = get_bits(&gb, 8);
3073 /* FIXME: read the Huffman trees */
3075 s->theora_tables = 1;
3080 static int theora_decode_init(AVCodecContext *avctx)
3082 Vp3DecodeContext *s = avctx->priv_data;
3085 uint8_t *p= avctx->extradata;
3090 if (!avctx->extradata_size)
3094 op_bytes = *(p++)<<8;
3097 init_get_bits(&gb, p, op_bytes);
3100 ptype = get_bits(&gb, 8);
3101 debug_vp3("Theora headerpacket type: %x\n", ptype);
3103 if (!(ptype & 0x80))
3106 skip_bits(&gb, 6*8); /* "theora" */
3111 theora_decode_header(avctx, gb);
3114 theora_decode_comments(avctx, gb);
3117 theora_decode_tables(avctx, gb);
3122 vp3_decode_init(avctx);
3126 AVCodec vp3_decoder = {
3130 sizeof(Vp3DecodeContext),
3139 #ifndef CONFIG_LIBTHEORA
3140 AVCodec theora_decoder = {
3144 sizeof(Vp3DecodeContext),