2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2006-2007 Konstantin Shishkov
4 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
32 #include "mpegvideo.h"
34 #include "vc1acdata.h"
39 extern const uint32_t ff_table0_dc_lum[120][2], ff_table1_dc_lum[120][2];
40 extern const uint32_t ff_table0_dc_chroma[120][2], ff_table1_dc_chroma[120][2];
41 extern VLC ff_msmp4_dc_luma_vlc[2], ff_msmp4_dc_chroma_vlc[2];
42 #define MB_INTRA_VLC_BITS 9
43 extern VLC ff_msmp4_mb_i_vlc;
44 extern const uint16_t ff_msmp4_mb_i_table[64][2];
47 static const uint16_t table_mb_intra[64][2];
/** Markers used in VC-1 AP frame data */
52 VC1_CODE_RES0 = 0x00000100,
53 VC1_CODE_ENDOFSEQ = 0x0000010A,
62 /** Available Profiles */
67 PROFILE_COMPLEX, ///< TODO: WMV9 specific
72 /** Sequence quantizer mode */
75 QUANT_FRAME_IMPLICIT, ///< Implicitly specified at frame level
76 QUANT_FRAME_EXPLICIT, ///< Explicitly specified at frame level
77 QUANT_NON_UNIFORM, ///< Non-uniform quant used for all frames
78 QUANT_UNIFORM ///< Uniform quant used for all frames
82 /** Where quant can be changed */
86 DQPROFILE_DOUBLE_EDGES,
87 DQPROFILE_SINGLE_EDGE,
92 /** @name Where quant can be changed
103 /** Which pair of edges is quantized with ALTPQUANT */
106 DQDOUBLE_BEDGE_TOPLEFT,
107 DQDOUBLE_BEDGE_TOPRIGHT,
108 DQDOUBLE_BEDGE_BOTTOMRIGHT,
109 DQDOUBLE_BEDGE_BOTTOMLEFT
113 /** MV modes for P frames */
116 MV_PMODE_1MV_HPEL_BILIN,
120 MV_PMODE_INTENSITY_COMP
124 /** @name MV types for B frames */
129 BMV_TYPE_INTERPOLATED
133 /** @name Block types for P/B frames */
135 enum TransformTypes {
139 TT_8X4, //Both halves
142 TT_4X8, //Both halves
147 /** Table for conversion between TTBLK and TTMB */
148 static const int ttblk_to_tt[3][8] = {
149 { TT_8X4, TT_4X8, TT_8X8, TT_4X4, TT_8X4_TOP, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT },
150 { TT_8X8, TT_4X8_RIGHT, TT_4X8_LEFT, TT_4X4, TT_8X4, TT_4X8, TT_8X4_BOTTOM, TT_8X4_TOP },
151 { TT_8X8, TT_4X8, TT_4X4, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT, TT_8X4, TT_8X4_TOP }
154 static const int ttfrm_to_tt[4] = { TT_8X8, TT_8X4, TT_4X8, TT_4X4 };
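/* Both tables map raw bitstream indices onto the TT_* transform types above:
 * ttblk_to_tt[] covers the three pq-dependent TTBLK/TTMB table sets and
 * ttfrm_to_tt[] the 2-bit frame-level TTFRM field. */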
156 /** MV P mode - the 5th element is only used for mode 1 */
157 static const uint8_t mv_pmode_table[2][5] = {
158 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_MIXED_MV },
159 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_1MV_HPEL_BILIN }
161 static const uint8_t mv_pmode_table2[2][4] = {
162 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_MIXED_MV },
163 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_1MV_HPEL_BILIN }
166 /** One more frame type */
169 static const int fps_nr[5] = { 24, 25, 30, 50, 60 },
170 fps_dr[2] = { 1000, 1001 };
171 static const uint8_t pquant_table[3][32] = {
172 { /* Implicit quantizer */
173 0, 1, 2, 3, 4, 5, 6, 7, 8, 6, 7, 8, 9, 10, 11, 12,
174 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 29, 31
176 { /* Explicit quantizer, pquantizer uniform */
177 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
178 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
180 { /* Explicit quantizer, pquantizer non-uniform */
181 0, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
182 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31
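/* PQINDEX -> PQUANT mapping.  In implicit mode the first row is used and
 * PQINDEX < 9 additionally implies the uniform quantizer (see the frame header
 * parsing below); e.g. PQINDEX 10 yields PQUANT 7 implicitly but PQUANT 10
 * when the quantizer is signalled explicitly. */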
186 /** @name VC-1 VLC tables and defines
* @todo Move this into the context
190 #define VC1_BFRACTION_VLC_BITS 7
191 static VLC vc1_bfraction_vlc;
192 #define VC1_IMODE_VLC_BITS 4
193 static VLC vc1_imode_vlc;
194 #define VC1_NORM2_VLC_BITS 3
195 static VLC vc1_norm2_vlc;
196 #define VC1_NORM6_VLC_BITS 9
197 static VLC vc1_norm6_vlc;
198 /* Could be optimized, one table only needs 8 bits */
199 #define VC1_TTMB_VLC_BITS 9 //12
200 static VLC vc1_ttmb_vlc[3];
201 #define VC1_MV_DIFF_VLC_BITS 9 //15
202 static VLC vc1_mv_diff_vlc[4];
203 #define VC1_CBPCY_P_VLC_BITS 9 //14
204 static VLC vc1_cbpcy_p_vlc[4];
205 #define VC1_4MV_BLOCK_PATTERN_VLC_BITS 6
206 static VLC vc1_4mv_block_pattern_vlc[4];
207 #define VC1_TTBLK_VLC_BITS 5
208 static VLC vc1_ttblk_vlc[3];
209 #define VC1_SUBBLKPAT_VLC_BITS 6
210 static VLC vc1_subblkpat_vlc[3];
212 static VLC vc1_ac_coeff_table[8];
216 CS_HIGH_MOT_INTRA = 0,
226 /** @name Overlap conditions for Advanced Profile */
237 * @fixme Change size wherever another size is more efficient
238 * Many members are only used for Advanced Profile
240 typedef struct VC1Context{
245 /** Simple/Main Profile sequence header */
247 int res_sm; ///< reserved, 2b
248 int res_x8; ///< reserved
249 int multires; ///< frame-level RESPIC syntax element present
250 int res_fasttx; ///< reserved, always 1
251 int res_transtab; ///< reserved, always 0
252 int rangered; ///< RANGEREDFRM (range reduction) syntax element present
254 int res_rtm_flag; ///< reserved, set to 1
255 int reserved; ///< reserved
258 /** Advanced Profile */
260 int level; ///< 3bits, for Advanced/Simple Profile, provided by TS layer
int chromaformat; ///< 2 bits; 1=4:2:0 is the only value allowed
262 int postprocflag; ///< Per-frame processing suggestion flag present
263 int broadcast; ///< TFF/RFF present
int interlace; ///< Progressive/interlaced (INTERLACE syntax element)
265 int tfcntrflag; ///< TFCNTR present
266 int panscanflag; ///< NUMPANSCANWIN, TOPLEFT{X,Y}, BOTRIGHT{X,Y} present
267 int extended_dmv; ///< Additional extended dmv range at P/B frame-level
int color_prim; ///< 8 bits, chromaticity coordinates of the color primaries
269 int transfer_char; ///< 8bits, Opto-electronic transfer characteristics
270 int matrix_coef; ///< 8bits, Color primaries->YCbCr transform matrix
271 int hrd_param_flag; ///< Presence of Hypothetical Reference
272 ///< Decoder parameters
273 int psf; ///< Progressive Segmented Frame
276 /** Sequence header data for all Profiles
277 * TODO: choose between ints, uint8_ts and monobit flags
280 int profile; ///< 2bits, Profile
int frmrtq_postproc; ///< 3 bits, quantized framerate-based postprocessing strength
int bitrtq_postproc; ///< 5 bits, quantized bitrate-based postprocessing strength
int fastuvmc; ///< Rounding of qpel chroma MVs to hpel (shall be set in Simple)
284 int extended_mv; ///< Ext MV in P/B (not in Simple)
285 int dquant; ///< How qscale varies with MBs, 2bits (not in Simple)
int vstransform; ///< variable-size [4|8]x[4|8] transform type + info
287 int overlap; ///< overlapped transforms in use
288 int quantizer_mode; ///< 2bits, quantizer mode used for sequence, see QUANT_*
289 int finterpflag; ///< INTERPFRM present
292 /** Frame decoding info for all profiles */
uint8_t mv_mode; ///< MV coding mode
295 uint8_t mv_mode2; ///< Secondary MV coding mode (B frames)
296 int k_x; ///< Number of bits for MVs (depends on MV range)
297 int k_y; ///< Number of bits for MVs (depends on MV range)
298 int range_x, range_y; ///< MV range
299 uint8_t pq, altpq; ///< Current/alternate frame quantizer scale
300 /** pquant parameters */
307 /** AC coding set indexes
308 * @see 8.1.1.10, p(1)10
311 int c_ac_table_index; ///< Chroma index from ACFRM element
312 int y_ac_table_index; ///< Luma index from AC2FRM element
int ttfrm; ///< Frame-level transform type (TTFRM)
uint8_t ttmbf; ///< Flag: transform type is signalled at frame level (TTMBF)
316 uint8_t ttblk4x4; ///< Value of ttblk which indicates a 4x4 transform
317 int codingset; ///< index of current table set from 11.8 to use for luma block decoding
318 int codingset2; ///< index of current table set from 11.8 to use for chroma block decoding
319 int pqindex; ///< raw pqindex used in coding set selection
320 int a_avail, c_avail;
321 uint8_t *mb_type_base, *mb_type[3];
324 /** Luma compensation parameters */
int16_t bfraction; ///< Relative position between anchors => how to scale MVs
330 uint8_t halfpq; ///< Uniform quant over image and qp+.5
331 uint8_t respic; ///< Frame-level flag for resized images
332 int buffer_fullness; ///< HRD info
* -# 0 -> [-64, 63.f] x [-32, 31.f]
335 * -# 1 -> [-128, 127.f] x [-64, 63.f]
336 * -# 2 -> [-512, 511.f] x [-128, 127.f]
337 * -# 3 -> [-1024, 1023.f] x [-256, 255.f]
340 uint8_t pquantizer; ///< Uniform (over sequence) quantizer in use
341 VLC *cbpcy_vlc; ///< CBPCY VLC table
342 int tt_index; ///< Index for Transform Type tables
343 uint8_t* mv_type_mb_plane; ///< bitplane for mv_type == (4MV)
344 uint8_t* direct_mb_plane; ///< bitplane for "direct" MBs
345 int mv_type_is_raw; ///< mv type mb plane is not coded
346 int dmb_is_raw; ///< direct mb plane is raw
347 int skip_is_raw; ///< skip mb plane is not coded
348 uint8_t luty[256], lutuv[256]; // lookup tables used for intensity compensation
349 int use_ic; ///< use intensity compensation in B-frames
350 int rnd; ///< rounding control
352 /** Frame decoding info for S/M profiles only */
354 uint8_t rangeredfrm; ///< out_sample = CLIP((in_sample-128)*2+128)
358 /** Frame decoding info for Advanced profile */
360 uint8_t fcm; ///< 0->Progressive, 2->Frame-Interlace, 3->Field-Interlace
361 uint8_t numpanscanwin;
363 uint8_t rptfrm, tff, rff;
366 uint16_t bottomrightx;
367 uint16_t bottomrighty;
370 int hrd_num_leaky_buckets;
371 uint8_t bit_rate_exponent;
372 uint8_t buffer_size_exponent;
373 uint8_t* acpred_plane; ///< AC prediction flags bitplane
375 uint8_t* over_flags_plane; ///< Overflags bitplane
378 uint16_t *hrd_rate, *hrd_buffer;
379 uint8_t *hrd_fullness;
380 uint8_t range_mapy_flag;
381 uint8_t range_mapuv_flag;
391 * Get unary code of limited length
* @fixme Slow and ugly
393 * @param gb GetBitContext
394 * @param[in] stop The bitstop value (unary code of 1's or 0's)
395 * @param[in] len Maximum length
396 * @return Unary length/index
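* Example: with stop=0 the bit sequence 1 1 0 decodes to 2, and len
* consecutive non-stop bits decode to len.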
398 static int get_prefix(GetBitContext *gb, int stop, int len)
403 for(i = 0; i < len && get_bits1(gb) != stop; i++);
405 /* int i = 0, tmp = !stop;
407 while (i != len && tmp != stop)
409 tmp = get_bits(gb, 1);
412 if (i == len && tmp != stop) return len+1;
419 UPDATE_CACHE(re, gb);
420 buf=GET_CACHE(re, gb); //Still not sure
421 if (stop) buf = ~buf;
423 log= av_log2(-buf); //FIXME: -?
425 LAST_SKIP_BITS(re, gb, log+1);
426 CLOSE_READER(re, gb);
430 LAST_SKIP_BITS(re, gb, limit);
431 CLOSE_READER(re, gb);
static inline int decode210(GetBitContext *gb){
    if (get_bits1(gb)) return 0;     /* '1'  -> 0 */
    else return 2 - get_bits1(gb);   /* '01' -> 1, '00' -> 2 */
}
446 * Init VC-1 specific tables and VC1Context members
447 * @param v The VC1Context to initialize
450 static int vc1_init_common(VC1Context *v)
455 v->hrd_rate = v->hrd_buffer = NULL;
461 init_vlc(&vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
462 vc1_bfraction_bits, 1, 1,
463 vc1_bfraction_codes, 1, 1, 1);
464 init_vlc(&vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
465 vc1_norm2_bits, 1, 1,
466 vc1_norm2_codes, 1, 1, 1);
467 init_vlc(&vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
468 vc1_norm6_bits, 1, 1,
469 vc1_norm6_codes, 2, 2, 1);
470 init_vlc(&vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
471 vc1_imode_bits, 1, 1,
472 vc1_imode_codes, 1, 1, 1);
475 init_vlc(&vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
476 vc1_ttmb_bits[i], 1, 1,
477 vc1_ttmb_codes[i], 2, 2, 1);
478 init_vlc(&vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
479 vc1_ttblk_bits[i], 1, 1,
480 vc1_ttblk_codes[i], 1, 1, 1);
481 init_vlc(&vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
482 vc1_subblkpat_bits[i], 1, 1,
483 vc1_subblkpat_codes[i], 1, 1, 1);
487 init_vlc(&vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
488 vc1_4mv_block_pattern_bits[i], 1, 1,
489 vc1_4mv_block_pattern_codes[i], 1, 1, 1);
490 init_vlc(&vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
491 vc1_cbpcy_p_bits[i], 1, 1,
492 vc1_cbpcy_p_codes[i], 2, 2, 1);
493 init_vlc(&vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
494 vc1_mv_diff_bits[i], 1, 1,
495 vc1_mv_diff_codes[i], 2, 2, 1);
498 init_vlc(&vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
499 &vc1_ac_tables[i][0][1], 8, 4,
500 &vc1_ac_tables[i][0][0], 8, 4, 1);
501 init_vlc(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64,
502 &ff_msmp4_mb_i_table[0][1], 4, 2,
503 &ff_msmp4_mb_i_table[0][0], 4, 2, 1);
508 v->mvrange = 0; /* 7.1.1.18, p80 */
513 /***********************************************************************/
* @defgroup bitplane VC-1 Bitplane decoding
520 /** @addtogroup bitplane
533 /** @} */ //imode defines
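/* A coded bitplane starts with an INVERT bit and an IMODE VLC selecting one of
 * the modes handled in bitplane_decoding() below: raw (bits sent in the MB
 * layer), Norm-2/Diff-2 (two bits per VLC), Norm-6/Diff-6 (six bits per VLC in
 * 2x3 or 3x2 tiles), Rowskip and Colskip; the Diff-* modes add a differential
 * step on top of the Norm-* decoding. */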
535 /** Decode rows by checking if they are skipped
536 * @param plane Buffer to store decoded bits
537 * @param[in] width Width of this buffer
538 * @param[in] height Height of this buffer
* @param[in] stride Stride of this buffer
541 static void decode_rowskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
544 for (y=0; y<height; y++){
545 if (!get_bits(gb, 1)) //rowskip
546 memset(plane, 0, width);
548 for (x=0; x<width; x++)
549 plane[x] = get_bits(gb, 1);
554 /** Decode columns by checking if they are skipped
555 * @param plane Buffer to store decoded bits
556 * @param[in] width Width of this buffer
557 * @param[in] height Height of this buffer
* @param[in] stride Stride of this buffer
559 * @fixme FIXME: Optimize
561 static void decode_colskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
564 for (x=0; x<width; x++){
565 if (!get_bits(gb, 1)) //colskip
566 for (y=0; y<height; y++)
569 for (y=0; y<height; y++)
570 plane[y*stride] = get_bits(gb, 1);
575 /** Decode a bitplane's bits
* @param data Buffer where to store the decoded bits
577 * @param v VC-1 context for bit reading and logging
579 * @fixme FIXME: Optimize
581 static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
583 GetBitContext *gb = &v->s.gb;
585 int imode, x, y, code, offset;
586 uint8_t invert, *planep = data;
587 int width, height, stride;
589 width = v->s.mb_width;
590 height = v->s.mb_height;
591 stride = v->s.mb_stride;
592 invert = get_bits(gb, 1);
593 imode = get_vlc2(gb, vc1_imode_vlc.table, VC1_IMODE_VLC_BITS, 1);
599 //Data is actually read in the MB layer (same for all tests == "raw")
600 *raw_flag = 1; //invert ignored
604 if ((height * width) & 1)
606 *planep++ = get_bits(gb, 1);
610 // decode bitplane as one long line
611 for (y = offset; y < height * width; y += 2) {
612 code = get_vlc2(gb, vc1_norm2_vlc.table, VC1_NORM2_VLC_BITS, 1);
613 *planep++ = code & 1;
615 if(offset == width) {
617 planep += stride - width;
619 *planep++ = code >> 1;
621 if(offset == width) {
623 planep += stride - width;
629 if(!(height % 3) && (width % 3)) { // use 2x3 decoding
630 for(y = 0; y < height; y+= 3) {
631 for(x = width & 1; x < width; x += 2) {
632 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
634 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
637 planep[x + 0] = (code >> 0) & 1;
638 planep[x + 1] = (code >> 1) & 1;
639 planep[x + 0 + stride] = (code >> 2) & 1;
640 planep[x + 1 + stride] = (code >> 3) & 1;
641 planep[x + 0 + stride * 2] = (code >> 4) & 1;
642 planep[x + 1 + stride * 2] = (code >> 5) & 1;
644 planep += stride * 3;
646 if(width & 1) decode_colskip(data, 1, height, stride, &v->s.gb);
648 planep += (height & 1) * stride;
649 for(y = height & 1; y < height; y += 2) {
650 for(x = width % 3; x < width; x += 3) {
651 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
653 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
656 planep[x + 0] = (code >> 0) & 1;
657 planep[x + 1] = (code >> 1) & 1;
658 planep[x + 2] = (code >> 2) & 1;
659 planep[x + 0 + stride] = (code >> 3) & 1;
660 planep[x + 1 + stride] = (code >> 4) & 1;
661 planep[x + 2 + stride] = (code >> 5) & 1;
663 planep += stride * 2;
666 if(x) decode_colskip(data , x, height , stride, &v->s.gb);
667 if(height & 1) decode_rowskip(data+x, width - x, 1, stride, &v->s.gb);
671 decode_rowskip(data, width, height, stride, &v->s.gb);
674 decode_colskip(data, width, height, stride, &v->s.gb);
679 /* Applying diff operator */
680 if (imode == IMODE_DIFF2 || imode == IMODE_DIFF6)
684 for (x=1; x<width; x++)
685 planep[x] ^= planep[x-1];
686 for (y=1; y<height; y++)
689 planep[0] ^= planep[-stride];
690 for (x=1; x<width; x++)
692 if (planep[x-1] != planep[x-stride]) planep[x] ^= invert;
693 else planep[x] ^= planep[x-1];
700 for (x=0; x<stride*height; x++) planep[x] = !planep[x]; //FIXME stride
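/* pack IMODE and INVERT into the return value; callers log them as
 * status >> 1 and status & 1 */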
702 return (imode<<1) + invert;
705 /** @} */ //Bitplane group
707 /***********************************************************************/
708 /** VOP Dquant decoding
709 * @param v VC-1 Context
711 static int vop_dquant_decoding(VC1Context *v)
713 GetBitContext *gb = &v->s.gb;
719 pqdiff = get_bits(gb, 3);
720 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
721 else v->altpq = v->pq + pqdiff + 1;
725 v->dquantfrm = get_bits(gb, 1);
728 v->dqprofile = get_bits(gb, 2);
729 switch (v->dqprofile)
731 case DQPROFILE_SINGLE_EDGE:
732 case DQPROFILE_DOUBLE_EDGES:
733 v->dqsbedge = get_bits(gb, 2);
735 case DQPROFILE_ALL_MBS:
736 v->dqbilevel = get_bits(gb, 1);
737 default: break; //Forbidden ?
739 if (v->dqbilevel || v->dqprofile != DQPROFILE_ALL_MBS)
741 pqdiff = get_bits(gb, 3);
742 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
743 else v->altpq = v->pq + pqdiff + 1;
750 /** Put block onto picture
752 static void vc1_put_block(VC1Context *v, DCTELEM block[6][64])
756 DSPContext *dsp = &v->s.dsp;
760 for(k = 0; k < 6; k++)
761 for(j = 0; j < 8; j++)
762 for(i = 0; i < 8; i++)
763 block[k][i + j*8] = ((block[k][i + j*8] - 128) << 1) + 128;
766 ys = v->s.current_picture.linesize[0];
767 us = v->s.current_picture.linesize[1];
768 vs = v->s.current_picture.linesize[2];
771 dsp->put_pixels_clamped(block[0], Y, ys);
772 dsp->put_pixels_clamped(block[1], Y + 8, ys);
774 dsp->put_pixels_clamped(block[2], Y, ys);
775 dsp->put_pixels_clamped(block[3], Y + 8, ys);
777 if(!(v->s.flags & CODEC_FLAG_GRAY)) {
778 dsp->put_pixels_clamped(block[4], v->s.dest[1], us);
779 dsp->put_pixels_clamped(block[5], v->s.dest[2], vs);
783 /** Do motion compensation over 1 macroblock
* Mostly adapted from hpel_motion and qpel_motion in mpegvideo.c
786 static void vc1_mc_1mv(VC1Context *v, int dir)
788 MpegEncContext *s = &v->s;
789 DSPContext *dsp = &v->s.dsp;
790 uint8_t *srcY, *srcU, *srcV;
791 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
793 if(!v->s.last_picture.data[0])return;
795 mx = s->mv[dir][0][0];
796 my = s->mv[dir][0][1];
798 // store motion vectors for further use in B frames
799 if(s->pict_type == P_TYPE) {
800 s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
801 s->current_picture.motion_val[1][s->block_index[0]][1] = my;
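/* derive the chroma MV from the luma MV: half the displacement, still in
 * quarter-pel units; the conditional adjustment below is presumably the
 * FASTUVMC rounding of the chroma MV to half-pel. */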
803 uvmx = (mx + ((mx & 3) == 3)) >> 1;
804 uvmy = (my + ((my & 3) == 3)) >> 1;
806 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
807 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
810 srcY = s->last_picture.data[0];
811 srcU = s->last_picture.data[1];
812 srcV = s->last_picture.data[2];
814 srcY = s->next_picture.data[0];
815 srcU = s->next_picture.data[1];
816 srcV = s->next_picture.data[2];
819 src_x = s->mb_x * 16 + (mx >> 2);
820 src_y = s->mb_y * 16 + (my >> 2);
821 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
822 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
824 if(v->profile != PROFILE_ADVANCED){
825 src_x = av_clip( src_x, -16, s->mb_width * 16);
826 src_y = av_clip( src_y, -16, s->mb_height * 16);
827 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
828 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
830 src_x = av_clip( src_x, -17, s->avctx->coded_width);
831 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
832 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
833 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
836 srcY += src_y * s->linesize + src_x;
837 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
838 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
840 /* for grayscale we should not try to read from unknown area */
841 if(s->flags & CODEC_FLAG_GRAY) {
842 srcU = s->edge_emu_buffer + 18 * s->linesize;
843 srcV = s->edge_emu_buffer + 18 * s->linesize;
846 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
847 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
848 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
849 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
851 srcY -= s->mspel * (1 + s->linesize);
852 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
853 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
854 srcY = s->edge_emu_buffer;
855 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
856 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
857 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
858 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
861 /* if we deal with range reduction we need to scale source blocks */
867 for(j = 0; j < 17 + s->mspel*2; j++) {
868 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
871 src = srcU; src2 = srcV;
872 for(j = 0; j < 9; j++) {
873 for(i = 0; i < 9; i++) {
874 src[i] = ((src[i] - 128) >> 1) + 128;
875 src2[i] = ((src2[i] - 128) >> 1) + 128;
877 src += s->uvlinesize;
878 src2 += s->uvlinesize;
881 /* if we deal with intensity compensation we need to scale source blocks */
882 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
887 for(j = 0; j < 17 + s->mspel*2; j++) {
888 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]];
891 src = srcU; src2 = srcV;
892 for(j = 0; j < 9; j++) {
893 for(i = 0; i < 9; i++) {
894 src[i] = v->lutuv[src[i]];
895 src2[i] = v->lutuv[src2[i]];
897 src += s->uvlinesize;
898 src2 += s->uvlinesize;
901 srcY += s->mspel * (1 + s->linesize);
905 dxy = ((my & 3) << 2) | (mx & 3);
906 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
907 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
908 srcY += s->linesize * 8;
909 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
910 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
911 } else { // hpel mc - always used for luma
912 dxy = (my & 2) | ((mx & 2) >> 1);
915 dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
917 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
920 if(s->flags & CODEC_FLAG_GRAY) return;
921 /* Chroma MC always uses qpel bilinear */
922 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
926 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
927 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
929 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
930 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
934 /** Do motion compensation for 4-MV macroblock - luminance block
936 static void vc1_mc_4mv_luma(VC1Context *v, int n)
938 MpegEncContext *s = &v->s;
939 DSPContext *dsp = &v->s.dsp;
941 int dxy, mx, my, src_x, src_y;
944 if(!v->s.last_picture.data[0])return;
947 srcY = s->last_picture.data[0];
949 off = s->linesize * 4 * (n&2) + (n&1) * 8;
951 src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
952 src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);
954 if(v->profile != PROFILE_ADVANCED){
955 src_x = av_clip( src_x, -16, s->mb_width * 16);
956 src_y = av_clip( src_y, -16, s->mb_height * 16);
958 src_x = av_clip( src_x, -17, s->avctx->coded_width);
959 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
962 srcY += src_y * s->linesize + src_x;
964 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
965 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
966 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
967 srcY -= s->mspel * (1 + s->linesize);
968 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
969 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
970 srcY = s->edge_emu_buffer;
971 /* if we deal with range reduction we need to scale source blocks */
977 for(j = 0; j < 9 + s->mspel*2; j++) {
978 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
982 /* if we deal with intensity compensation we need to scale source blocks */
983 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
988 for(j = 0; j < 9 + s->mspel*2; j++) {
989 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
993 srcY += s->mspel * (1 + s->linesize);
997 dxy = ((my & 3) << 2) | (mx & 3);
998 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
999 } else { // hpel mc - always used for luma
1000 dxy = (my & 2) | ((mx & 2) >> 1);
1002 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
1004 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
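/** Median of four values: drops the smallest and the largest and averages the
 *  remaining two; used below to derive the chroma MV from the four luma MVs. */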
1008 static inline int median4(int a, int b, int c, int d)
1011 if(c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
1012 else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
1014 if(c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
1015 else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
1020 /** Do motion compensation for 4-MV macroblock - both chroma blocks
1022 static void vc1_mc_4mv_chroma(VC1Context *v)
1024 MpegEncContext *s = &v->s;
1025 DSPContext *dsp = &v->s.dsp;
1026 uint8_t *srcU, *srcV;
1027 int uvdxy, uvmx, uvmy, uvsrc_x, uvsrc_y;
1028 int i, idx, tx = 0, ty = 0;
1029 int mvx[4], mvy[4], intra[4];
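/* count[i] = number of set bits in i, i.e. how many of the four luma blocks
 * are intra for a given intra[] pattern */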
1030 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
1032 if(!v->s.last_picture.data[0])return;
1033 if(s->flags & CODEC_FLAG_GRAY) return;
1035 for(i = 0; i < 4; i++) {
1036 mvx[i] = s->mv[0][i][0];
1037 mvy[i] = s->mv[0][i][1];
1038 intra[i] = v->mb_type[0][s->block_index[i]];
1041 /* calculate chroma MV vector from four luma MVs */
1042 idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0];
1043 if(!idx) { // all blocks are inter
1044 tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
1045 ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
1046 } else if(count[idx] == 1) { // 3 inter blocks
1049 tx = mid_pred(mvx[1], mvx[2], mvx[3]);
1050 ty = mid_pred(mvy[1], mvy[2], mvy[3]);
1053 tx = mid_pred(mvx[0], mvx[2], mvx[3]);
1054 ty = mid_pred(mvy[0], mvy[2], mvy[3]);
1057 tx = mid_pred(mvx[0], mvx[1], mvx[3]);
1058 ty = mid_pred(mvy[0], mvy[1], mvy[3]);
1061 tx = mid_pred(mvx[0], mvx[1], mvx[2]);
1062 ty = mid_pred(mvy[0], mvy[1], mvy[2]);
1065 } else if(count[idx] == 2) {
1067 for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;}
1068 for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;}
1069 tx = (mvx[t1] + mvx[t2]) / 2;
1070 ty = (mvy[t1] + mvy[t2]) / 2;
1072 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
1073 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
return; //no need to do MC for intra blocks
1077 s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
1078 s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
1079 uvmx = (tx + ((tx&3) == 3)) >> 1;
1080 uvmy = (ty + ((ty&3) == 3)) >> 1;
1082 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
1083 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
1086 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1087 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1089 if(v->profile != PROFILE_ADVANCED){
1090 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1091 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1093 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1094 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1097 srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1098 srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1099 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1100 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
1101 || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
1102 ff_emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize, 8+1, 8+1,
1103 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1104 ff_emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1,
1105 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1106 srcU = s->edge_emu_buffer;
1107 srcV = s->edge_emu_buffer + 16;
1109 /* if we deal with range reduction we need to scale source blocks */
1110 if(v->rangeredfrm) {
1112 uint8_t *src, *src2;
1114 src = srcU; src2 = srcV;
1115 for(j = 0; j < 9; j++) {
1116 for(i = 0; i < 9; i++) {
1117 src[i] = ((src[i] - 128) >> 1) + 128;
1118 src2[i] = ((src2[i] - 128) >> 1) + 128;
1120 src += s->uvlinesize;
1121 src2 += s->uvlinesize;
1124 /* if we deal with intensity compensation we need to scale source blocks */
1125 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1127 uint8_t *src, *src2;
1129 src = srcU; src2 = srcV;
1130 for(j = 0; j < 9; j++) {
1131 for(i = 0; i < 9; i++) {
1132 src[i] = v->lutuv[src[i]];
1133 src2[i] = v->lutuv[src2[i]];
1135 src += s->uvlinesize;
1136 src2 += s->uvlinesize;
1141 /* Chroma MC always uses qpel bilinear */
1142 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
1146 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1147 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1149 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1150 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1154 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb);
1157 * Decode Simple/Main Profiles sequence header
1158 * @see Figure 7-8, p16-17
1159 * @param avctx Codec context
1160 * @param gb GetBit context initialized from Codec context extra_data
1163 static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
1165 VC1Context *v = avctx->priv_data;
1167 av_log(avctx, AV_LOG_DEBUG, "Header: %0X\n", show_bits(gb, 32));
1168 v->profile = get_bits(gb, 2);
1169 if (v->profile == PROFILE_COMPLEX)
1171 av_log(avctx, AV_LOG_ERROR, "WMV3 Complex Profile is not fully supported\n");
1174 if (v->profile == PROFILE_ADVANCED)
1176 return decode_sequence_header_adv(v, gb);
1180 v->res_sm = get_bits(gb, 2); //reserved
1183 av_log(avctx, AV_LOG_ERROR,
1184 "Reserved RES_SM=%i is forbidden\n", v->res_sm);
1190 v->frmrtq_postproc = get_bits(gb, 3); //common
1191 // (bitrate-32kbps)/64kbps
1192 v->bitrtq_postproc = get_bits(gb, 5); //common
1193 v->s.loop_filter = get_bits(gb, 1); //common
1194 if(v->s.loop_filter == 1 && v->profile == PROFILE_SIMPLE)
1196 av_log(avctx, AV_LOG_ERROR,
1197 "LOOPFILTER shell not be enabled in simple profile\n");
1200 v->res_x8 = get_bits(gb, 1); //reserved
1203 av_log(avctx, AV_LOG_ERROR,
1204 "1 for reserved RES_X8 is forbidden\n");
1207 v->multires = get_bits(gb, 1);
1208 v->res_fasttx = get_bits(gb, 1);
1211 av_log(avctx, AV_LOG_ERROR,
1212 "0 for reserved RES_FASTTX is forbidden\n");
1216 v->fastuvmc = get_bits(gb, 1); //common
1217 if (!v->profile && !v->fastuvmc)
1219 av_log(avctx, AV_LOG_ERROR,
1220 "FASTUVMC unavailable in Simple Profile\n");
1223 v->extended_mv = get_bits(gb, 1); //common
1224 if (!v->profile && v->extended_mv)
1226 av_log(avctx, AV_LOG_ERROR,
1227 "Extended MVs unavailable in Simple Profile\n");
1230 v->dquant = get_bits(gb, 2); //common
1231 v->vstransform = get_bits(gb, 1); //common
1233 v->res_transtab = get_bits(gb, 1);
1234 if (v->res_transtab)
1236 av_log(avctx, AV_LOG_ERROR,
1237 "1 for reserved RES_TRANSTAB is forbidden\n");
1241 v->overlap = get_bits(gb, 1); //common
1243 v->s.resync_marker = get_bits(gb, 1);
1244 v->rangered = get_bits(gb, 1);
1245 if (v->rangered && v->profile == PROFILE_SIMPLE)
1247 av_log(avctx, AV_LOG_INFO,
1248 "RANGERED should be set to 0 in simple profile\n");
1251 v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
1252 v->quantizer_mode = get_bits(gb, 2); //common
1254 v->finterpflag = get_bits(gb, 1); //common
1255 v->res_rtm_flag = get_bits(gb, 1); //reserved
1256 if (!v->res_rtm_flag)
1258 // av_log(avctx, AV_LOG_ERROR,
1259 // "0 for reserved RES_RTM_FLAG is forbidden\n");
1260 av_log(avctx, AV_LOG_ERROR,
1261 "Old WMV3 version detected, only I-frames will be decoded\n");
1264 av_log(avctx, AV_LOG_DEBUG,
1265 "Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
1266 "LoopFilter=%i, MultiRes=%i, FastUVMC=%i, Extended MV=%i\n"
1267 "Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
1268 "DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
1269 v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
1270 v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
1271 v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
1272 v->dquant, v->quantizer_mode, avctx->max_b_frames
1277 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
1279 v->res_rtm_flag = 1;
1280 v->level = get_bits(gb, 3);
1283 av_log(v->s.avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
1285 v->chromaformat = get_bits(gb, 2);
1286 if (v->chromaformat != 1)
1288 av_log(v->s.avctx, AV_LOG_ERROR,
1289 "Only 4:2:0 chroma format supported\n");
1294 v->frmrtq_postproc = get_bits(gb, 3); //common
1295 // (bitrate-32kbps)/64kbps
1296 v->bitrtq_postproc = get_bits(gb, 5); //common
1297 v->postprocflag = get_bits(gb, 1); //common
1299 v->s.avctx->coded_width = (get_bits(gb, 12) + 1) << 1;
1300 v->s.avctx->coded_height = (get_bits(gb, 12) + 1) << 1;
1301 v->s.avctx->width = v->s.avctx->coded_width;
1302 v->s.avctx->height = v->s.avctx->coded_height;
1303 v->broadcast = get_bits1(gb);
1304 v->interlace = get_bits1(gb);
1305 v->tfcntrflag = get_bits1(gb);
1306 v->finterpflag = get_bits1(gb);
1307 get_bits1(gb); // reserved
1309 v->s.h_edge_pos = v->s.avctx->coded_width;
1310 v->s.v_edge_pos = v->s.avctx->coded_height;
1312 av_log(v->s.avctx, AV_LOG_DEBUG,
1313 "Advanced Profile level %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
1314 "LoopFilter=%i, ChromaFormat=%i, Pulldown=%i, Interlace: %i\n"
1315 "TFCTRflag=%i, FINTERPflag=%i\n",
1316 v->level, v->frmrtq_postproc, v->bitrtq_postproc,
1317 v->s.loop_filter, v->chromaformat, v->broadcast, v->interlace,
1318 v->tfcntrflag, v->finterpflag
1321 v->psf = get_bits1(gb);
1322 if(v->psf) { //PsF, 6.1.13
1323 av_log(v->s.avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
1326 v->s.max_b_frames = v->s.avctx->max_b_frames = 7;
1327 if(get_bits1(gb)) { //Display Info - decoding is not affected by it
1329 av_log(v->s.avctx, AV_LOG_DEBUG, "Display extended info:\n");
1330 v->s.avctx->width = v->s.width = w = get_bits(gb, 14) + 1;
1331 v->s.avctx->height = v->s.height = h = get_bits(gb, 14) + 1;
1332 av_log(v->s.avctx, AV_LOG_DEBUG, "Display dimensions: %ix%i\n", w, h);
1334 ar = get_bits(gb, 4);
1336 v->s.avctx->sample_aspect_ratio = vc1_pixel_aspect[ar];
1338 w = get_bits(gb, 8);
1339 h = get_bits(gb, 8);
1340 v->s.avctx->sample_aspect_ratio = (AVRational){w, h};
1343 if(get_bits1(gb)){ //framerate stuff
1345 v->s.avctx->time_base.num = 32;
1346 v->s.avctx->time_base.den = get_bits(gb, 16) + 1;
1349 nr = get_bits(gb, 8);
1350 dr = get_bits(gb, 4);
1351 if(nr && nr < 8 && dr && dr < 3){
1352 v->s.avctx->time_base.num = fps_dr[dr - 1];
1353 v->s.avctx->time_base.den = fps_nr[nr - 1] * 1000;
1359 v->color_prim = get_bits(gb, 8);
1360 v->transfer_char = get_bits(gb, 8);
1361 v->matrix_coef = get_bits(gb, 8);
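/* Hypothetical Reference Decoder parameters: a leaky-bucket count, two size
 * exponents and a (rate, buffer size) pair per bucket; the per-bucket values
 * are read below but not stored. */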
1365 v->hrd_param_flag = get_bits1(gb);
1366 if(v->hrd_param_flag) {
1368 v->hrd_num_leaky_buckets = get_bits(gb, 5);
1369 get_bits(gb, 4); //bitrate exponent
1370 get_bits(gb, 4); //buffer size exponent
1371 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1372 get_bits(gb, 16); //hrd_rate[n]
1373 get_bits(gb, 16); //hrd_buffer[n]
1379 static int decode_entry_point(AVCodecContext *avctx, GetBitContext *gb)
1381 VC1Context *v = avctx->priv_data;
1382 int i, blink, clentry, refdist;
1384 av_log(avctx, AV_LOG_DEBUG, "Entry point: %08X\n", show_bits_long(gb, 32));
1385 blink = get_bits1(gb); // broken link
1386 clentry = get_bits1(gb); // closed entry
1387 v->panscanflag = get_bits1(gb);
1388 refdist = get_bits1(gb); // refdist flag
1389 v->s.loop_filter = get_bits1(gb);
1390 v->fastuvmc = get_bits1(gb);
1391 v->extended_mv = get_bits1(gb);
1392 v->dquant = get_bits(gb, 2);
1393 v->vstransform = get_bits1(gb);
1394 v->overlap = get_bits1(gb);
1395 v->quantizer_mode = get_bits(gb, 2);
1397 if(v->hrd_param_flag){
1398 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1399 get_bits(gb, 8); //hrd_full[n]
1404 avctx->coded_width = (get_bits(gb, 12)+1)<<1;
1405 avctx->coded_height = (get_bits(gb, 12)+1)<<1;
1408 v->extended_dmv = get_bits1(gb);
1410 av_log(avctx, AV_LOG_ERROR, "Luma scaling is not supported, expect wrong picture\n");
1411 skip_bits(gb, 3); // Y range, ignored for now
1414 av_log(avctx, AV_LOG_ERROR, "Chroma scaling is not supported, expect wrong picture\n");
1415 skip_bits(gb, 3); // UV range, ignored for now
1418 av_log(avctx, AV_LOG_DEBUG, "Entry point info:\n"
1419 "BrokenLink=%i, ClosedEntry=%i, PanscanFlag=%i\n"
1420 "RefDist=%i, Postproc=%i, FastUVMC=%i, ExtMV=%i\n"
1421 "DQuant=%i, VSTransform=%i, Overlap=%i, Qmode=%i\n",
1422 blink, clentry, v->panscanflag, refdist, v->s.loop_filter,
1423 v->fastuvmc, v->extended_mv, v->dquant, v->vstransform, v->overlap, v->quantizer_mode);
1428 static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
1430 int pqindex, lowquant, status;
1432 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
1433 skip_bits(gb, 2); //framecnt unused
1435 if (v->rangered) v->rangeredfrm = get_bits(gb, 1);
1436 v->s.pict_type = get_bits(gb, 1);
1437 if (v->s.avctx->max_b_frames) {
1438 if (!v->s.pict_type) {
1439 if (get_bits(gb, 1)) v->s.pict_type = I_TYPE;
1440 else v->s.pict_type = B_TYPE;
1441 } else v->s.pict_type = P_TYPE;
1442 } else v->s.pict_type = v->s.pict_type ? P_TYPE : I_TYPE;
1445 if(v->s.pict_type == B_TYPE) {
1446 v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1447 v->bfraction = vc1_bfraction_lut[v->bfraction];
1448 if(v->bfraction == 0) {
1449 v->s.pict_type = BI_TYPE;
1452 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1453 get_bits(gb, 7); // skip buffer fullness
1456 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1458 if(v->s.pict_type == P_TYPE)
1461 /* Quantizer stuff */
1462 pqindex = get_bits(gb, 5);
1463 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1464 v->pq = pquant_table[0][pqindex];
1466 v->pq = pquant_table[1][pqindex];
1469 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1470 v->pquantizer = pqindex < 9;
1471 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1473 v->pqindex = pqindex;
1474 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1476 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1477 v->pquantizer = get_bits(gb, 1);
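/* In implicit mode PQINDEX selects both PQUANT and the quantizer type
 * (uniform if PQINDEX < 9); in explicit mode the type is read from the
 * bitstream, otherwise it is fixed by the sequence-level QUANTIZER mode. */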
1479 if (v->extended_mv == 1) v->mvrange = get_prefix(gb, 0, 3);
1480 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1481 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1482 v->range_x = 1 << (v->k_x - 1);
1483 v->range_y = 1 << (v->k_y - 1);
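/* MVRANGE (limited-length unary) widens the long-MV field widths k_x/k_y and
 * the wrap ranges range_x/range_y used when storing predicted MVs. */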
1484 if (v->profile == PROFILE_ADVANCED)
1486 if (v->postprocflag) v->postproc = get_bits(gb, 1);
1489 if (v->multires && v->s.pict_type != B_TYPE) v->respic = get_bits(gb, 2);
1491 if(v->res_x8 && (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)){
1492 if(get_bits1(gb))return -1;
1494 //av_log(v->s.avctx, AV_LOG_INFO, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
1495 // (v->s.pict_type == P_TYPE) ? 'P' : ((v->s.pict_type == I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);
1497 if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0;
1499 switch(v->s.pict_type) {
1501 if (v->pq < 5) v->tt_index = 0;
1502 else if(v->pq < 13) v->tt_index = 1;
1503 else v->tt_index = 2;
1505 lowquant = (v->pq > 12) ? 0 : 1;
1506 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1507 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1509 int scale, shift, i;
1510 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1511 v->lumscale = get_bits(gb, 6);
1512 v->lumshift = get_bits(gb, 6);
1514 /* fill lookup tables for intensity compensation */
if(v->lumscale == 0) {
    scale = -64;
    shift = (255 - v->lumshift * 2) << 6;
    if(v->lumshift > 31)
        shift += 128 << 6;
} else {
    scale = v->lumscale + 32;
    if(v->lumshift > 31)
        shift = (v->lumshift - 64) << 6;
    else
        shift = v->lumshift << 6;
}
1527 for(i = 0; i < 256; i++) {
1528 v->luty[i] = av_clip_uint8((scale * i + shift + 32) >> 6);
1529 v->lutuv[i] = av_clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
1532 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1533 v->s.quarter_sample = 0;
1534 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1535 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1536 v->s.quarter_sample = 0;
1538 v->s.quarter_sample = 1;
1540 v->s.quarter_sample = 1;
1541 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
1543 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1544 v->mv_mode2 == MV_PMODE_MIXED_MV)
1545 || v->mv_mode == MV_PMODE_MIXED_MV)
1547 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1548 if (status < 0) return -1;
1549 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1550 "Imode: %i, Invert: %i\n", status>>1, status&1);
1552 v->mv_type_is_raw = 0;
1553 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1555 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1556 if (status < 0) return -1;
1557 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1558 "Imode: %i, Invert: %i\n", status>>1, status&1);
1560 /* Hopefully this is correct for P frames */
1561 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1562 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1566 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1567 vop_dquant_decoding(v);
1570 v->ttfrm = 0; //FIXME Is that so ?
1573 v->ttmbf = get_bits(gb, 1);
1576 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
1584 if (v->pq < 5) v->tt_index = 0;
1585 else if(v->pq < 13) v->tt_index = 1;
1586 else v->tt_index = 2;
1588 lowquant = (v->pq > 12) ? 0 : 1;
1589 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1590 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1591 v->s.mspel = v->s.quarter_sample;
1593 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1594 if (status < 0) return -1;
1595 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1596 "Imode: %i, Invert: %i\n", status>>1, status&1);
1597 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1598 if (status < 0) return -1;
1599 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1600 "Imode: %i, Invert: %i\n", status>>1, status&1);
1602 v->s.mv_table_index = get_bits(gb, 2);
1603 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1607 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1608 vop_dquant_decoding(v);
1614 v->ttmbf = get_bits(gb, 1);
1617 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
1627 v->c_ac_table_index = decode012(gb);
1628 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1630 v->y_ac_table_index = decode012(gb);
1633 v->s.dc_table_index = get_bits(gb, 1);
1635 if(v->s.pict_type == BI_TYPE) {
1636 v->s.pict_type = B_TYPE;
1642 static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
1644 int pqindex, lowquant;
1647 v->p_frame_skipped = 0;
1650 v->fcm = decode012(gb);
1651 if(v->fcm) return -1; // interlaced frames/fields are not implemented
1653 switch(get_prefix(gb, 0, 4)) {
1655 v->s.pict_type = P_TYPE;
1658 v->s.pict_type = B_TYPE;
1661 v->s.pict_type = I_TYPE;
1664 v->s.pict_type = BI_TYPE;
1667 v->s.pict_type = P_TYPE; // skipped pic
1668 v->p_frame_skipped = 1;
1674 if(!v->interlace || v->psf) {
1675 v->rptfrm = get_bits(gb, 2);
1677 v->tff = get_bits1(gb);
1678 v->rptfrm = get_bits1(gb);
1681 if(v->panscanflag) {
1684 v->rnd = get_bits1(gb);
1686 v->uvsamp = get_bits1(gb);
1687 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
1688 if(v->s.pict_type == B_TYPE) {
1689 v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1690 v->bfraction = vc1_bfraction_lut[v->bfraction];
1691 if(v->bfraction == 0) {
1692 v->s.pict_type = BI_TYPE; /* XXX: should not happen here */
1695 pqindex = get_bits(gb, 5);
1696 v->pqindex = pqindex;
1697 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1698 v->pq = pquant_table[0][pqindex];
1700 v->pq = pquant_table[1][pqindex];
1703 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1704 v->pquantizer = pqindex < 9;
1705 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1707 v->pqindex = pqindex;
1708 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1710 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1711 v->pquantizer = get_bits(gb, 1);
1713 if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0;
1715 switch(v->s.pict_type) {
1718 status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
1719 if (status < 0) return -1;
1720 av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
1721 "Imode: %i, Invert: %i\n", status>>1, status&1);
1722 v->condover = CONDOVER_NONE;
1723 if(v->overlap && v->pq <= 8) {
1724 v->condover = decode012(gb);
1725 if(v->condover == CONDOVER_SELECT) {
1726 status = bitplane_decoding(v->over_flags_plane, &v->overflg_is_raw, v);
1727 if (status < 0) return -1;
1728 av_log(v->s.avctx, AV_LOG_DEBUG, "CONDOVER plane encoding: "
1729 "Imode: %i, Invert: %i\n", status>>1, status&1);
1735 v->postproc = get_bits1(gb);
1736 if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
1737 else v->mvrange = 0;
1738 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1739 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1740 v->range_x = 1 << (v->k_x - 1);
1741 v->range_y = 1 << (v->k_y - 1);
1743 if (v->pq < 5) v->tt_index = 0;
1744 else if(v->pq < 13) v->tt_index = 1;
1745 else v->tt_index = 2;
1747 lowquant = (v->pq > 12) ? 0 : 1;
1748 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1749 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1751 int scale, shift, i;
1752 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1753 v->lumscale = get_bits(gb, 6);
1754 v->lumshift = get_bits(gb, 6);
1755 /* fill lookup tables for intensity compensation */
if(v->lumscale == 0) {
    scale = -64;
    shift = (255 - v->lumshift * 2) << 6;
    if(v->lumshift > 31)
        shift += 128 << 6;
} else {
    scale = v->lumscale + 32;
    if(v->lumshift > 31)
        shift = (v->lumshift - 64) << 6;
    else
        shift = v->lumshift << 6;
}
1768 for(i = 0; i < 256; i++) {
1769 v->luty[i] = av_clip_uint8((scale * i + shift + 32) >> 6);
1770 v->lutuv[i] = av_clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
1774 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1775 v->s.quarter_sample = 0;
1776 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1777 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1778 v->s.quarter_sample = 0;
1780 v->s.quarter_sample = 1;
1782 v->s.quarter_sample = 1;
1783 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
1785 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1786 v->mv_mode2 == MV_PMODE_MIXED_MV)
1787 || v->mv_mode == MV_PMODE_MIXED_MV)
1789 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1790 if (status < 0) return -1;
1791 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1792 "Imode: %i, Invert: %i\n", status>>1, status&1);
1794 v->mv_type_is_raw = 0;
1795 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1797 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1798 if (status < 0) return -1;
1799 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1800 "Imode: %i, Invert: %i\n", status>>1, status&1);
1802 /* Hopefully this is correct for P frames */
1803 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1804 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1807 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1808 vop_dquant_decoding(v);
1811 v->ttfrm = 0; //FIXME Is that so ?
1814 v->ttmbf = get_bits(gb, 1);
1817 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
1826 v->postproc = get_bits1(gb);
1827 if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
1828 else v->mvrange = 0;
1829 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1830 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1831 v->range_x = 1 << (v->k_x - 1);
1832 v->range_y = 1 << (v->k_y - 1);
1834 if (v->pq < 5) v->tt_index = 0;
1835 else if(v->pq < 13) v->tt_index = 1;
1836 else v->tt_index = 2;
1838 lowquant = (v->pq > 12) ? 0 : 1;
1839 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1840 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1841 v->s.mspel = v->s.quarter_sample;
1843 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1844 if (status < 0) return -1;
1845 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1846 "Imode: %i, Invert: %i\n", status>>1, status&1);
1847 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1848 if (status < 0) return -1;
1849 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1850 "Imode: %i, Invert: %i\n", status>>1, status&1);
1852 v->s.mv_table_index = get_bits(gb, 2);
1853 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1857 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1858 vop_dquant_decoding(v);
1864 v->ttmbf = get_bits(gb, 1);
1867 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
1877 v->c_ac_table_index = decode012(gb);
1878 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1880 v->y_ac_table_index = decode012(gb);
1883 v->s.dc_table_index = get_bits(gb, 1);
1884 if ((v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE) && v->dquant) {
1885 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1886 vop_dquant_decoding(v);
1890 if(v->s.pict_type == BI_TYPE) {
1891 v->s.pict_type = B_TYPE;
1897 /***********************************************************************/
1899 * @defgroup block VC-1 Block-level functions
1900 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1906 * @brief Get macroblock-level quantizer scale
1908 #define GET_MQUANT() \
1912 if (v->dqprofile == DQPROFILE_ALL_MBS) \
1916 mquant = (get_bits(gb, 1)) ? v->altpq : v->pq; \
1920 mqdiff = get_bits(gb, 3); \
1921 if (mqdiff != 7) mquant = v->pq + mqdiff; \
1922 else mquant = get_bits(gb, 5); \
1925 if(v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1926 edges = 1 << v->dqsbedge; \
1927 else if(v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1928 edges = (3 << v->dqsbedge) % 15; \
1929 else if(v->dqprofile == DQPROFILE_FOUR_EDGES) \
1931 if((edges&1) && !s->mb_x) \
1932 mquant = v->altpq; \
1933 if((edges&2) && s->first_slice_line) \
1934 mquant = v->altpq; \
1935 if((edges&4) && s->mb_x == (s->mb_width - 1)) \
1936 mquant = v->altpq; \
1937 if((edges&8) && s->mb_y == (s->mb_height - 1)) \
1938 mquant = v->altpq; \
1942 * @def GET_MVDATA(_dmv_x, _dmv_y)
1943 * @brief Get MV differentials
1944 * @see MVDATA decoding from 8.3.5.2, p(1)20
1945 * @param _dmv_x Horizontal differential for decoded MV
1946 * @param _dmv_y Vertical differential for decoded MV
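* The 73-entry joint VLC also codes whether the MB has coefficients (indices
* above 36 set mb_has_coeffs and are folded down); after folding, index 0 is a
* zero differential, 35 an escape with explicit k_x/k_y-bit values, 36
* presumably the intra-MB case, and the remaining indices select a size class
* refined via size_table/offset_table.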
1948 #define GET_MVDATA(_dmv_x, _dmv_y) \
1949 index = 1 + get_vlc2(gb, vc1_mv_diff_vlc[s->mv_table_index].table,\
1950 VC1_MV_DIFF_VLC_BITS, 2); \
1953 mb_has_coeffs = 1; \
1956 else mb_has_coeffs = 0; \
1958 if (!index) { _dmv_x = _dmv_y = 0; } \
1959 else if (index == 35) \
1961 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1962 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1964 else if (index == 36) \
1973 if (!s->quarter_sample && index1 == 5) val = 1; \
1975 if(size_table[index1] - val > 0) \
1976 val = get_bits(gb, size_table[index1] - val); \
1978 sign = 0 - (val&1); \
1979 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1982 if (!s->quarter_sample && index1 == 5) val = 1; \
1984 if(size_table[index1] - val > 0) \
1985 val = get_bits(gb, size_table[index1] - val); \
1987 sign = 0 - (val&1); \
1988 _dmv_y = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1991 /** Predict and set motion vector
1993 static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t* is_intra)
1995 int xy, wrap, off = 0;
/* scale the MV difference to quarter-pel units */
2001 dmv_x <<= 1 - s->quarter_sample;
2002 dmv_y <<= 1 - s->quarter_sample;
2004 wrap = s->b8_stride;
2005 xy = s->block_index[n];
2008 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
2009 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
2010 s->current_picture.motion_val[1][xy][0] = 0;
2011 s->current_picture.motion_val[1][xy][1] = 0;
2012 if(mv1) { /* duplicate motion data for 1-MV block */
2013 s->current_picture.motion_val[0][xy + 1][0] = 0;
2014 s->current_picture.motion_val[0][xy + 1][1] = 0;
2015 s->current_picture.motion_val[0][xy + wrap][0] = 0;
2016 s->current_picture.motion_val[0][xy + wrap][1] = 0;
2017 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
2018 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
2019 s->current_picture.motion_val[1][xy + 1][0] = 0;
2020 s->current_picture.motion_val[1][xy + 1][1] = 0;
2021 s->current_picture.motion_val[1][xy + wrap][0] = 0;
2022 s->current_picture.motion_val[1][xy + wrap][1] = 0;
2023 s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
2024 s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
2029 C = s->current_picture.motion_val[0][xy - 1];
2030 A = s->current_picture.motion_val[0][xy - wrap];
2032 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
2034 //in 4-MV mode different blocks have different B predictor position
2037 off = (s->mb_x > 0) ? -1 : 1;
2040 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
2049 B = s->current_picture.motion_val[0][xy - wrap + off];
2051 if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
2052 if(s->mb_width == 1) {
2056 px = mid_pred(A[0], B[0], C[0]);
2057 py = mid_pred(A[1], B[1], C[1]);
2059 } else if(s->mb_x || (n==1 || n==3)) { // predictor C is not out of bounds
2065 /* Pullback MV as specified in 8.3.5.3.4 */
2068 qx = (s->mb_x << 6) + ((n==1 || n==3) ? 32 : 0);
2069 qy = (s->mb_y << 6) + ((n==2 || n==3) ? 32 : 0);
2070 X = (s->mb_width << 6) - 4;
2071 Y = (s->mb_height << 6) - 4;
2073 if(qx + px < -60) px = -60 - qx;
2074 if(qy + py < -60) py = -60 - qy;
2076 if(qx + px < -28) px = -28 - qx;
2077 if(qy + py < -28) py = -28 - qy;
2079 if(qx + px > X) px = X - qx;
2080 if(qy + py > Y) py = Y - qy;
2082 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2083 if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
2084 if(is_intra[xy - wrap])
2085 sum = FFABS(px) + FFABS(py);
2087 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2089 if(get_bits1(&s->gb)) {
2097 if(is_intra[xy - 1])
2098 sum = FFABS(px) + FFABS(py);
2100 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2102 if(get_bits1(&s->gb)) {
2112 /* store MV using signed modulus of MV range defined in 4.11 */
2113 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
2114 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
2115 if(mv1) { /* duplicate motion data for 1-MV block */
2116 s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
2117 s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
2118 s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0];
2119 s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1];
2120 s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
2121 s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
2125 /** Motion compensation for direct or interpolated blocks in B-frames
2127 static void vc1_interp_mc(VC1Context *v)
2129 MpegEncContext *s = &v->s;
2130 DSPContext *dsp = &v->s.dsp;
2131 uint8_t *srcY, *srcU, *srcV;
2132 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
2134 if(!v->s.next_picture.data[0])return;
2136 mx = s->mv[1][0][0];
2137 my = s->mv[1][0][1];
2138 uvmx = (mx + ((mx & 3) == 3)) >> 1;
2139 uvmy = (my + ((my & 3) == 3)) >> 1;
2141 uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1));
2142 uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1));
2144 srcY = s->next_picture.data[0];
2145 srcU = s->next_picture.data[1];
2146 srcV = s->next_picture.data[2];
2148 src_x = s->mb_x * 16 + (mx >> 2);
2149 src_y = s->mb_y * 16 + (my >> 2);
2150 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
2151 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
2153 if(v->profile != PROFILE_ADVANCED){
2154 src_x = av_clip( src_x, -16, s->mb_width * 16);
2155 src_y = av_clip( src_y, -16, s->mb_height * 16);
2156 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
2157 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
2159 src_x = av_clip( src_x, -17, s->avctx->coded_width);
2160 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
2161 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
2162 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
2165 srcY += src_y * s->linesize + src_x;
2166 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
2167 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
2170     /* for grayscale we should not try to read from an unknown area */
2170 if(s->flags & CODEC_FLAG_GRAY) {
2171 srcU = s->edge_emu_buffer + 18 * s->linesize;
2172 srcV = s->edge_emu_buffer + 18 * s->linesize;
2176 || (unsigned)src_x > s->h_edge_pos - (mx&3) - 16
2177 || (unsigned)src_y > s->v_edge_pos - (my&3) - 16){
2178 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
2180 srcY -= s->mspel * (1 + s->linesize);
2181 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
2182 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
2183 srcY = s->edge_emu_buffer;
2184 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
2185 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2186 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
2187 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2190 /* if we deal with range reduction we need to scale source blocks */
2191 if(v->rangeredfrm) {
2193 uint8_t *src, *src2;
2196 for(j = 0; j < 17 + s->mspel*2; j++) {
2197 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
2200 src = srcU; src2 = srcV;
2201 for(j = 0; j < 9; j++) {
2202 for(i = 0; i < 9; i++) {
2203 src[i] = ((src[i] - 128) >> 1) + 128;
2204 src2[i] = ((src2[i] - 128) >> 1) + 128;
2206 src += s->uvlinesize;
2207 src2 += s->uvlinesize;
2210 srcY += s->mspel * (1 + s->linesize);
2215 dxy = ((my & 1) << 1) | (mx & 1);
2217 dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
2219 if(s->flags & CODEC_FLAG_GRAY) return;
2220     /* Chroma MC always uses qpel bilinear */
2221 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
2224 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
2225 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
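/** Scale a co-located MV component by BFRACTION for direct-mode B-frame prediction.
 * inv = 0 gives the forward-scaled component, inv = 1 the backward one (complementary,
 * negated fraction); qs selects half- vs quarter-sample rounding. With
 * bfrac = B_FRACTION_DEN/2 the forward component is roughly half the co-located MV.
 */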
2228 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2232 #if B_FRACTION_DEN==256
2236 return 2 * ((value * n + 255) >> 9);
2237 return (value * n + 128) >> 8;
2240 n -= B_FRACTION_DEN;
2242 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2243 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2247 /** Reconstruct motion vector for B-frame and do motion compensation
2249 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
2252 v->mv_mode2 = v->mv_mode;
2253 v->mv_mode = MV_PMODE_INTENSITY_COMP;
2258 if(v->use_ic) v->mv_mode = v->mv_mode2;
2261 if(mode == BMV_TYPE_INTERPOLATED) {
2264 if(v->use_ic) v->mv_mode = v->mv_mode2;
2268 if(v->use_ic && (mode == BMV_TYPE_BACKWARD)) v->mv_mode = v->mv_mode2;
2269 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2270 if(v->use_ic) v->mv_mode = v->mv_mode2;
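/** Predict motion vectors for a B-frame MB and reconstruct the forward and/or
 * backward MV from the transmitted differentials; direct mode instead uses the
 * co-located MVs of the next picture, scaled by BFRACTION.
 */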
2273 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
2275 MpegEncContext *s = &v->s;
2276 int xy, wrap, off = 0;
2281 const uint8_t *is_intra = v->mb_type[0];
2285 /* scale MV difference to be quad-pel */
2286 dmv_x[0] <<= 1 - s->quarter_sample;
2287 dmv_y[0] <<= 1 - s->quarter_sample;
2288 dmv_x[1] <<= 1 - s->quarter_sample;
2289 dmv_y[1] <<= 1 - s->quarter_sample;
2291 wrap = s->b8_stride;
2292 xy = s->block_index[0];
2295 s->current_picture.motion_val[0][xy][0] =
2296 s->current_picture.motion_val[0][xy][1] =
2297 s->current_picture.motion_val[1][xy][0] =
2298 s->current_picture.motion_val[1][xy][1] = 0;
2301 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2302 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2303 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2304 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2306 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2307 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2308 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2309 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2313 if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2314 C = s->current_picture.motion_val[0][xy - 2];
2315 A = s->current_picture.motion_val[0][xy - wrap*2];
2316 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2317 B = s->current_picture.motion_val[0][xy - wrap*2 + off];
2319 if(!s->first_slice_line) { // predictor A is not out of bounds
2320 if(s->mb_width == 1) {
2324 px = mid_pred(A[0], B[0], C[0]);
2325 py = mid_pred(A[1], B[1], C[1]);
2327 } else if(s->mb_x) { // predictor C is not out of bounds
2333 /* Pullback MV as specified in 8.3.5.3.4 */
2336 if(v->profile < PROFILE_ADVANCED) {
2337 qx = (s->mb_x << 5);
2338 qy = (s->mb_y << 5);
2339 X = (s->mb_width << 5) - 4;
2340 Y = (s->mb_height << 5) - 4;
2341 if(qx + px < -28) px = -28 - qx;
2342 if(qy + py < -28) py = -28 - qy;
2343 if(qx + px > X) px = X - qx;
2344 if(qy + py > Y) py = Y - qy;
2346 qx = (s->mb_x << 6);
2347 qy = (s->mb_y << 6);
2348 X = (s->mb_width << 6) - 4;
2349 Y = (s->mb_height << 6) - 4;
2350 if(qx + px < -60) px = -60 - qx;
2351 if(qy + py < -60) py = -60 - qy;
2352 if(qx + px > X) px = X - qx;
2353 if(qy + py > Y) py = Y - qy;
2356 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
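        /* note: the leading "0 &&" below keeps hybrid prediction disabled in this
           B-frame path (same in the backward-MV path further down) */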
2357 if(0 && !s->first_slice_line && s->mb_x) {
2358 if(is_intra[xy - wrap])
2359 sum = FFABS(px) + FFABS(py);
2361 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2363 if(get_bits1(&s->gb)) {
2371 if(is_intra[xy - 2])
2372 sum = FFABS(px) + FFABS(py);
2374 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2376 if(get_bits1(&s->gb)) {
2386 /* store MV using signed modulus of MV range defined in 4.11 */
2387 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2388 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
2390 if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2391 C = s->current_picture.motion_val[1][xy - 2];
2392 A = s->current_picture.motion_val[1][xy - wrap*2];
2393 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2394 B = s->current_picture.motion_val[1][xy - wrap*2 + off];
2396 if(!s->first_slice_line) { // predictor A is not out of bounds
2397 if(s->mb_width == 1) {
2401 px = mid_pred(A[0], B[0], C[0]);
2402 py = mid_pred(A[1], B[1], C[1]);
2404 } else if(s->mb_x) { // predictor C is not out of bounds
2410 /* Pullback MV as specified in 8.3.5.3.4 */
2413 if(v->profile < PROFILE_ADVANCED) {
2414 qx = (s->mb_x << 5);
2415 qy = (s->mb_y << 5);
2416 X = (s->mb_width << 5) - 4;
2417 Y = (s->mb_height << 5) - 4;
2418 if(qx + px < -28) px = -28 - qx;
2419 if(qy + py < -28) py = -28 - qy;
2420 if(qx + px > X) px = X - qx;
2421 if(qy + py > Y) py = Y - qy;
2423 qx = (s->mb_x << 6);
2424 qy = (s->mb_y << 6);
2425 X = (s->mb_width << 6) - 4;
2426 Y = (s->mb_height << 6) - 4;
2427 if(qx + px < -60) px = -60 - qx;
2428 if(qy + py < -60) py = -60 - qy;
2429 if(qx + px > X) px = X - qx;
2430 if(qy + py > Y) py = Y - qy;
2433 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2434 if(0 && !s->first_slice_line && s->mb_x) {
2435 if(is_intra[xy - wrap])
2436 sum = FFABS(px) + FFABS(py);
2438 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2440 if(get_bits1(&s->gb)) {
2448 if(is_intra[xy - 2])
2449 sum = FFABS(px) + FFABS(py);
2451 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2453 if(get_bits1(&s->gb)) {
2463 /* store MV using signed modulus of MV range defined in 4.11 */
2465 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2466 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2468 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2469 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2470 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2471 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2474 /** Get predicted DC value for I-frames only
2475 * prediction dir: left=0, top=1
2476 * @param s MpegEncContext
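 * @param overlap flag indicating that overlap filtering is in use
 * @param pq integer part of the picture quantizer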
2477 * @param[in] n block index in the current MB
2478 * @param dc_val_ptr Pointer to DC predictor
2479 * @param dir_ptr Prediction direction for use in AC prediction
2481 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2482 int16_t **dc_val_ptr, int *dir_ptr)
2484 int a, b, c, wrap, pred, scale;
2486 static const uint16_t dcpred[32] = {
2487 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2488 114, 102, 93, 85, 79, 73, 68, 64,
2489 60, 57, 54, 51, 49, 47, 45, 43,
2490 41, 39, 38, 37, 35, 34, 33
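    /* dcpred[scale] is roughly 1024/scale, i.e. the DC of a flat mid-grey (128) block
       divided by the DC scale; it serves as the default predictor when neighbours
       are unavailable */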
2493     /* find prediction - wmv3_dc_scale is in fact always used here */
2494 if (n < 4) scale = s->y_dc_scale;
2495 else scale = s->c_dc_scale;
2497 wrap = s->block_wrap[n];
2498 dc_val= s->dc_val[0] + s->block_index[n];
2504 b = dc_val[ - 1 - wrap];
2505 a = dc_val[ - wrap];
2507 if (pq < 9 || !overlap)
2509 /* Set outer values */
2510 if (s->first_slice_line && (n!=2 && n!=3)) b=a=dcpred[scale];
2511 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=dcpred[scale];
2515 /* Set outer values */
2516 if (s->first_slice_line && (n!=2 && n!=3)) b=a=0;
2517 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=0;
2520 if (abs(a - b) <= abs(b - c)) {
2528 /* update predictor */
2529 *dc_val_ptr = &dc_val[0];
2534 /** Get predicted DC value
2535 * prediction dir: left=0, top=1
2536 * @param s MpegEncContext
2537 * @param[in] n block index in the current MB
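 * @param a_avail flag indicating that the top (A) predictor is available
 * @param c_avail flag indicating that the left (C) predictor is available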
2538 * @param dc_val_ptr Pointer to DC predictor
2539 * @param dir_ptr Prediction direction for use in AC prediction
2541 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2542 int a_avail, int c_avail,
2543 int16_t **dc_val_ptr, int *dir_ptr)
2545 int a, b, c, wrap, pred, scale;
2547 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2550     /* find prediction - wmv3_dc_scale is in fact always used here */
2551 if (n < 4) scale = s->y_dc_scale;
2552 else scale = s->c_dc_scale;
2554 wrap = s->block_wrap[n];
2555 dc_val= s->dc_val[0] + s->block_index[n];
2561 b = dc_val[ - 1 - wrap];
2562 a = dc_val[ - wrap];
2563 /* scale predictors if needed */
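    /* a neighbour's DC is requantized from its quantizer (q2) to the current one (q1):
       value * scale(q2) / scale(q1), where vc1_dqscale[] holds reciprocals in roughly
       2^18 fixed point (hence the +0x20000 rounding and the >> 18) */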
2564 q1 = s->current_picture.qscale_table[mb_pos];
2565 if(c_avail && (n!= 1 && n!=3)) {
2566 q2 = s->current_picture.qscale_table[mb_pos - 1];
2568 c = (c * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2570 if(a_avail && (n!= 2 && n!=3)) {
2571 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2573 a = (a * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2575 if(a_avail && c_avail && (n!=3)) {
2578 if(n != 2) off -= s->mb_stride;
2579 q2 = s->current_picture.qscale_table[off];
2581 b = (b * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2584 if(a_avail && c_avail) {
2585 if(abs(a - b) <= abs(b - c)) {
2592 } else if(a_avail) {
2595 } else if(c_avail) {
2603 /* update predictor */
2604 *dc_val_ptr = &dc_val[0];
2610 * @defgroup std_mb VC1 Macroblock-level functions in Simple/Main Profiles
2611  * @see 7.1.4, p91 and 8.1.1.7, p104
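/** Predict the CODED flag of the current block from the flags of its
 * left (a), top-left (b) and top (c) neighbours.
 */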
2615 static inline int vc1_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
2617 int xy, wrap, pred, a, b, c;
2619 xy = s->block_index[n];
2620 wrap = s->b8_stride;
2625 a = s->coded_block[xy - 1 ];
2626 b = s->coded_block[xy - 1 - wrap];
2627 c = s->coded_block[xy - wrap];
2636 *coded_block_ptr = &s->coded_block[xy];
2642 * Decode one AC coefficient
2643 * @param v The VC1 context
2644  * @param last Set if the decoded coefficient is the last one in the block
2645  * @param skip How many zero coefficients to skip
2646 * @param value Decoded AC coefficient value
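 * @param codingset set of VLC to decode data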
2649 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
2651 GetBitContext *gb = &v->s.gb;
2652 int index, escape, run = 0, level = 0, lst = 0;
2654 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2655 if (index != vc1_ac_sizes[codingset] - 1) {
2656 run = vc1_index_decode_table[codingset][index][0];
2657 level = vc1_index_decode_table[codingset][index][1];
2658 lst = index >= vc1_last_decode_table[codingset];
2662 escape = decode210(gb);
2664 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2665 run = vc1_index_decode_table[codingset][index][0];
2666 level = vc1_index_decode_table[codingset][index][1];
2667 lst = index >= vc1_last_decode_table[codingset];
2670 level += vc1_last_delta_level_table[codingset][run];
2672 level += vc1_delta_level_table[codingset][run];
2675 run += vc1_last_delta_run_table[codingset][level] + 1;
2677 run += vc1_delta_run_table[codingset][level] + 1;
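            /* fully-explicit escape: run, sign and level are read as fixed-length
               fields whose bit lengths are established once per frame on first use
               (esc3_run_length / esc3_level_length below) */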
2683 lst = get_bits(gb, 1);
2684 if(v->s.esc3_level_length == 0) {
2685 if(v->pq < 8 || v->dquantfrm) { // table 59
2686 v->s.esc3_level_length = get_bits(gb, 3);
2687 if(!v->s.esc3_level_length)
2688 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2690 v->s.esc3_level_length = get_prefix(gb, 1, 6) + 2;
2692 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2694 run = get_bits(gb, v->s.esc3_run_length);
2695 sign = get_bits(gb, 1);
2696 level = get_bits(gb, v->s.esc3_level_length);
2707 /** Decode intra block in intra frames - should be faster than decode_intra_block
2708 * @param v VC1Context
2709 * @param block block to decode
2710  * @param coded whether AC coefficients are present or not
2711 * @param codingset set of VLC to decode data
2713 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
2715 GetBitContext *gb = &v->s.gb;
2716 MpegEncContext *s = &v->s;
2717 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2720 int16_t *ac_val, *ac_val2;
2723 /* Get DC differential */
2725 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2727 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2730 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2735 if (dcdiff == 119 /* ESC index value */)
2737 /* TODO: Optimize */
2738 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2739 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2740 else dcdiff = get_bits(gb, 8);
2745 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2746 else if (v->pq == 2)
2747 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2749 if (get_bits(gb, 1))
2754 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2757 /* Store the quantized DC coeff, used for prediction */
2759 block[0] = dcdiff * s->y_dc_scale;
2761 block[0] = dcdiff * s->c_dc_scale;
2774 int last = 0, skip, value;
2775 const int8_t *zz_table;
2779 scale = v->pq * 2 + v->halfpq;
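        /* the scan pattern follows the AC prediction: a horizontal or vertical
           alternate scan is chosen according to the prediction direction, the
           normal zigzag otherwise */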
2783 zz_table = vc1_horizontal_zz;
2785 zz_table = vc1_vertical_zz;
2787 zz_table = vc1_normal_zz;
2789 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2791 if(dc_pred_dir) //left
2794 ac_val -= 16 * s->block_wrap[n];
2797 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2801 block[zz_table[i++]] = value;
2804 /* apply AC prediction if needed */
2806 if(dc_pred_dir) { //left
2807 for(k = 1; k < 8; k++)
2808 block[k << 3] += ac_val[k];
2810 for(k = 1; k < 8; k++)
2811 block[k] += ac_val[k + 8];
2814 /* save AC coeffs for further prediction */
2815 for(k = 1; k < 8; k++) {
2816 ac_val2[k] = block[k << 3];
2817 ac_val2[k + 8] = block[k];
2820 /* scale AC coeffs */
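        /* dequantize: multiply by scale (2*PQ, plus one for half-step quantizers);
           with the non-uniform quantizer (!pquantizer) an extra +/- PQ reconstruction
           offset is added */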
2821 for(k = 1; k < 64; k++)
2825 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2828 if(s->ac_pred) i = 63;
2834 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2837 scale = v->pq * 2 + v->halfpq;
2838 memset(ac_val2, 0, 16 * 2);
2839 if(dc_pred_dir) {//left
2842 memcpy(ac_val2, ac_val, 8 * 2);
2844 ac_val -= 16 * s->block_wrap[n];
2846 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2849 /* apply AC prediction if needed */
2851 if(dc_pred_dir) { //left
2852 for(k = 1; k < 8; k++) {
2853 block[k << 3] = ac_val[k] * scale;
2854 if(!v->pquantizer && block[k << 3])
2855 block[k << 3] += (block[k << 3] < 0) ? -v->pq : v->pq;
2858 for(k = 1; k < 8; k++) {
2859 block[k] = ac_val[k + 8] * scale;
2860 if(!v->pquantizer && block[k])
2861 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2867 s->block_last_index[n] = i;
2872 /** Decode intra block in intra frames (Advanced Profile variant) - should be faster than decode_intra_block
2873 * @param v VC1Context
2874 * @param block block to decode
2875  * @param coded whether AC coefficients are present or not
2876 * @param codingset set of VLC to decode data
2878 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
2880 GetBitContext *gb = &v->s.gb;
2881 MpegEncContext *s = &v->s;
2882 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2885 int16_t *ac_val, *ac_val2;
2887 int a_avail = v->a_avail, c_avail = v->c_avail;
2888 int use_pred = s->ac_pred;
2891 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2893 /* Get DC differential */
2895 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2897 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2900 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2905 if (dcdiff == 119 /* ESC index value */)
2907 /* TODO: Optimize */
2908 if (mquant == 1) dcdiff = get_bits(gb, 10);
2909 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2910 else dcdiff = get_bits(gb, 8);
2915 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2916 else if (mquant == 2)
2917 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2919 if (get_bits(gb, 1))
2924 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2927 /* Store the quantized DC coeff, used for prediction */
2929 block[0] = dcdiff * s->y_dc_scale;
2931 block[0] = dcdiff * s->c_dc_scale;
2940 /* check if AC is needed at all and adjust direction if needed */
2941 if(!a_avail) dc_pred_dir = 1;
2942 if(!c_avail) dc_pred_dir = 0;
2943 if(!a_avail && !c_avail) use_pred = 0;
2944 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2947 scale = mquant * 2 + v->halfpq;
2949 if(dc_pred_dir) //left
2952 ac_val -= 16 * s->block_wrap[n];
2954 q1 = s->current_picture.qscale_table[mb_pos];
2955 if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
2956 if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2957 if(dc_pred_dir && n==1) q2 = q1;
2958 if(!dc_pred_dir && n==2) q2 = q1;
2962 int last = 0, skip, value;
2963 const int8_t *zz_table;
2968 zz_table = vc1_horizontal_zz;
2970 zz_table = vc1_vertical_zz;
2972 zz_table = vc1_normal_zz;
2975 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2979 block[zz_table[i++]] = value;
2982 /* apply AC prediction if needed */
2984         /* scale predictors if needed */
2986 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2987 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2989 if(dc_pred_dir) { //left
2990 for(k = 1; k < 8; k++)
2991 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2993 for(k = 1; k < 8; k++)
2994 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2997 if(dc_pred_dir) { //left
2998 for(k = 1; k < 8; k++)
2999 block[k << 3] += ac_val[k];
3001 for(k = 1; k < 8; k++)
3002 block[k] += ac_val[k + 8];
3006 /* save AC coeffs for further prediction */
3007 for(k = 1; k < 8; k++) {
3008 ac_val2[k] = block[k << 3];
3009 ac_val2[k + 8] = block[k];
3012 /* scale AC coeffs */
3013 for(k = 1; k < 64; k++)
3017 block[k] += (block[k] < 0) ? -mquant : mquant;
3020 if(use_pred) i = 63;
3021 } else { // no AC coeffs
3024 memset(ac_val2, 0, 16 * 2);
3025 if(dc_pred_dir) {//left
3027 memcpy(ac_val2, ac_val, 8 * 2);
3029 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3030 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3031 for(k = 1; k < 8; k++)
3032 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3037 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3039 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3040 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3041 for(k = 1; k < 8; k++)
3042 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3047 /* apply AC prediction if needed */
3049 if(dc_pred_dir) { //left
3050 for(k = 1; k < 8; k++) {
3051 block[k << 3] = ac_val2[k] * scale;
3052 if(!v->pquantizer && block[k << 3])
3053 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
3056 for(k = 1; k < 8; k++) {
3057 block[k] = ac_val2[k + 8] * scale;
3058 if(!v->pquantizer && block[k])
3059 block[k] += (block[k] < 0) ? -mquant : mquant;
3065 s->block_last_index[n] = i;
3070 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
3071 * @param v VC1Context
3072 * @param block block to decode
3073  * @param coded whether AC coefficients are present or not
3074 * @param mquant block quantizer
3075 * @param codingset set of VLC to decode data
3077 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
3079 GetBitContext *gb = &v->s.gb;
3080 MpegEncContext *s = &v->s;
3081 int dc_pred_dir = 0; /* Direction of the DC prediction used */
3084 int16_t *ac_val, *ac_val2;
3086 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3087 int a_avail = v->a_avail, c_avail = v->c_avail;
3088 int use_pred = s->ac_pred;
3092     /* XXX: Guard against out-of-range values of mquant */
3093 mquant = (mquant < 1) ? 0 : ( (mquant>31) ? 31 : mquant );
3095 /* Set DC scale - y and c use the same */
3096 s->y_dc_scale = s->y_dc_scale_table[mquant];
3097 s->c_dc_scale = s->c_dc_scale_table[mquant];
3099 /* Get DC differential */
3101 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3103 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3106 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
3111 if (dcdiff == 119 /* ESC index value */)
3113 /* TODO: Optimize */
3114 if (mquant == 1) dcdiff = get_bits(gb, 10);
3115 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3116 else dcdiff = get_bits(gb, 8);
3121 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
3122 else if (mquant == 2)
3123 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
3125 if (get_bits(gb, 1))
3130 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3133 /* Store the quantized DC coeff, used for prediction */
3136 block[0] = dcdiff * s->y_dc_scale;
3138 block[0] = dcdiff * s->c_dc_scale;
3147 /* check if AC is needed at all and adjust direction if needed */
3148 if(!a_avail) dc_pred_dir = 1;
3149 if(!c_avail) dc_pred_dir = 0;
3150 if(!a_avail && !c_avail) use_pred = 0;
3151 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3154 scale = mquant * 2 + v->halfpq;
3156 if(dc_pred_dir) //left
3159 ac_val -= 16 * s->block_wrap[n];
3161 q1 = s->current_picture.qscale_table[mb_pos];
3162 if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
3163 if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3164 if(dc_pred_dir && n==1) q2 = q1;
3165 if(!dc_pred_dir && n==2) q2 = q1;
3169 int last = 0, skip, value;
3170 const int8_t *zz_table;
3173 zz_table = vc1_simple_progressive_8x8_zz;
3176 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3180 block[zz_table[i++]] = value;
3183 /* apply AC prediction if needed */
3185         /* scale predictors if needed */
3187 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3188 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3190 if(dc_pred_dir) { //left
3191 for(k = 1; k < 8; k++)
3192 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3194 for(k = 1; k < 8; k++)
3195 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3198 if(dc_pred_dir) { //left
3199 for(k = 1; k < 8; k++)
3200 block[k << 3] += ac_val[k];
3202 for(k = 1; k < 8; k++)
3203 block[k] += ac_val[k + 8];
3207 /* save AC coeffs for further prediction */
3208 for(k = 1; k < 8; k++) {
3209 ac_val2[k] = block[k << 3];
3210 ac_val2[k + 8] = block[k];
3213 /* scale AC coeffs */
3214 for(k = 1; k < 64; k++)
3218 block[k] += (block[k] < 0) ? -mquant : mquant;
3221 if(use_pred) i = 63;
3222 } else { // no AC coeffs
3225 memset(ac_val2, 0, 16 * 2);
3226 if(dc_pred_dir) {//left
3228 memcpy(ac_val2, ac_val, 8 * 2);
3230 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3231 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3232 for(k = 1; k < 8; k++)
3233 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3238 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3240 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3241 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3242 for(k = 1; k < 8; k++)
3243 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3248 /* apply AC prediction if needed */
3250 if(dc_pred_dir) { //left
3251 for(k = 1; k < 8; k++) {
3252 block[k << 3] = ac_val2[k] * scale;
3253 if(!v->pquantizer && block[k << 3])
3254 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
3257 for(k = 1; k < 8; k++) {
3258 block[k] = ac_val2[k + 8] * scale;
3259 if(!v->pquantizer && block[k])
3260 block[k] += (block[k] < 0) ? -mquant : mquant;
3266 s->block_last_index[n] = i;
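/** Decode one residual block of a P-frame MB: read the block transform type
 * (TTBLK) and sub-block pattern when they are not fixed at frame/MB level,
 * decode the coefficients of each 8x8/8x4/4x8/4x4 partition and apply the
 * matching inverse transform.
 */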
3273 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block)
3275 MpegEncContext *s = &v->s;
3276 GetBitContext *gb = &s->gb;
3279 int scale, off, idx, last, skip, value;
3280 int ttblk = ttmb & 7;
3283 ttblk = ttblk_to_tt[v->tt_index][get_vlc2(gb, vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3285 if(ttblk == TT_4X4) {
3286 subblkpat = ~(get_vlc2(gb, vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3288 if((ttblk != TT_8X8 && ttblk != TT_4X4) && (v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))) {
3289 subblkpat = decode012(gb);
3290 if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits
3291 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4;
3292 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8;
3294 scale = 2 * mquant + v->halfpq;
3296 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3297 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3298 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3301 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3302 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
3310 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3314 idx = vc1_simple_progressive_8x8_zz[i++];
3315 block[idx] = value * scale;
3317 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3319 s->dsp.vc1_inv_trans_8x8(block);
3322 for(j = 0; j < 4; j++) {
3323 last = subblkpat & (1 << (3 - j));
3325 off = (j & 1) * 4 + (j & 2) * 16;
3327 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3331 idx = vc1_simple_progressive_4x4_zz[i++];
3332 block[idx + off] = value * scale;
3334 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3336 if(!(subblkpat & (1 << (3 - j))))
3337 s->dsp.vc1_inv_trans_4x4(block, j);
3341 for(j = 0; j < 2; j++) {
3342 last = subblkpat & (1 << (1 - j));
3346 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3350 if(v->profile < PROFILE_ADVANCED)
3351 idx = vc1_simple_progressive_8x4_zz[i++];
3353 idx = vc1_adv_progressive_8x4_zz[i++];
3354 block[idx + off] = value * scale;
3356 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3358 if(!(subblkpat & (1 << (1 - j))))
3359 s->dsp.vc1_inv_trans_8x4(block, j);
3363 for(j = 0; j < 2; j++) {
3364 last = subblkpat & (1 << (1 - j));
3368 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3372 if(v->profile < PROFILE_ADVANCED)
3373 idx = vc1_simple_progressive_4x8_zz[i++];
3375 idx = vc1_adv_progressive_4x8_zz[i++];
3376 block[idx + off] = value * scale;
3378 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3380 if(!(subblkpat & (1 << (1 - j))))
3381 s->dsp.vc1_inv_trans_4x8(block, j);
3389 /** Decode one P-frame MB (in Simple/Main profile)
3391 static int vc1_decode_p_mb(VC1Context *v)
3393 MpegEncContext *s = &v->s;
3394 GetBitContext *gb = &s->gb;
3396 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3397 int cbp; /* cbp decoding stuff */
3398 int mqdiff, mquant; /* MB quantization */
3399 int ttmb = v->ttfrm; /* MB Transform type */
3402 static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3403 offset_table[6] = { 0, 1, 3, 7, 15, 31 };
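    /* tables used by the GET_MVDATA() macro to expand the variable-length
       MV differential (index -> extra bits / value offset) */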
3404 int mb_has_coeffs = 1; /* last_flag */
3405 int dmv_x, dmv_y; /* Differential MV components */
3406 int index, index1; /* LUT indices */
3407 int val, sign; /* temp values */
3408 int first_block = 1;
3410 int skipped, fourmv;
3412     mquant = v->pq; /* provisional initialization: default to PQUANT */
3414 if (v->mv_type_is_raw)
3415 fourmv = get_bits1(gb);
3417 fourmv = v->mv_type_mb_plane[mb_pos];
3419 skipped = get_bits1(gb);
3421 skipped = v->s.mbskip_table[mb_pos];
3423 s->dsp.clear_blocks(s->block[0]);
3425 if (!fourmv) /* 1MV mode */
3429 GET_MVDATA(dmv_x, dmv_y);
3432 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3433 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3435 s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3436 vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3438 /* FIXME Set DC val for inter block ? */
3439 if (s->mb_intra && !mb_has_coeffs)
3442 s->ac_pred = get_bits(gb, 1);
3445 else if (mb_has_coeffs)
3447 if (s->mb_intra) s->ac_pred = get_bits(gb, 1);
3448 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3456 s->current_picture.qscale_table[mb_pos] = mquant;
3458 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3459 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table,
3460 VC1_TTMB_VLC_BITS, 2);
3461 if(!s->mb_intra) vc1_mc_1mv(v, 0);
3465 s->dc_val[0][s->block_index[i]] = 0;
3467 val = ((cbp >> (5 - i)) & 1);
3468 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3469 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3471 /* check if prediction blocks A and C are available */
3472 v->a_avail = v->c_avail = 0;
3473 if(i == 2 || i == 3 || !s->first_slice_line)
3474 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3475 if(i == 1 || i == 3 || s->mb_x)
3476 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3478 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3479 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3480 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3481 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3482 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3483 if(!v->res_fasttx && v->res_x8) for(j = 0; j < 64; j++) s->block[i][j] += 16;
3484 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3485 if(v->pq >= 9 && v->overlap) {
3487 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3489 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3492 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3493 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3495 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3496 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3503 for(i = 0; i < 6; i++) {
3504 v->mb_type[0][s->block_index[i]] = 0;
3505 s->dc_val[0][s->block_index[i]] = 0;
3507 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3508 s->current_picture.qscale_table[mb_pos] = 0;
3509 vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
3516 if (!skipped /* unskipped MB */)
3518 int intra_count = 0, coded_inter = 0;
3519 int is_intra[6], is_coded[6];
3521 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3524 val = ((cbp >> (5 - i)) & 1);
3525 s->dc_val[0][s->block_index[i]] = 0;
3532 GET_MVDATA(dmv_x, dmv_y);
3534 vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3535 if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
3536 intra_count += s->mb_intra;
3537 is_intra[i] = s->mb_intra;
3538 is_coded[i] = mb_has_coeffs;