2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2006 Konstantin Shishkov
4 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
32 #include "mpegvideo.h"
34 #include "vc1acdata.h"
39 extern const uint32_t ff_table0_dc_lum[120][2], ff_table1_dc_lum[120][2];
40 extern const uint32_t ff_table0_dc_chroma[120][2], ff_table1_dc_chroma[120][2];
41 extern VLC ff_msmp4_dc_luma_vlc[2], ff_msmp4_dc_chroma_vlc[2];
42 #define MB_INTRA_VLC_BITS 9
43 extern VLC ff_msmp4_mb_i_vlc;
44 extern const uint16_t ff_msmp4_mb_i_table[64][2];
47 static const uint16_t table_mb_intra[64][2];
49 /** Markers used if VC-1 AP frame data */
52 VC1_CODE_RES0 = 0x00000100,
53 VC1_CODE_ENDOFSEQ = 0x0000010A,
62 /** Available Profiles */
67 PROFILE_COMPLEX, ///< TODO: WMV9 specific
72 /** Sequence quantizer mode */
75 QUANT_FRAME_IMPLICIT, ///< Implicitly specified at frame level
76 QUANT_FRAME_EXPLICIT, ///< Explicitly specified at frame level
77 QUANT_NON_UNIFORM, ///< Non-uniform quant used for all frames
78 QUANT_UNIFORM ///< Uniform quant used for all frames
82 /** Where quant can be changed */
86 DQPROFILE_DOUBLE_EDGES,
87 DQPROFILE_SINGLE_EDGE,
92 /** @name Where quant can be changed
103 /** Which pair of edges is quantized with ALTPQUANT */
106 DQDOUBLE_BEDGE_TOPLEFT,
107 DQDOUBLE_BEDGE_TOPRIGHT,
108 DQDOUBLE_BEDGE_BOTTOMRIGHT,
109 DQDOUBLE_BEDGE_BOTTOMLEFT
113 /** MV modes for P frames */
116 MV_PMODE_1MV_HPEL_BILIN,
120 MV_PMODE_INTENSITY_COMP
124 /** @name MV types for B frames */
129 BMV_TYPE_INTERPOLATED
133 /** @name Block types for P/B frames */
135 enum TransformTypes {
139 TT_8X4, //Both halves
142 TT_4X8, //Both halves
147 /** Table for conversion between TTBLK and TTMB */
148 static const int ttblk_to_tt[3][8] = {
149 { TT_8X4, TT_4X8, TT_8X8, TT_4X4, TT_8X4_TOP, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT },
150 { TT_8X8, TT_4X8_RIGHT, TT_4X8_LEFT, TT_4X4, TT_8X4, TT_4X8, TT_8X4_BOTTOM, TT_8X4_TOP },
151 { TT_8X8, TT_4X8, TT_4X4, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT, TT_8X4, TT_8X4_TOP }
154 static const int ttfrm_to_tt[4] = { TT_8X8, TT_8X4, TT_4X8, TT_4X4 };
156 /** MV P mode - the 5th element is only used for mode 1 */
157 static const uint8_t mv_pmode_table[2][5] = {
158 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_MIXED_MV },
159 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_1MV_HPEL_BILIN }
161 static const uint8_t mv_pmode_table2[2][4] = {
162 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_MIXED_MV },
163 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_1MV_HPEL_BILIN }
166 /** One more frame type */
169 static const int fps_nr[5] = { 24, 25, 30, 50, 60 },
170 fps_dr[2] = { 1000, 1001 };
171 static const uint8_t pquant_table[3][32] = {
172 { /* Implicit quantizer */
173 0, 1, 2, 3, 4, 5, 6, 7, 8, 6, 7, 8, 9, 10, 11, 12,
174 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 29, 31
176 { /* Explicit quantizer, pquantizer uniform */
177 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
178 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
180 { /* Explicit quantizer, pquantizer non-uniform */
181 0, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
182 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31
186 /** @name VC-1 VLC tables and defines
187 * @todo TODO move this into the context
190 #define VC1_BFRACTION_VLC_BITS 7
191 static VLC vc1_bfraction_vlc;
192 #define VC1_IMODE_VLC_BITS 4
193 static VLC vc1_imode_vlc;
194 #define VC1_NORM2_VLC_BITS 3
195 static VLC vc1_norm2_vlc;
196 #define VC1_NORM6_VLC_BITS 9
197 static VLC vc1_norm6_vlc;
198 /* Could be optimized, one table only needs 8 bits */
199 #define VC1_TTMB_VLC_BITS 9 //12
200 static VLC vc1_ttmb_vlc[3];
201 #define VC1_MV_DIFF_VLC_BITS 9 //15
202 static VLC vc1_mv_diff_vlc[4];
203 #define VC1_CBPCY_P_VLC_BITS 9 //14
204 static VLC vc1_cbpcy_p_vlc[4];
205 #define VC1_4MV_BLOCK_PATTERN_VLC_BITS 6
206 static VLC vc1_4mv_block_pattern_vlc[4];
207 #define VC1_TTBLK_VLC_BITS 5
208 static VLC vc1_ttblk_vlc[3];
209 #define VC1_SUBBLKPAT_VLC_BITS 6
210 static VLC vc1_subblkpat_vlc[3];
212 static VLC vc1_ac_coeff_table[8];
216 CS_HIGH_MOT_INTRA = 0,
226 /** @name Overlap conditions for Advanced Profile */
237 * @fixme Change size wherever another size is more efficient
238 * Many members are only used for Advanced Profile
240 typedef struct VC1Context{
245 /** Simple/Main Profile sequence header */
247 int res_sm; ///< reserved, 2b
248 int res_x8; ///< reserved
249 int multires; ///< frame-level RESPIC syntax element present
250 int res_fasttx; ///< reserved, always 1
251 int res_transtab; ///< reserved, always 0
252 int rangered; ///< RANGEREDFRM (range reduction) syntax element present
254 int res_rtm_flag; ///< reserved, set to 1
255 int reserved; ///< reserved
258 /** Advanced Profile */
260 int level; ///< 3bits, for Advanced/Simple Profile, provided by TS layer
261 int chromaformat; ///< 2bits, 2=4:2:0, only defined
262 int postprocflag; ///< Per-frame processing suggestion flag present
263 int broadcast; ///< TFF/RFF present
264 int interlace; ///< Progressive/interlaced (RPTFTM syntax element)
265 int tfcntrflag; ///< TFCNTR present
266 int panscanflag; ///< NUMPANSCANWIN, TOPLEFT{X,Y}, BOTRIGHT{X,Y} present
267 int extended_dmv; ///< Additional extended dmv range at P/B frame-level
268 int color_prim; ///< 8bits, chroma coordinates of the color primaries
269 int transfer_char; ///< 8bits, Opto-electronic transfer characteristics
270 int matrix_coef; ///< 8bits, Color primaries->YCbCr transform matrix
271 int hrd_param_flag; ///< Presence of Hypothetical Reference
272 ///< Decoder parameters
273 int psf; ///< Progressive Segmented Frame
276 /** Sequence header data for all Profiles
277 * TODO: choose between ints, uint8_ts and monobit flags
280 int profile; ///< 2bits, Profile
281 int frmrtq_postproc; ///< 3bits,
282 int bitrtq_postproc; ///< 5bits, quantized framerate-based postprocessing strength
283 int fastuvmc; ///< Rounding of qpel vector to hpel ? (not in Simple)
284 int extended_mv; ///< Ext MV in P/B (not in Simple)
285 int dquant; ///< How qscale varies with MBs, 2bits (not in Simple)
286 int vstransform; ///< variable-size [48]x[48] transform type + info
287 int overlap; ///< overlapped transforms in use
288 int quantizer_mode; ///< 2bits, quantizer mode used for sequence, see QUANT_*
289 int finterpflag; ///< INTERPFRM present
292 /** Frame decoding info for all profiles */
294 uint8_t mv_mode; ///< MV coding monde
295 uint8_t mv_mode2; ///< Secondary MV coding mode (B frames)
296 int k_x; ///< Number of bits for MVs (depends on MV range)
297 int k_y; ///< Number of bits for MVs (depends on MV range)
298 int range_x, range_y; ///< MV range
299 uint8_t pq, altpq; ///< Current/alternate frame quantizer scale
300 /** pquant parameters */
307 /** AC coding set indexes
308 * @see 8.1.1.10, p(1)10
311 int c_ac_table_index; ///< Chroma index from ACFRM element
312 int y_ac_table_index; ///< Luma index from AC2FRM element
314 int ttfrm; ///< Transform type info present at frame level
315 uint8_t ttmbf; ///< Transform type flag
316 uint8_t ttblk4x4; ///< Value of ttblk which indicates a 4x4 transform
317 int codingset; ///< index of current table set from 11.8 to use for luma block decoding
318 int codingset2; ///< index of current table set from 11.8 to use for chroma block decoding
319 int pqindex; ///< raw pqindex used in coding set selection
320 int a_avail, c_avail;
321 uint8_t *mb_type_base, *mb_type[3];
324 /** Luma compensation parameters */
329 int16_t bfraction; ///< Relative position % anchors=> how to scale MVs
330 uint8_t halfpq; ///< Uniform quant over image and qp+.5
331 uint8_t respic; ///< Frame-level flag for resized images
332 int buffer_fullness; ///< HRD info
334 * -# 0 -> [-64n 63.f] x [-32, 31.f]
335 * -# 1 -> [-128, 127.f] x [-64, 63.f]
336 * -# 2 -> [-512, 511.f] x [-128, 127.f]
337 * -# 3 -> [-1024, 1023.f] x [-256, 255.f]
340 uint8_t pquantizer; ///< Uniform (over sequence) quantizer in use
341 VLC *cbpcy_vlc; ///< CBPCY VLC table
342 int tt_index; ///< Index for Transform Type tables
343 uint8_t* mv_type_mb_plane; ///< bitplane for mv_type == (4MV)
344 uint8_t* direct_mb_plane; ///< bitplane for "direct" MBs
345 int mv_type_is_raw; ///< mv type mb plane is not coded
346 int dmb_is_raw; ///< direct mb plane is raw
347 int skip_is_raw; ///< skip mb plane is not coded
348 uint8_t luty[256], lutuv[256]; // lookup tables used for intensity compensation
349 int use_ic; ///< use intensity compensation in B-frames
350 int rnd; ///< rounding control
352 /** Frame decoding info for S/M profiles only */
354 uint8_t rangeredfrm; ///< out_sample = CLIP((in_sample-128)*2+128)
358 /** Frame decoding info for Advanced profile */
360 uint8_t fcm; ///< 0->Progressive, 2->Frame-Interlace, 3->Field-Interlace
361 uint8_t numpanscanwin;
363 uint8_t rptfrm, tff, rff;
366 uint16_t bottomrightx;
367 uint16_t bottomrighty;
370 int hrd_num_leaky_buckets;
371 uint8_t bit_rate_exponent;
372 uint8_t buffer_size_exponent;
373 uint8_t* acpred_plane; ///< AC prediction flags bitplane
375 uint8_t* over_flags_plane; ///< Overflags bitplane
378 uint16_t *hrd_rate, *hrd_buffer;
379 uint8_t *hrd_fullness;
380 uint8_t range_mapy_flag;
381 uint8_t range_mapuv_flag;
391 * Get unary code of limited length
392 * @fixme FIXME Slow and ugly
393 * @param gb GetBitContext
394 * @param[in] stop The bitstop value (unary code of 1's or 0's)
395 * @param[in] len Maximum length
396 * @return Unary length/index
398 static int get_prefix(GetBitContext *gb, int stop, int len)
403 for(i = 0; i < len && get_bits1(gb) != stop; i++);
405 /* int i = 0, tmp = !stop;
407 while (i != len && tmp != stop)
409 tmp = get_bits(gb, 1);
412 if (i == len && tmp != stop) return len+1;
419 UPDATE_CACHE(re, gb);
420 buf=GET_CACHE(re, gb); //Still not sure
421 if (stop) buf = ~buf;
423 log= av_log2(-buf); //FIXME: -?
425 LAST_SKIP_BITS(re, gb, log+1);
426 CLOSE_READER(re, gb);
430 LAST_SKIP_BITS(re, gb, limit);
431 CLOSE_READER(re, gb);
436 static inline int decode210(GetBitContext *gb){
442 return 2 - get_bits1(gb);
446 * Init VC-1 specific tables and VC1Context members
447 * @param v The VC1Context to initialize
450 static int vc1_init_common(VC1Context *v)
455 v->hrd_rate = v->hrd_buffer = NULL;
461 init_vlc(&vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
462 vc1_bfraction_bits, 1, 1,
463 vc1_bfraction_codes, 1, 1, 1);
464 init_vlc(&vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
465 vc1_norm2_bits, 1, 1,
466 vc1_norm2_codes, 1, 1, 1);
467 init_vlc(&vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
468 vc1_norm6_bits, 1, 1,
469 vc1_norm6_codes, 2, 2, 1);
470 init_vlc(&vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
471 vc1_imode_bits, 1, 1,
472 vc1_imode_codes, 1, 1, 1);
475 init_vlc(&vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
476 vc1_ttmb_bits[i], 1, 1,
477 vc1_ttmb_codes[i], 2, 2, 1);
478 init_vlc(&vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
479 vc1_ttblk_bits[i], 1, 1,
480 vc1_ttblk_codes[i], 1, 1, 1);
481 init_vlc(&vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
482 vc1_subblkpat_bits[i], 1, 1,
483 vc1_subblkpat_codes[i], 1, 1, 1);
487 init_vlc(&vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
488 vc1_4mv_block_pattern_bits[i], 1, 1,
489 vc1_4mv_block_pattern_codes[i], 1, 1, 1);
490 init_vlc(&vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
491 vc1_cbpcy_p_bits[i], 1, 1,
492 vc1_cbpcy_p_codes[i], 2, 2, 1);
493 init_vlc(&vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
494 vc1_mv_diff_bits[i], 1, 1,
495 vc1_mv_diff_codes[i], 2, 2, 1);
498 init_vlc(&vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
499 &vc1_ac_tables[i][0][1], 8, 4,
500 &vc1_ac_tables[i][0][0], 8, 4, 1);
501 init_vlc(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64,
502 &ff_msmp4_mb_i_table[0][1], 4, 2,
503 &ff_msmp4_mb_i_table[0][0], 4, 2, 1);
508 v->mvrange = 0; /* 7.1.1.18, p80 */
513 /***********************************************************************/
515 * @defgroup bitplane VC9 Bitplane decoding
520 /** @addtogroup bitplane
533 /** @} */ //imode defines
535 /** Decode rows by checking if they are skipped
536 * @param plane Buffer to store decoded bits
537 * @param[in] width Width of this buffer
538 * @param[in] height Height of this buffer
539 * @param[in] stride of this buffer
541 static void decode_rowskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
544 for (y=0; y<height; y++){
545 if (!get_bits(gb, 1)) //rowskip
546 memset(plane, 0, width);
548 for (x=0; x<width; x++)
549 plane[x] = get_bits(gb, 1);
554 /** Decode columns by checking if they are skipped
555 * @param plane Buffer to store decoded bits
556 * @param[in] width Width of this buffer
557 * @param[in] height Height of this buffer
558 * @param[in] stride of this buffer
559 * @fixme FIXME: Optimize
561 static void decode_colskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
564 for (x=0; x<width; x++){
565 if (!get_bits(gb, 1)) //colskip
566 for (y=0; y<height; y++)
569 for (y=0; y<height; y++)
570 plane[y*stride] = get_bits(gb, 1);
575 /** Decode a bitplane's bits
576 * @param bp Bitplane where to store the decode bits
577 * @param v VC-1 context for bit reading and logging
579 * @fixme FIXME: Optimize
581 static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
583 GetBitContext *gb = &v->s.gb;
585 int imode, x, y, code, offset;
586 uint8_t invert, *planep = data;
587 int width, height, stride;
589 width = v->s.mb_width;
590 height = v->s.mb_height;
591 stride = v->s.mb_stride;
592 invert = get_bits(gb, 1);
593 imode = get_vlc2(gb, vc1_imode_vlc.table, VC1_IMODE_VLC_BITS, 1);
599 //Data is actually read in the MB layer (same for all tests == "raw")
600 *raw_flag = 1; //invert ignored
604 if ((height * width) & 1)
606 *planep++ = get_bits(gb, 1);
610 // decode bitplane as one long line
611 for (y = offset; y < height * width; y += 2) {
612 code = get_vlc2(gb, vc1_norm2_vlc.table, VC1_NORM2_VLC_BITS, 1);
613 *planep++ = code & 1;
615 if(offset == width) {
617 planep += stride - width;
619 *planep++ = code >> 1;
621 if(offset == width) {
623 planep += stride - width;
629 if(!(height % 3) && (width % 3)) { // use 2x3 decoding
630 for(y = 0; y < height; y+= 3) {
631 for(x = width & 1; x < width; x += 2) {
632 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
634 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
637 planep[x + 0] = (code >> 0) & 1;
638 planep[x + 1] = (code >> 1) & 1;
639 planep[x + 0 + stride] = (code >> 2) & 1;
640 planep[x + 1 + stride] = (code >> 3) & 1;
641 planep[x + 0 + stride * 2] = (code >> 4) & 1;
642 planep[x + 1 + stride * 2] = (code >> 5) & 1;
644 planep += stride * 3;
646 if(width & 1) decode_colskip(data, 1, height, stride, &v->s.gb);
648 planep += (height & 1) * stride;
649 for(y = height & 1; y < height; y += 2) {
650 for(x = width % 3; x < width; x += 3) {
651 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
653 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
656 planep[x + 0] = (code >> 0) & 1;
657 planep[x + 1] = (code >> 1) & 1;
658 planep[x + 2] = (code >> 2) & 1;
659 planep[x + 0 + stride] = (code >> 3) & 1;
660 planep[x + 1 + stride] = (code >> 4) & 1;
661 planep[x + 2 + stride] = (code >> 5) & 1;
663 planep += stride * 2;
666 if(x) decode_colskip(data , x, height , stride, &v->s.gb);
667 if(height & 1) decode_rowskip(data+x, width - x, 1, stride, &v->s.gb);
671 decode_rowskip(data, width, height, stride, &v->s.gb);
674 decode_colskip(data, width, height, stride, &v->s.gb);
679 /* Applying diff operator */
680 if (imode == IMODE_DIFF2 || imode == IMODE_DIFF6)
684 for (x=1; x<width; x++)
685 planep[x] ^= planep[x-1];
686 for (y=1; y<height; y++)
689 planep[0] ^= planep[-stride];
690 for (x=1; x<width; x++)
692 if (planep[x-1] != planep[x-stride]) planep[x] ^= invert;
693 else planep[x] ^= planep[x-1];
700 for (x=0; x<stride*height; x++) planep[x] = !planep[x]; //FIXME stride
702 return (imode<<1) + invert;
705 /** @} */ //Bitplane group
707 /***********************************************************************/
708 /** VOP Dquant decoding
709 * @param v VC-1 Context
711 static int vop_dquant_decoding(VC1Context *v)
713 GetBitContext *gb = &v->s.gb;
719 pqdiff = get_bits(gb, 3);
720 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
721 else v->altpq = v->pq + pqdiff + 1;
725 v->dquantfrm = get_bits(gb, 1);
728 v->dqprofile = get_bits(gb, 2);
729 switch (v->dqprofile)
731 case DQPROFILE_SINGLE_EDGE:
732 case DQPROFILE_DOUBLE_EDGES:
733 v->dqsbedge = get_bits(gb, 2);
735 case DQPROFILE_ALL_MBS:
736 v->dqbilevel = get_bits(gb, 1);
737 default: break; //Forbidden ?
739 if (v->dqbilevel || v->dqprofile != DQPROFILE_ALL_MBS)
741 pqdiff = get_bits(gb, 3);
742 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
743 else v->altpq = v->pq + pqdiff + 1;
750 /** Put block onto picture
752 static void vc1_put_block(VC1Context *v, DCTELEM block[6][64])
756 DSPContext *dsp = &v->s.dsp;
760 for(k = 0; k < 6; k++)
761 for(j = 0; j < 8; j++)
762 for(i = 0; i < 8; i++)
763 block[k][i + j*8] = ((block[k][i + j*8] - 128) << 1) + 128;
766 ys = v->s.current_picture.linesize[0];
767 us = v->s.current_picture.linesize[1];
768 vs = v->s.current_picture.linesize[2];
771 dsp->put_pixels_clamped(block[0], Y, ys);
772 dsp->put_pixels_clamped(block[1], Y + 8, ys);
774 dsp->put_pixels_clamped(block[2], Y, ys);
775 dsp->put_pixels_clamped(block[3], Y + 8, ys);
777 if(!(v->s.flags & CODEC_FLAG_GRAY)) {
778 dsp->put_pixels_clamped(block[4], v->s.dest[1], us);
779 dsp->put_pixels_clamped(block[5], v->s.dest[2], vs);
783 /** Do motion compensation over 1 macroblock
784 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
786 static void vc1_mc_1mv(VC1Context *v, int dir)
788 MpegEncContext *s = &v->s;
789 DSPContext *dsp = &v->s.dsp;
790 uint8_t *srcY, *srcU, *srcV;
791 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
793 if(!v->s.last_picture.data[0])return;
795 mx = s->mv[dir][0][0];
796 my = s->mv[dir][0][1];
798 // store motion vectors for further use in B frames
799 if(s->pict_type == P_TYPE) {
800 s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
801 s->current_picture.motion_val[1][s->block_index[0]][1] = my;
803 uvmx = (mx + ((mx & 3) == 3)) >> 1;
804 uvmy = (my + ((my & 3) == 3)) >> 1;
806 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
807 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
810 srcY = s->last_picture.data[0];
811 srcU = s->last_picture.data[1];
812 srcV = s->last_picture.data[2];
814 srcY = s->next_picture.data[0];
815 srcU = s->next_picture.data[1];
816 srcV = s->next_picture.data[2];
819 src_x = s->mb_x * 16 + (mx >> 2);
820 src_y = s->mb_y * 16 + (my >> 2);
821 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
822 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
824 src_x = clip( src_x, -16, s->mb_width * 16);
825 src_y = clip( src_y, -16, s->mb_height * 16);
826 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
827 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
829 srcY += src_y * s->linesize + src_x;
830 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
831 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
833 /* for grayscale we should not try to read from unknown area */
834 if(s->flags & CODEC_FLAG_GRAY) {
835 srcU = s->edge_emu_buffer + 18 * s->linesize;
836 srcV = s->edge_emu_buffer + 18 * s->linesize;
839 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
840 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
841 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
842 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
844 srcY -= s->mspel * (1 + s->linesize);
845 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
846 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
847 srcY = s->edge_emu_buffer;
848 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
849 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
850 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
851 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
854 /* if we deal with range reduction we need to scale source blocks */
860 for(j = 0; j < 17 + s->mspel*2; j++) {
861 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
864 src = srcU; src2 = srcV;
865 for(j = 0; j < 9; j++) {
866 for(i = 0; i < 9; i++) {
867 src[i] = ((src[i] - 128) >> 1) + 128;
868 src2[i] = ((src2[i] - 128) >> 1) + 128;
870 src += s->uvlinesize;
871 src2 += s->uvlinesize;
874 /* if we deal with intensity compensation we need to scale source blocks */
875 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
880 for(j = 0; j < 17 + s->mspel*2; j++) {
881 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]];
884 src = srcU; src2 = srcV;
885 for(j = 0; j < 9; j++) {
886 for(i = 0; i < 9; i++) {
887 src[i] = v->lutuv[src[i]];
888 src2[i] = v->lutuv[src2[i]];
890 src += s->uvlinesize;
891 src2 += s->uvlinesize;
894 srcY += s->mspel * (1 + s->linesize);
898 dxy = ((my & 3) << 2) | (mx & 3);
899 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
900 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
901 srcY += s->linesize * 8;
902 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
903 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
904 } else { // hpel mc - always used for luma
905 dxy = (my & 2) | ((mx & 2) >> 1);
908 dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
910 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
913 if(s->flags & CODEC_FLAG_GRAY) return;
914 /* Chroma MC always uses qpel bilinear */
915 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
919 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
920 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
922 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
923 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
927 /** Do motion compensation for 4-MV macroblock - luminance block
929 static void vc1_mc_4mv_luma(VC1Context *v, int n)
931 MpegEncContext *s = &v->s;
932 DSPContext *dsp = &v->s.dsp;
934 int dxy, mx, my, src_x, src_y;
937 if(!v->s.last_picture.data[0])return;
940 srcY = s->last_picture.data[0];
942 off = s->linesize * 4 * (n&2) + (n&1) * 8;
944 src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
945 src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);
947 src_x = clip( src_x, -16, s->mb_width * 16);
948 src_y = clip( src_y, -16, s->mb_height * 16);
950 srcY += src_y * s->linesize + src_x;
952 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
953 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
954 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
955 srcY -= s->mspel * (1 + s->linesize);
956 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
957 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
958 srcY = s->edge_emu_buffer;
959 /* if we deal with range reduction we need to scale source blocks */
965 for(j = 0; j < 9 + s->mspel*2; j++) {
966 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
970 /* if we deal with intensity compensation we need to scale source blocks */
971 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
976 for(j = 0; j < 9 + s->mspel*2; j++) {
977 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
981 srcY += s->mspel * (1 + s->linesize);
985 dxy = ((my & 3) << 2) | (mx & 3);
986 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
987 } else { // hpel mc - always used for luma
988 dxy = (my & 2) | ((mx & 2) >> 1);
990 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
992 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
996 static inline int median4(int a, int b, int c, int d)
999 if(c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
1000 else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
1002 if(c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
1003 else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
1008 /** Do motion compensation for 4-MV macroblock - both chroma blocks
1010 static void vc1_mc_4mv_chroma(VC1Context *v)
1012 MpegEncContext *s = &v->s;
1013 DSPContext *dsp = &v->s.dsp;
1014 uint8_t *srcU, *srcV;
1015 int uvdxy, uvmx, uvmy, uvsrc_x, uvsrc_y;
1016 int i, idx, tx = 0, ty = 0;
1017 int mvx[4], mvy[4], intra[4];
1018 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
1020 if(!v->s.last_picture.data[0])return;
1021 if(s->flags & CODEC_FLAG_GRAY) return;
1023 for(i = 0; i < 4; i++) {
1024 mvx[i] = s->mv[0][i][0];
1025 mvy[i] = s->mv[0][i][1];
1026 intra[i] = v->mb_type[0][s->block_index[i]];
1029 /* calculate chroma MV vector from four luma MVs */
1030 idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0];
1031 if(!idx) { // all blocks are inter
1032 tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
1033 ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
1034 } else if(count[idx] == 1) { // 3 inter blocks
1037 tx = mid_pred(mvx[1], mvx[2], mvx[3]);
1038 ty = mid_pred(mvy[1], mvy[2], mvy[3]);
1041 tx = mid_pred(mvx[0], mvx[2], mvx[3]);
1042 ty = mid_pred(mvy[0], mvy[2], mvy[3]);
1045 tx = mid_pred(mvx[0], mvx[1], mvx[3]);
1046 ty = mid_pred(mvy[0], mvy[1], mvy[3]);
1049 tx = mid_pred(mvx[0], mvx[1], mvx[2]);
1050 ty = mid_pred(mvy[0], mvy[1], mvy[2]);
1053 } else if(count[idx] == 2) {
1055 for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;}
1056 for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;}
1057 tx = (mvx[t1] + mvx[t2]) / 2;
1058 ty = (mvy[t1] + mvy[t2]) / 2;
1060 return; //no need to do MC for inter blocks
1062 s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
1063 s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
1064 uvmx = (tx + ((tx&3) == 3)) >> 1;
1065 uvmy = (ty + ((ty&3) == 3)) >> 1;
1067 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
1068 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
1071 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1072 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1074 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
1075 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
1076 srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1077 srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1078 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1079 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
1080 || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
1081 ff_emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize, 8+1, 8+1,
1082 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1083 ff_emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1,
1084 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1085 srcU = s->edge_emu_buffer;
1086 srcV = s->edge_emu_buffer + 16;
1088 /* if we deal with range reduction we need to scale source blocks */
1089 if(v->rangeredfrm) {
1091 uint8_t *src, *src2;
1093 src = srcU; src2 = srcV;
1094 for(j = 0; j < 9; j++) {
1095 for(i = 0; i < 9; i++) {
1096 src[i] = ((src[i] - 128) >> 1) + 128;
1097 src2[i] = ((src2[i] - 128) >> 1) + 128;
1099 src += s->uvlinesize;
1100 src2 += s->uvlinesize;
1103 /* if we deal with intensity compensation we need to scale source blocks */
1104 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1106 uint8_t *src, *src2;
1108 src = srcU; src2 = srcV;
1109 for(j = 0; j < 9; j++) {
1110 for(i = 0; i < 9; i++) {
1111 src[i] = v->lutuv[src[i]];
1112 src2[i] = v->lutuv[src2[i]];
1114 src += s->uvlinesize;
1115 src2 += s->uvlinesize;
1120 /* Chroma MC always uses qpel bilinear */
1121 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
1125 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1126 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1128 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1129 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1133 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb);
1136 * Decode Simple/Main Profiles sequence header
1137 * @see Figure 7-8, p16-17
1138 * @param avctx Codec context
1139 * @param gb GetBit context initialized from Codec context extra_data
1142 static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
1144 VC1Context *v = avctx->priv_data;
1146 av_log(avctx, AV_LOG_DEBUG, "Header: %0X\n", show_bits(gb, 32));
1147 v->profile = get_bits(gb, 2);
1148 if (v->profile == 2)
1150 av_log(avctx, AV_LOG_ERROR, "Profile value 2 is forbidden (and WMV3 Complex Profile is unsupported)\n");
1154 if (v->profile == PROFILE_ADVANCED)
1156 return decode_sequence_header_adv(v, gb);
1160 v->res_sm = get_bits(gb, 2); //reserved
1163 av_log(avctx, AV_LOG_ERROR,
1164 "Reserved RES_SM=%i is forbidden\n", v->res_sm);
1170 v->frmrtq_postproc = get_bits(gb, 3); //common
1171 // (bitrate-32kbps)/64kbps
1172 v->bitrtq_postproc = get_bits(gb, 5); //common
1173 v->s.loop_filter = get_bits(gb, 1); //common
1174 if(v->s.loop_filter == 1 && v->profile == PROFILE_SIMPLE)
1176 av_log(avctx, AV_LOG_ERROR,
1177 "LOOPFILTER shell not be enabled in simple profile\n");
1180 v->res_x8 = get_bits(gb, 1); //reserved
1183 av_log(avctx, AV_LOG_ERROR,
1184 "1 for reserved RES_X8 is forbidden\n");
1187 v->multires = get_bits(gb, 1);
1188 v->res_fasttx = get_bits(gb, 1);
1191 av_log(avctx, AV_LOG_ERROR,
1192 "0 for reserved RES_FASTTX is forbidden\n");
1196 v->fastuvmc = get_bits(gb, 1); //common
1197 if (!v->profile && !v->fastuvmc)
1199 av_log(avctx, AV_LOG_ERROR,
1200 "FASTUVMC unavailable in Simple Profile\n");
1203 v->extended_mv = get_bits(gb, 1); //common
1204 if (!v->profile && v->extended_mv)
1206 av_log(avctx, AV_LOG_ERROR,
1207 "Extended MVs unavailable in Simple Profile\n");
1210 v->dquant = get_bits(gb, 2); //common
1211 v->vstransform = get_bits(gb, 1); //common
1213 v->res_transtab = get_bits(gb, 1);
1214 if (v->res_transtab)
1216 av_log(avctx, AV_LOG_ERROR,
1217 "1 for reserved RES_TRANSTAB is forbidden\n");
1221 v->overlap = get_bits(gb, 1); //common
1223 v->s.resync_marker = get_bits(gb, 1);
1224 v->rangered = get_bits(gb, 1);
1225 if (v->rangered && v->profile == PROFILE_SIMPLE)
1227 av_log(avctx, AV_LOG_INFO,
1228 "RANGERED should be set to 0 in simple profile\n");
1231 v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
1232 v->quantizer_mode = get_bits(gb, 2); //common
1234 v->finterpflag = get_bits(gb, 1); //common
1235 v->res_rtm_flag = get_bits(gb, 1); //reserved
1236 if (!v->res_rtm_flag)
1238 // av_log(avctx, AV_LOG_ERROR,
1239 // "0 for reserved RES_RTM_FLAG is forbidden\n");
1240 av_log(avctx, AV_LOG_ERROR,
1241 "Old WMV3 version detected, only I-frames will be decoded\n");
1244 av_log(avctx, AV_LOG_DEBUG,
1245 "Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
1246 "LoopFilter=%i, MultiRes=%i, FastUVMC=%i, Extended MV=%i\n"
1247 "Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
1248 "DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
1249 v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
1250 v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
1251 v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
1252 v->dquant, v->quantizer_mode, avctx->max_b_frames
1257 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
1259 v->res_rtm_flag = 1;
1260 v->level = get_bits(gb, 3);
1263 av_log(v->s.avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
1265 v->chromaformat = get_bits(gb, 2);
1266 if (v->chromaformat != 1)
1268 av_log(v->s.avctx, AV_LOG_ERROR,
1269 "Only 4:2:0 chroma format supported\n");
1274 v->frmrtq_postproc = get_bits(gb, 3); //common
1275 // (bitrate-32kbps)/64kbps
1276 v->bitrtq_postproc = get_bits(gb, 5); //common
1277 v->postprocflag = get_bits(gb, 1); //common
1279 v->s.avctx->coded_width = (get_bits(gb, 12) + 1) << 1;
1280 v->s.avctx->coded_height = (get_bits(gb, 12) + 1) << 1;
1281 v->s.avctx->width = v->s.avctx->coded_width;
1282 v->s.avctx->height = v->s.avctx->coded_height;
1283 v->broadcast = get_bits1(gb);
1284 v->interlace = get_bits1(gb);
1286 av_log(v->s.avctx, AV_LOG_ERROR, "Interlaced mode not supported (yet)\n");
1289 v->tfcntrflag = get_bits1(gb);
1290 v->finterpflag = get_bits1(gb);
1291 get_bits1(gb); // reserved
1293 av_log(v->s.avctx, AV_LOG_DEBUG,
1294 "Advanced Profile level %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
1295 "LoopFilter=%i, ChromaFormat=%i, Pulldown=%i, Interlace: %i\n"
1296 "TFCTRflag=%i, FINTERPflag=%i\n",
1297 v->level, v->frmrtq_postproc, v->bitrtq_postproc,
1298 v->s.loop_filter, v->chromaformat, v->broadcast, v->interlace,
1299 v->tfcntrflag, v->finterpflag
1302 v->psf = get_bits1(gb);
1303 if(v->psf) { //PsF, 6.1.13
1304 av_log(v->s.avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
1307 if(get_bits1(gb)) { //Display Info - decoding is not affected by it
1309 av_log(v->s.avctx, AV_LOG_DEBUG, "Display extended info:\n");
1310 v->s.avctx->width = v->s.width = w = get_bits(gb, 14) + 1;
1311 v->s.avctx->height = v->s.height = h = get_bits(gb, 14) + 1;
1312 av_log(v->s.avctx, AV_LOG_DEBUG, "Display dimensions: %ix%i\n", w, h);
1314 ar = get_bits(gb, 4);
1316 v->s.avctx->sample_aspect_ratio = vc1_pixel_aspect[ar];
1318 w = get_bits(gb, 8);
1319 h = get_bits(gb, 8);
1320 v->s.avctx->sample_aspect_ratio = (AVRational){w, h};
1323 if(get_bits1(gb)){ //framerate stuff
1325 v->s.avctx->time_base.num = 32;
1326 v->s.avctx->time_base.den = get_bits(gb, 16) + 1;
1329 nr = get_bits(gb, 8);
1330 dr = get_bits(gb, 4);
1331 if(nr && nr < 8 && dr && dr < 3){
1332 v->s.avctx->time_base.num = fps_dr[dr - 1];
1333 v->s.avctx->time_base.den = fps_nr[nr - 1] * 1000;
1339 v->color_prim = get_bits(gb, 8);
1340 v->transfer_char = get_bits(gb, 8);
1341 v->matrix_coef = get_bits(gb, 8);
1345 v->hrd_param_flag = get_bits1(gb);
1346 if(v->hrd_param_flag) {
1348 v->hrd_num_leaky_buckets = get_bits(gb, 5);
1349 get_bits(gb, 4); //bitrate exponent
1350 get_bits(gb, 4); //buffer size exponent
1351 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1352 get_bits(gb, 16); //hrd_rate[n]
1353 get_bits(gb, 16); //hrd_buffer[n]
1359 static int decode_entry_point(AVCodecContext *avctx, GetBitContext *gb)
1361 VC1Context *v = avctx->priv_data;
1362 int i, blink, refdist;
1364 av_log(avctx, AV_LOG_DEBUG, "Entry point: %08X\n", show_bits_long(gb, 32));
1365 blink = get_bits1(gb); // broken link
1366 avctx->max_b_frames = 1 - get_bits1(gb); // 'closed entry' also signalize possible B-frames
1367 v->panscanflag = get_bits1(gb);
1368 refdist = get_bits1(gb); // refdist flag
1369 v->s.loop_filter = get_bits1(gb);
1370 v->fastuvmc = get_bits1(gb);
1371 v->extended_mv = get_bits1(gb);
1372 v->dquant = get_bits(gb, 2);
1373 v->vstransform = get_bits1(gb);
1374 v->overlap = get_bits1(gb);
1375 v->quantizer_mode = get_bits(gb, 2);
1377 if(v->hrd_param_flag){
1378 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1379 get_bits(gb, 8); //hrd_full[n]
1384 avctx->coded_width = (get_bits(gb, 12)+1)<<1;
1385 avctx->coded_height = (get_bits(gb, 12)+1)<<1;
1388 v->extended_dmv = get_bits1(gb);
1390 av_log(avctx, AV_LOG_ERROR, "Luma scaling is not supported, expect wrong picture\n");
1391 skip_bits(gb, 3); // Y range, ignored for now
1394 av_log(avctx, AV_LOG_ERROR, "Chroma scaling is not supported, expect wrong picture\n");
1395 skip_bits(gb, 3); // UV range, ignored for now
1398 av_log(avctx, AV_LOG_DEBUG, "Entry point info:\n"
1399 "BrokenLink=%i, ClosedEntry=%i, PanscanFlag=%i\n"
1400 "RefDist=%i, Postproc=%i, FastUVMC=%i, ExtMV=%i\n"
1401 "DQuant=%i, VSTransform=%i, Overlap=%i, Qmode=%i\n",
1402 blink, 1 - avctx->max_b_frames, v->panscanflag, refdist, v->s.loop_filter,
1403 v->fastuvmc, v->extended_mv, v->dquant, v->vstransform, v->overlap, v->quantizer_mode);
1408 static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
1410 int pqindex, lowquant, status;
1412 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
1413 skip_bits(gb, 2); //framecnt unused
1415 if (v->rangered) v->rangeredfrm = get_bits(gb, 1);
1416 v->s.pict_type = get_bits(gb, 1);
1417 if (v->s.avctx->max_b_frames) {
1418 if (!v->s.pict_type) {
1419 if (get_bits(gb, 1)) v->s.pict_type = I_TYPE;
1420 else v->s.pict_type = B_TYPE;
1421 } else v->s.pict_type = P_TYPE;
1422 } else v->s.pict_type = v->s.pict_type ? P_TYPE : I_TYPE;
1425 if(v->s.pict_type == B_TYPE) {
1426 v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1427 v->bfraction = vc1_bfraction_lut[v->bfraction];
1428 if(v->bfraction == 0) {
1429 v->s.pict_type = BI_TYPE;
1432 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1433 get_bits(gb, 7); // skip buffer fullness
1436 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1438 if(v->s.pict_type == P_TYPE)
1441 /* Quantizer stuff */
1442 pqindex = get_bits(gb, 5);
1443 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1444 v->pq = pquant_table[0][pqindex];
1446 v->pq = pquant_table[1][pqindex];
1449 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1450 v->pquantizer = pqindex < 9;
1451 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1453 v->pqindex = pqindex;
1454 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1456 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1457 v->pquantizer = get_bits(gb, 1);
1459 if (v->extended_mv == 1) v->mvrange = get_prefix(gb, 0, 3);
1460 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1461 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1462 v->range_x = 1 << (v->k_x - 1);
1463 v->range_y = 1 << (v->k_y - 1);
1464 if (v->profile == PROFILE_ADVANCED)
1466 if (v->postprocflag) v->postproc = get_bits(gb, 1);
1469 if (v->multires && v->s.pict_type != B_TYPE) v->respic = get_bits(gb, 2);
1471 //av_log(v->s.avctx, AV_LOG_INFO, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
1472 // (v->s.pict_type == P_TYPE) ? 'P' : ((v->s.pict_type == I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);
1474 if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0;
1476 switch(v->s.pict_type) {
1478 if (v->pq < 5) v->tt_index = 0;
1479 else if(v->pq < 13) v->tt_index = 1;
1480 else v->tt_index = 2;
1482 lowquant = (v->pq > 12) ? 0 : 1;
1483 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1484 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1486 int scale, shift, i;
1487 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1488 v->lumscale = get_bits(gb, 6);
1489 v->lumshift = get_bits(gb, 6);
1491 /* fill lookup tables for intensity compensation */
1494 shift = (255 - v->lumshift * 2) << 6;
1495 if(v->lumshift > 31)
1498 scale = v->lumscale + 32;
1499 if(v->lumshift > 31)
1500 shift = (v->lumshift - 64) << 6;
1502 shift = v->lumshift << 6;
1504 for(i = 0; i < 256; i++) {
1505 v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
1506 v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
1509 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1510 v->s.quarter_sample = 0;
1511 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1512 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1513 v->s.quarter_sample = 0;
1515 v->s.quarter_sample = 1;
1517 v->s.quarter_sample = 1;
1518 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
1520 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1521 v->mv_mode2 == MV_PMODE_MIXED_MV)
1522 || v->mv_mode == MV_PMODE_MIXED_MV)
1524 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1525 if (status < 0) return -1;
1526 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1527 "Imode: %i, Invert: %i\n", status>>1, status&1);
1529 v->mv_type_is_raw = 0;
1530 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1532 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1533 if (status < 0) return -1;
1534 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1535 "Imode: %i, Invert: %i\n", status>>1, status&1);
1537 /* Hopefully this is correct for P frames */
1538 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1539 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1543 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1544 vop_dquant_decoding(v);
1547 v->ttfrm = 0; //FIXME Is that so ?
1550 v->ttmbf = get_bits(gb, 1);
1553 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
1561 if (v->pq < 5) v->tt_index = 0;
1562 else if(v->pq < 13) v->tt_index = 1;
1563 else v->tt_index = 2;
1565 lowquant = (v->pq > 12) ? 0 : 1;
1566 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1567 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1568 v->s.mspel = v->s.quarter_sample;
1570 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1571 if (status < 0) return -1;
1572 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1573 "Imode: %i, Invert: %i\n", status>>1, status&1);
1574 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1575 if (status < 0) return -1;
1576 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1577 "Imode: %i, Invert: %i\n", status>>1, status&1);
1579 v->s.mv_table_index = get_bits(gb, 2);
1580 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1584 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1585 vop_dquant_decoding(v);
1591 v->ttmbf = get_bits(gb, 1);
1594 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
1604 v->c_ac_table_index = decode012(gb);
1605 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1607 v->y_ac_table_index = decode012(gb);
1610 v->s.dc_table_index = get_bits(gb, 1);
1612 if(v->s.pict_type == BI_TYPE) {
1613 v->s.pict_type = B_TYPE;
1619 static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
1621 int pqindex, lowquant;
1624 v->p_frame_skipped = 0;
1627 v->fcm = decode012(gb);
1628 switch(get_prefix(gb, 0, 4)) {
1630 v->s.pict_type = P_TYPE;
1633 v->s.pict_type = B_TYPE;
1636 v->s.pict_type = I_TYPE;
1639 v->s.pict_type = BI_TYPE;
1642 v->s.pict_type = P_TYPE; // skipped pic
1643 v->p_frame_skipped = 1;
1649 if(!v->interlace || v->psf) {
1650 v->rptfrm = get_bits(gb, 2);
1652 v->tff = get_bits1(gb);
1653 v->rptfrm = get_bits1(gb);
1656 if(v->panscanflag) {
1659 v->rnd = get_bits1(gb);
1661 v->uvsamp = get_bits1(gb);
1662 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
1663 if(v->s.pict_type == B_TYPE) {
1664 v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1665 v->bfraction = vc1_bfraction_lut[v->bfraction];
1666 if(v->bfraction == 0) {
1667 v->s.pict_type = BI_TYPE; /* XXX: should not happen here */
1670 pqindex = get_bits(gb, 5);
1671 v->pqindex = pqindex;
1672 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1673 v->pq = pquant_table[0][pqindex];
1675 v->pq = pquant_table[1][pqindex];
1678 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1679 v->pquantizer = pqindex < 9;
1680 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1682 v->pqindex = pqindex;
1683 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1685 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1686 v->pquantizer = get_bits(gb, 1);
1688 switch(v->s.pict_type) {
1691 status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
1692 if (status < 0) return -1;
1693 av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
1694 "Imode: %i, Invert: %i\n", status>>1, status&1);
1695 v->condover = CONDOVER_NONE;
1696 if(v->overlap && v->pq <= 8) {
1697 v->condover = decode012(gb);
1698 if(v->condover == CONDOVER_SELECT) {
1699 status = bitplane_decoding(v->over_flags_plane, &v->overflg_is_raw, v);
1700 if (status < 0) return -1;
1701 av_log(v->s.avctx, AV_LOG_DEBUG, "CONDOVER plane encoding: "
1702 "Imode: %i, Invert: %i\n", status>>1, status&1);
1708 v->postproc = get_bits1(gb);
1709 if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
1710 else v->mvrange = 0;
1711 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1712 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1713 v->range_x = 1 << (v->k_x - 1);
1714 v->range_y = 1 << (v->k_y - 1);
1716 if (v->pq < 5) v->tt_index = 0;
1717 else if(v->pq < 13) v->tt_index = 1;
1718 else v->tt_index = 2;
1720 lowquant = (v->pq > 12) ? 0 : 1;
1721 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1722 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1724 int scale, shift, i;
1725 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1726 v->lumscale = get_bits(gb, 6);
1727 v->lumshift = get_bits(gb, 6);
1728 /* fill lookup tables for intensity compensation */
1731 shift = (255 - v->lumshift * 2) << 6;
1732 if(v->lumshift > 31)
1735 scale = v->lumscale + 32;
1736 if(v->lumshift > 31)
1737 shift = (v->lumshift - 64) << 6;
1739 shift = v->lumshift << 6;
1741 for(i = 0; i < 256; i++) {
1742 v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
1743 v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
1746 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1747 v->s.quarter_sample = 0;
1748 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1749 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1750 v->s.quarter_sample = 0;
1752 v->s.quarter_sample = 1;
1754 v->s.quarter_sample = 1;
1755 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
1757 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1758 v->mv_mode2 == MV_PMODE_MIXED_MV)
1759 || v->mv_mode == MV_PMODE_MIXED_MV)
1761 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1762 if (status < 0) return -1;
1763 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1764 "Imode: %i, Invert: %i\n", status>>1, status&1);
1766 v->mv_type_is_raw = 0;
1767 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1769 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1770 if (status < 0) return -1;
1771 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1772 "Imode: %i, Invert: %i\n", status>>1, status&1);
1774 /* Hopefully this is correct for P frames */
1775 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1776 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1779 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1780 vop_dquant_decoding(v);
1783 v->ttfrm = 0; //FIXME Is that so ?
1786 v->ttmbf = get_bits(gb, 1);
1789 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
1798 v->postproc = get_bits1(gb);
1799 if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
1800 else v->mvrange = 0;
1801 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1802 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1803 v->range_x = 1 << (v->k_x - 1);
1804 v->range_y = 1 << (v->k_y - 1);
1806 if (v->pq < 5) v->tt_index = 0;
1807 else if(v->pq < 13) v->tt_index = 1;
1808 else v->tt_index = 2;
1810 lowquant = (v->pq > 12) ? 0 : 1;
1811 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1812 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1813 v->s.mspel = v->s.quarter_sample;
1815 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1816 if (status < 0) return -1;
1817 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1818 "Imode: %i, Invert: %i\n", status>>1, status&1);
1819 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1820 if (status < 0) return -1;
1821 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1822 "Imode: %i, Invert: %i\n", status>>1, status&1);
1824 v->s.mv_table_index = get_bits(gb, 2);
1825 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1829 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1830 vop_dquant_decoding(v);
1836 v->ttmbf = get_bits(gb, 1);
1839 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
1849 v->c_ac_table_index = decode012(gb);
1850 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1852 v->y_ac_table_index = decode012(gb);
1855 v->s.dc_table_index = get_bits(gb, 1);
1856 if ((v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE) && v->dquant) {
1857 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1858 vop_dquant_decoding(v);
1862 if(v->s.pict_type == BI_TYPE) {
1863 v->s.pict_type = B_TYPE;
1869 /***********************************************************************/
1871 * @defgroup block VC-1 Block-level functions
1872 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1878 * @brief Get macroblock-level quantizer scale
1880 #define GET_MQUANT() \
1884 if (v->dqprofile == DQPROFILE_ALL_MBS) \
1888 mquant = (get_bits(gb, 1)) ? v->altpq : v->pq; \
1892 mqdiff = get_bits(gb, 3); \
1893 if (mqdiff != 7) mquant = v->pq + mqdiff; \
1894 else mquant = get_bits(gb, 5); \
1897 if(v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1898 edges = 1 << v->dqsbedge; \
1899 else if(v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1900 edges = (3 << v->dqsbedge) % 15; \
1901 else if(v->dqprofile == DQPROFILE_FOUR_EDGES) \
1903 if((edges&1) && !s->mb_x) \
1904 mquant = v->altpq; \
1905 if((edges&2) && s->first_slice_line) \
1906 mquant = v->altpq; \
1907 if((edges&4) && s->mb_x == (s->mb_width - 1)) \
1908 mquant = v->altpq; \
1909 if((edges&8) && s->mb_y == (s->mb_height - 1)) \
1910 mquant = v->altpq; \
1914 * @def GET_MVDATA(_dmv_x, _dmv_y)
1915 * @brief Get MV differentials
1916 * @see MVDATA decoding from 8.3.5.2, p(1)20
1917 * @param _dmv_x Horizontal differential for decoded MV
1918 * @param _dmv_y Vertical differential for decoded MV
1920 #define GET_MVDATA(_dmv_x, _dmv_y) \
1921 index = 1 + get_vlc2(gb, vc1_mv_diff_vlc[s->mv_table_index].table,\
1922 VC1_MV_DIFF_VLC_BITS, 2); \
1925 mb_has_coeffs = 1; \
1928 else mb_has_coeffs = 0; \
1930 if (!index) { _dmv_x = _dmv_y = 0; } \
1931 else if (index == 35) \
1933 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1934 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1936 else if (index == 36) \
1945 if (!s->quarter_sample && index1 == 5) val = 1; \
1947 if(size_table[index1] - val > 0) \
1948 val = get_bits(gb, size_table[index1] - val); \
1950 sign = 0 - (val&1); \
1951 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1954 if (!s->quarter_sample && index1 == 5) val = 1; \
1956 if(size_table[index1] - val > 0) \
1957 val = get_bits(gb, size_table[index1] - val); \
1959 sign = 0 - (val&1); \
1960 _dmv_y = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1963 /** Predict and set motion vector
1965 static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t* is_intra)
1967 int xy, wrap, off = 0;
1972 /* scale MV difference to be quad-pel */
1973 dmv_x <<= 1 - s->quarter_sample;
1974 dmv_y <<= 1 - s->quarter_sample;
1976 wrap = s->b8_stride;
1977 xy = s->block_index[n];
1980 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
1981 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
1982 if(mv1) { /* duplicate motion data for 1-MV block */
1983 s->current_picture.motion_val[0][xy + 1][0] = 0;
1984 s->current_picture.motion_val[0][xy + 1][1] = 0;
1985 s->current_picture.motion_val[0][xy + wrap][0] = 0;
1986 s->current_picture.motion_val[0][xy + wrap][1] = 0;
1987 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
1988 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
1993 C = s->current_picture.motion_val[0][xy - 1];
1994 A = s->current_picture.motion_val[0][xy - wrap];
1996 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1998 //in 4-MV mode different blocks have different B predictor position
2001 off = (s->mb_x > 0) ? -1 : 1;
2004 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
2013 B = s->current_picture.motion_val[0][xy - wrap + off];
2015 if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
2016 if(s->mb_width == 1) {
2020 px = mid_pred(A[0], B[0], C[0]);
2021 py = mid_pred(A[1], B[1], C[1]);
2023 } else if(s->mb_x || (n==1 || n==3)) { // predictor C is not out of bounds
2029 /* Pullback MV as specified in 8.3.5.3.4 */
2032 qx = (s->mb_x << 6) + ((n==1 || n==3) ? 32 : 0);
2033 qy = (s->mb_y << 6) + ((n==2 || n==3) ? 32 : 0);
2034 X = (s->mb_width << 6) - 4;
2035 Y = (s->mb_height << 6) - 4;
2037 if(qx + px < -60) px = -60 - qx;
2038 if(qy + py < -60) py = -60 - qy;
2040 if(qx + px < -28) px = -28 - qx;
2041 if(qy + py < -28) py = -28 - qy;
2043 if(qx + px > X) px = X - qx;
2044 if(qy + py > Y) py = Y - qy;
2046 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2047 if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
2048 if(is_intra[xy - wrap])
2049 sum = FFABS(px) + FFABS(py);
2051 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2053 if(get_bits1(&s->gb)) {
2061 if(is_intra[xy - 1])
2062 sum = FFABS(px) + FFABS(py);
2064 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2066 if(get_bits1(&s->gb)) {
2076 /* store MV using signed modulus of MV range defined in 4.11 */
2077 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
2078 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
2079 if(mv1) { /* duplicate motion data for 1-MV block */
2080 s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
2081 s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
2082 s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0];
2083 s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1];
2084 s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
2085 s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
2089 /** Motion compensation for direct or interpolated blocks in B-frames
2091 static void vc1_interp_mc(VC1Context *v)
2093 MpegEncContext *s = &v->s;
2094 DSPContext *dsp = &v->s.dsp;
2095 uint8_t *srcY, *srcU, *srcV;
2096 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
2098 if(!v->s.next_picture.data[0])return;
2100 mx = s->mv[1][0][0];
2101 my = s->mv[1][0][1];
2102 uvmx = (mx + ((mx & 3) == 3)) >> 1;
2103 uvmy = (my + ((my & 3) == 3)) >> 1;
2105 uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1));
2106 uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1));
2108 srcY = s->next_picture.data[0];
2109 srcU = s->next_picture.data[1];
2110 srcV = s->next_picture.data[2];
2112 src_x = s->mb_x * 16 + (mx >> 2);
2113 src_y = s->mb_y * 16 + (my >> 2);
2114 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
2115 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
2117 src_x = clip( src_x, -16, s->mb_width * 16);
2118 src_y = clip( src_y, -16, s->mb_height * 16);
2119 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
2120 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
2122 srcY += src_y * s->linesize + src_x;
2123 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
2124 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
2126 /* for grayscale we should not try to read from unknown area */
2127 if(s->flags & CODEC_FLAG_GRAY) {
2128 srcU = s->edge_emu_buffer + 18 * s->linesize;
2129 srcV = s->edge_emu_buffer + 18 * s->linesize;
2133 || (unsigned)src_x > s->h_edge_pos - (mx&3) - 16
2134 || (unsigned)src_y > s->v_edge_pos - (my&3) - 16){
2135 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
2137 srcY -= s->mspel * (1 + s->linesize);
2138 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
2139 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
2140 srcY = s->edge_emu_buffer;
2141 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
2142 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2143 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
2144 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2147 /* if we deal with range reduction we need to scale source blocks */
2148 if(v->rangeredfrm) {
2150 uint8_t *src, *src2;
2153 for(j = 0; j < 17 + s->mspel*2; j++) {
2154 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
2157 src = srcU; src2 = srcV;
2158 for(j = 0; j < 9; j++) {
2159 for(i = 0; i < 9; i++) {
2160 src[i] = ((src[i] - 128) >> 1) + 128;
2161 src2[i] = ((src2[i] - 128) >> 1) + 128;
2163 src += s->uvlinesize;
2164 src2 += s->uvlinesize;
2167 srcY += s->mspel * (1 + s->linesize);
2172 dxy = ((my & 1) << 1) | (mx & 1);
2174 dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
2176 if(s->flags & CODEC_FLAG_GRAY) return;
2177 /* Chroma MC always uses qpel bilinear */
2178 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
2181 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
2182 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
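/* Illustrative sketch (assumption, not part of the decoder): the range
 * reduction loops above halve each sample's offset from 128, mapping
 * 0 -> 64, 128 -> 128 and 255 -> 191. A hypothetical per-sample helper: */
static inline uint8_t range_reduce_sample(uint8_t x)
{
    return (uint8_t)(((x - 128) >> 1) + 128);
}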
2185 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2189 #if B_FRACTION_DEN==256
2193 return 2 * ((value * n + 255) >> 9);
2194 return (value * n + 128) >> 8;
2197 n -= B_FRACTION_DEN;
2199 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2200 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
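/* Illustrative sketch (assumption, not part of the decoder): for direct
 * mode, scale_mv() splits the co-located MV of the next picture into a
 * forward part (weighted by BFRACTION) and a backward part (weighted by
 * BFRACTION - 1, i.e. inv != 0), so that forward - backward reproduces
 * the co-located vector up to rounding. A hypothetical wrapper: */
static inline void split_direct_mv(int colocated, int bfrac, int qs,
                                   int *fwd, int *bwd)
{
    *fwd = scale_mv(colocated, bfrac, 0, qs);
    *bwd = scale_mv(colocated, bfrac, 1, qs);
}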
2204 /** Reconstruct motion vector for B-frame and do motion compensation
2206 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
2209 v->mv_mode2 = v->mv_mode;
2210 v->mv_mode = MV_PMODE_INTENSITY_COMP;
2215 if(v->use_ic) v->mv_mode = v->mv_mode2;
2218 if(mode == BMV_TYPE_INTERPOLATED) {
2221 if(v->use_ic) v->mv_mode = v->mv_mode2;
2225 if(v->use_ic && (mode == BMV_TYPE_BACKWARD)) v->mv_mode = v->mv_mode2;
2226 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2227 if(v->use_ic) v->mv_mode = v->mv_mode2;
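/* Descriptive note (assumption based on the visible code): when intensity
 * compensation is in use (v->use_ic), vc1_b_mc() temporarily switches
 * mv_mode to MV_PMODE_INTENSITY_COMP and restores the saved mv_mode2
 * before the MC call for BMV_TYPE_BACKWARD, or after it otherwise, so
 * that only forward references are intensity compensated. */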
2230 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
2232 MpegEncContext *s = &v->s;
2233 int xy, wrap, off = 0;
2238 const uint8_t *is_intra = v->mb_type[0];
2242 /* scale MV difference to be quad-pel */
2243 dmv_x[0] <<= 1 - s->quarter_sample;
2244 dmv_y[0] <<= 1 - s->quarter_sample;
2245 dmv_x[1] <<= 1 - s->quarter_sample;
2246 dmv_y[1] <<= 1 - s->quarter_sample;
2248 wrap = s->b8_stride;
2249 xy = s->block_index[0];
2252 s->current_picture.motion_val[0][xy][0] =
2253 s->current_picture.motion_val[0][xy][1] =
2254 s->current_picture.motion_val[1][xy][0] =
2255 s->current_picture.motion_val[1][xy][1] = 0;
2258 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2259 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2260 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2261 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2263 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2264 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2265 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2266 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2270 if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2271 C = s->current_picture.motion_val[0][xy - 2];
2272 A = s->current_picture.motion_val[0][xy - wrap*2];
2273 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2274 B = s->current_picture.motion_val[0][xy - wrap*2 + off];
2276 if(!s->first_slice_line) { // predictor A is not out of bounds
2277 if(s->mb_width == 1) {
2281 px = mid_pred(A[0], B[0], C[0]);
2282 py = mid_pred(A[1], B[1], C[1]);
2284 } else if(s->mb_x) { // predictor C is not out of bounds
2290 /* Pullback MV as specified in 8.3.5.3.4 */
2293 if(v->profile < PROFILE_ADVANCED) {
2294 qx = (s->mb_x << 5);
2295 qy = (s->mb_y << 5);
2296 X = (s->mb_width << 5) - 4;
2297 Y = (s->mb_height << 5) - 4;
2298 if(qx + px < -28) px = -28 - qx;
2299 if(qy + py < -28) py = -28 - qy;
2300 if(qx + px > X) px = X - qx;
2301 if(qy + py > Y) py = Y - qy;
2303 qx = (s->mb_x << 6);
2304 qy = (s->mb_y << 6);
2305 X = (s->mb_width << 6) - 4;
2306 Y = (s->mb_height << 6) - 4;
2307 if(qx + px < -60) px = -60 - qx;
2308 if(qy + py < -60) py = -60 - qy;
2309 if(qx + px > X) px = X - qx;
2310 if(qy + py > Y) py = Y - qy;
2313 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
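/* Note: the "0 &&" in the condition below (and in the matching
 * backward-direction check further down) disables hybrid MV prediction
 * in this B-frame path, so the branch is effectively dead code. */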
2314 if(0 && !s->first_slice_line && s->mb_x) {
2315 if(is_intra[xy - wrap])
2316 sum = FFABS(px) + FFABS(py);
2318 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2320 if(get_bits1(&s->gb)) {
2328 if(is_intra[xy - 2])
2329 sum = FFABS(px) + FFABS(py);
2331 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2333 if(get_bits1(&s->gb)) {
2343 /* store MV using signed modulus of MV range defined in 4.11 */
2344 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2345 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
2347 if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2348 C = s->current_picture.motion_val[1][xy - 2];
2349 A = s->current_picture.motion_val[1][xy - wrap*2];
2350 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2351 B = s->current_picture.motion_val[1][xy - wrap*2 + off];
2353 if(!s->first_slice_line) { // predictor A is not out of bounds
2354 if(s->mb_width == 1) {
2358 px = mid_pred(A[0], B[0], C[0]);
2359 py = mid_pred(A[1], B[1], C[1]);
2361 } else if(s->mb_x) { // predictor C is not out of bounds
2367 /* Pullback MV as specified in 8.3.5.3.4 */
2370 if(v->profile < PROFILE_ADVANCED) {
2371 qx = (s->mb_x << 5);
2372 qy = (s->mb_y << 5);
2373 X = (s->mb_width << 5) - 4;
2374 Y = (s->mb_height << 5) - 4;
2375 if(qx + px < -28) px = -28 - qx;
2376 if(qy + py < -28) py = -28 - qy;
2377 if(qx + px > X) px = X - qx;
2378 if(qy + py > Y) py = Y - qy;
2380 qx = (s->mb_x << 6);
2381 qy = (s->mb_y << 6);
2382 X = (s->mb_width << 6) - 4;
2383 Y = (s->mb_height << 6) - 4;
2384 if(qx + px < -60) px = -60 - qx;
2385 if(qy + py < -60) py = -60 - qy;
2386 if(qx + px > X) px = X - qx;
2387 if(qy + py > Y) py = Y - qy;
2390 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2391 if(0 && !s->first_slice_line && s->mb_x) {
2392 if(is_intra[xy - wrap])
2393 sum = FFABS(px) + FFABS(py);
2395 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2397 if(get_bits1(&s->gb)) {
2405 if(is_intra[xy - 2])
2406 sum = FFABS(px) + FFABS(py);
2408 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2410 if(get_bits1(&s->gb)) {
2420 /* store MV using signed modulus of MV range defined in 4.11 */
2422 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2423 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2425 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2426 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2427 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2428 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
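/* Illustrative sketch (assumption, not part of the decoder): the pullback
 * above (spec 8.3.5.3.4) clamps the predicted MV so that the referenced
 * position stays inside the coded area; lo is -28 or -60 and the limits
 * X/Y are derived from the frame size in the same sub-pel units as the
 * MV (a macroblock spans 32 or 64 units depending on the profile): */
static inline int mv_pull_back(int pred, int pos, int lo, int hi)
{
    if (pos + pred < lo) pred = lo - pos;
    if (pos + pred > hi) pred = hi - pos;
    return pred;
}
/* e.g. px = mv_pull_back(px, qx, -28, X); py = mv_pull_back(py, qy, -28, Y); */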
2431 /** Get predicted DC value for I-frames only
2432 * prediction dir: left=1, top=0
2433 * @param s MpegEncContext
2434 * @param[in] n block index in the current MB
2435 * @param dc_val_ptr Pointer to DC predictor
2436 * @param dir_ptr Prediction direction for use in AC prediction
2438 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2439 int16_t **dc_val_ptr, int *dir_ptr)
2441 int a, b, c, wrap, pred, scale;
2443 static const uint16_t dcpred[32] = {
2444 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2445 114, 102, 93, 85, 79, 73, 68, 64,
2446 60, 57, 54, 51, 49, 47, 45, 43,
2447 41, 39, 38, 37, 35, 34, 33
2450 /* find prediction; in practice wmv3_dc_scale is always used here */
2451 if (n < 4) scale = s->y_dc_scale;
2452 else scale = s->c_dc_scale;
2454 wrap = s->block_wrap[n];
2455 dc_val= s->dc_val[0] + s->block_index[n];
2461 b = dc_val[ - 1 - wrap];
2462 a = dc_val[ - wrap];
2464 if (pq < 9 || !overlap)
2466 /* Set outer values */
2467 if (s->first_slice_line && (n!=2 && n!=3)) b=a=dcpred[scale];
2468 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=dcpred[scale];
2472 /* Set outer values */
2473 if (s->first_slice_line && (n!=2 && n!=3)) b=a=0;
2474 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=0;
2477 if (abs(a - b) <= abs(b - c)) {
2485 /* update predictor */
2486 *dc_val_ptr = &dc_val[0];
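/* Illustrative note (assumption): the dcpred[] table above appears to hold
 * round(1024 / scale) for scale = 1..31, i.e. the default predictor used
 * when no decoded neighbour is available. A hypothetical equivalent: */
static inline int default_dc_pred(int scale)
{
    return (1024 + (scale >> 1)) / scale;
}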
2491 /** Get predicted DC value
2492 * prediction dir: left=1, top=0
2493 * @param s MpegEncContext
2494 * @param[in] n block index in the current MB
2495 * @param dc_val_ptr Pointer to DC predictor
2496 * @param dir_ptr Prediction direction for use in AC prediction
2498 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2499 int a_avail, int c_avail,
2500 int16_t **dc_val_ptr, int *dir_ptr)
2502 int a, b, c, wrap, pred, scale;
2504 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2507 /* find prediction; in practice wmv3_dc_scale is always used here */
2508 if (n < 4) scale = s->y_dc_scale;
2509 else scale = s->c_dc_scale;
2511 wrap = s->block_wrap[n];
2512 dc_val= s->dc_val[0] + s->block_index[n];
2518 b = dc_val[ - 1 - wrap];
2519 a = dc_val[ - wrap];
2520 /* scale predictors if needed */
2521 q1 = s->current_picture.qscale_table[mb_pos];
2522 if(c_avail && (n!= 1 && n!=3)) {
2523 q2 = s->current_picture.qscale_table[mb_pos - 1];
2525 c = (c * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2527 if(a_avail && (n!= 2 && n!=3)) {
2528 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2530 a = (a * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2532 if(a_avail && c_avail && (n!=3)) {
2535 if(n != 2) off -= s->mb_stride;
2536 q2 = s->current_picture.qscale_table[off];
2538 b = (b * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2541 if(a_avail && c_avail) {
2542 if(abs(a - b) <= abs(b - c)) {
2549 } else if(a_avail) {
2552 } else if(c_avail) {
2560 /* update predictor */
2561 *dc_val_ptr = &dc_val[0];
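/* Illustrative sketch (assumption, not part of the decoder): the predictor
 * rescaling above computes approximately  v * scale_neigh / scale_cur,
 * where vc1_dqscale[i] is presumed to hold roughly (1 << 18) / (i + 1)
 * and the + 0x20000 term provides rounding before the >> 18.
 * A plain-integer reference version (non-negative operands): */
static inline int rescale_dc_pred(int v, int scale_cur, int scale_neigh)
{
    return (2 * v * scale_neigh + scale_cur) / (2 * scale_cur);
}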
2567 * @defgroup std_mb VC1 Macroblock-level functions in Simple/Main Profiles
2568 * @see 7.1.4, p91 and 8.1.1.7, p104
2572 static inline int vc1_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
2574 int xy, wrap, pred, a, b, c;
2576 xy = s->block_index[n];
2577 wrap = s->b8_stride;
2582 a = s->coded_block[xy - 1 ];
2583 b = s->coded_block[xy - 1 - wrap];
2584 c = s->coded_block[xy - wrap];
2593 *coded_block_ptr = &s->coded_block[xy];
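/* Hedged sketch (assumption; the exact rule sits in lines not shown here):
 * MPEG-4-style coded-block prediction typically predicts the top
 * neighbour when the left and top-left agree, otherwise the left one: */
static inline int coded_block_pred_rule(int a, int b, int c)
{
    /* a = left, b = top-left, c = top, as read above */
    return (a == b) ? c : a;
}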
2599 * Decode one AC coefficient
2600 * @param v The VC1 context
2601 * @param last Set to 1 if this is the last coefficient
2602 * @param skip How many zero coefficients to skip
2603 * @param value Decoded AC coefficient value
2606 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
2608 GetBitContext *gb = &v->s.gb;
2609 int index, escape, run = 0, level = 0, lst = 0;
2611 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2612 if (index != vc1_ac_sizes[codingset] - 1) {
2613 run = vc1_index_decode_table[codingset][index][0];
2614 level = vc1_index_decode_table[codingset][index][1];
2615 lst = index >= vc1_last_decode_table[codingset];
2619 escape = decode210(gb);
2621 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2622 run = vc1_index_decode_table[codingset][index][0];
2623 level = vc1_index_decode_table[codingset][index][1];
2624 lst = index >= vc1_last_decode_table[codingset];
2627 level += vc1_last_delta_level_table[codingset][run];
2629 level += vc1_delta_level_table[codingset][run];
2632 run += vc1_last_delta_run_table[codingset][level] + 1;
2634 run += vc1_delta_run_table[codingset][level] + 1;
2640 lst = get_bits(gb, 1);
2641 if(v->s.esc3_level_length == 0) {
2642 if(v->pq < 8 || v->dquantfrm) { // table 59
2643 v->s.esc3_level_length = get_bits(gb, 3);
2644 if(!v->s.esc3_level_length)
2645 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2647 v->s.esc3_level_length = get_prefix(gb, 1, 6) + 2;
2649 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2651 run = get_bits(gb, v->s.esc3_run_length);
2652 sign = get_bits(gb, 1);
2653 level = get_bits(gb, v->s.esc3_level_length);
2664 /** Decode intra block in intra frames - should be faster than decode_intra_block
2665 * @param v VC1Context
2666 * @param block block to decode
2667 * @param coded whether AC coefficients are present
2668 * @param codingset set of VLC tables used to decode the data
2670 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
2672 GetBitContext *gb = &v->s.gb;
2673 MpegEncContext *s = &v->s;
2674 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2677 int16_t *ac_val, *ac_val2;
2680 /* Get DC differential */
2682 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2684 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2687 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2692 if (dcdiff == 119 /* ESC index value */)
2694 /* TODO: Optimize */
2695 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2696 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2697 else dcdiff = get_bits(gb, 8);
2702 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2703 else if (v->pq == 2)
2704 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2706 if (get_bits(gb, 1))
2711 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2714 /* Store the quantized DC coeff, used for prediction */
2716 block[0] = dcdiff * s->y_dc_scale;
2718 block[0] = dcdiff * s->c_dc_scale;
2731 int last = 0, skip, value;
2732 const int8_t *zz_table;
2736 scale = v->pq * 2 + v->halfpq;
2740 zz_table = vc1_horizontal_zz;
2742 zz_table = vc1_vertical_zz;
2744 zz_table = vc1_normal_zz;
2746 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2748 if(dc_pred_dir) //left
2751 ac_val -= 16 * s->block_wrap[n];
2754 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2758 block[zz_table[i++]] = value;
2761 /* apply AC prediction if needed */
2763 if(dc_pred_dir) { //left
2764 for(k = 1; k < 8; k++)
2765 block[k << 3] += ac_val[k];
2767 for(k = 1; k < 8; k++)
2768 block[k] += ac_val[k + 8];
2771 /* save AC coeffs for further prediction */
2772 for(k = 1; k < 8; k++) {
2773 ac_val2[k] = block[k << 3];
2774 ac_val2[k + 8] = block[k];
2777 /* scale AC coeffs */
2778 for(k = 1; k < 64; k++)
2782 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2785 if(s->ac_pred) i = 63;
2791 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2794 scale = v->pq * 2 + v->halfpq;
2795 memset(ac_val2, 0, 16 * 2);
2796 if(dc_pred_dir) {//left
2799 memcpy(ac_val2, ac_val, 8 * 2);
2801 ac_val -= 16 * s->block_wrap[n];
2803 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2806 /* apply AC prediction if needed */
2808 if(dc_pred_dir) { //left
2809 for(k = 1; k < 8; k++) {
2810 block[k << 3] = ac_val[k] * scale;
2811 if(!v->pquantizer && block[k << 3])
2812 block[k << 3] += (block[k << 3] < 0) ? -v->pq : v->pq;
2815 for(k = 1; k < 8; k++) {
2816 block[k] = ac_val[k + 8] * scale;
2817 if(!v->pquantizer && block[k])
2818 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2824 s->block_last_index[n] = i;
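/* Illustrative sketch (assumption, not part of the decoder): the AC
 * scaling above reconstructs each non-zero coefficient as
 *   level * (2*quant + halfstep)                      (uniform quantizer)
 *   level * (2*quant + halfstep) + sign(level)*quant  (non-uniform)
 * where v->pquantizer presumably selects the uniform quantizer: */
static inline int dequant_ac_coeff(int level, int quant, int halfstep, int uniform)
{
    int val = level * (2 * quant + halfstep);
    if (!uniform && val)
        val += (val < 0) ? -quant : quant;
    return val;
}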
2829 /** Decode intra block in intra frames (advanced profile variant) - should be faster than decode_intra_block
2830 * @param v VC1Context
2831 * @param block block to decode
2832 * @param coded whether AC coefficients are present
2833 * @param codingset set of VLC tables used to decode the data
2835 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
2837 GetBitContext *gb = &v->s.gb;
2838 MpegEncContext *s = &v->s;
2839 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2842 int16_t *ac_val, *ac_val2;
2844 int a_avail = v->a_avail, c_avail = v->c_avail;
2845 int use_pred = s->ac_pred;
2848 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2850 /* Get DC differential */
2852 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2854 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2857 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2862 if (dcdiff == 119 /* ESC index value */)
2864 /* TODO: Optimize */
2865 if (mquant == 1) dcdiff = get_bits(gb, 10);
2866 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2867 else dcdiff = get_bits(gb, 8);
2872 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2873 else if (mquant == 2)
2874 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2876 if (get_bits(gb, 1))
2881 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2884 /* Store the quantized DC coeff, used for prediction */
2886 block[0] = dcdiff * s->y_dc_scale;
2888 block[0] = dcdiff * s->c_dc_scale;
2897 /* check if AC is needed at all and adjust direction if needed */
2898 if(!a_avail) dc_pred_dir = 1;
2899 if(!c_avail) dc_pred_dir = 0;
2900 if(!a_avail && !c_avail) use_pred = 0;
2901 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2904 scale = mquant * 2 + v->halfpq;
2906 if(dc_pred_dir) //left
2909 ac_val -= 16 * s->block_wrap[n];
2911 q1 = s->current_picture.qscale_table[mb_pos];
2912 if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
2913 if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2914 if(n && n<4) q2 = q1;
2917 int last = 0, skip, value;
2918 const int8_t *zz_table;
2923 zz_table = vc1_horizontal_zz;
2925 zz_table = vc1_vertical_zz;
2927 zz_table = vc1_normal_zz;
2930 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2934 block[zz_table[i++]] = value;
2937 /* apply AC prediction if needed */
2939 /* scale predictors if needed */
2941 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2942 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2944 if(dc_pred_dir) { //left
2945 for(k = 1; k < 8; k++)
2946 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2948 for(k = 1; k < 8; k++)
2949 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2952 if(dc_pred_dir) { //left
2953 for(k = 1; k < 8; k++)
2954 block[k << 3] += ac_val[k];
2956 for(k = 1; k < 8; k++)
2957 block[k] += ac_val[k + 8];
2961 /* save AC coeffs for further prediction */
2962 for(k = 1; k < 8; k++) {
2963 ac_val2[k] = block[k << 3];
2964 ac_val2[k + 8] = block[k];
2967 /* scale AC coeffs */
2968 for(k = 1; k < 64; k++)
2972 block[k] += (block[k] < 0) ? -mquant : mquant;
2975 if(use_pred) i = 63;
2976 } else { // no AC coeffs
2979 memset(ac_val2, 0, 16 * 2);
2980 if(dc_pred_dir) {//left
2982 memcpy(ac_val2, ac_val, 8 * 2);
2984 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2985 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2986 for(k = 1; k < 8; k++)
2987 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2992 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2994 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2995 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2996 for(k = 1; k < 8; k++)
2997 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3002 /* apply AC prediction if needed */
3004 if(dc_pred_dir) { //left
3005 for(k = 1; k < 8; k++) {
3006 block[k << 3] = ac_val2[k] * scale;
3007 if(!v->pquantizer && block[k << 3])
3008 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
3011 for(k = 1; k < 8; k++) {
3012 block[k] = ac_val2[k + 8] * scale;
3013 if(!v->pquantizer && block[k])
3014 block[k] += (block[k] < 0) ? -mquant : mquant;
3020 s->block_last_index[n] = i;
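/* Illustrative sketch (assumption, not part of the decoder): the
 * availability checks above steer the prediction towards the neighbour
 * that actually exists (assuming, as elsewhere in this decoder, that A is
 * the top and C the left neighbour) and disable AC prediction when
 * neither is available: */
static inline int choose_dc_pred_dir(int dir, int a_avail, int c_avail, int *use_pred)
{
    if (!a_avail)             dir = 1;   /* no top neighbour: predict from the left */
    if (!c_avail)             dir = 0;   /* no left neighbour: predict from the top */
    if (!a_avail && !c_avail) *use_pred = 0;
    return dir;
}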
3025 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
3026 * @param v VC1Context
3027 * @param block block to decode
3028 * @param coded whether AC coefficients are present
3029 * @param mquant block quantizer
3030 * @param codingset set of VLC tables used to decode the data
3032 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
3034 GetBitContext *gb = &v->s.gb;
3035 MpegEncContext *s = &v->s;
3036 int dc_pred_dir = 0; /* Direction of the DC prediction used */
3039 int16_t *ac_val, *ac_val2;
3041 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3042 int a_avail = v->a_avail, c_avail = v->c_avail;
3043 int use_pred = s->ac_pred;
3047 /* XXX: Guard against dumb values of mquant */
3048 mquant = (mquant < 1) ? 0 : ( (mquant>31) ? 31 : mquant );
3050 /* Set DC scale - y and c use the same */
3051 s->y_dc_scale = s->y_dc_scale_table[mquant];
3052 s->c_dc_scale = s->c_dc_scale_table[mquant];
3054 /* Get DC differential */
3056 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3058 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3061 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
3066 if (dcdiff == 119 /* ESC index value */)
3068 /* TODO: Optimize */
3069 if (mquant == 1) dcdiff = get_bits(gb, 10);
3070 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3071 else dcdiff = get_bits(gb, 8);
3076 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
3077 else if (mquant == 2)
3078 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
3080 if (get_bits(gb, 1))
3085 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3088 /* Store the quantized DC coeff, used for prediction */
3091 block[0] = dcdiff * s->y_dc_scale;
3093 block[0] = dcdiff * s->c_dc_scale;
3102 /* check if AC is needed at all and adjust direction if needed */
3103 if(!a_avail) dc_pred_dir = 1;
3104 if(!c_avail) dc_pred_dir = 0;
3105 if(!a_avail && !c_avail) use_pred = 0;
3106 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3109 scale = mquant * 2 + v->halfpq;
3111 if(dc_pred_dir) //left
3114 ac_val -= 16 * s->block_wrap[n];
3116 q1 = s->current_picture.qscale_table[mb_pos];
3117 if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
3118 if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3119 if(n && n<4) q2 = q1;
3122 int last = 0, skip, value;
3123 const int8_t *zz_table;
3126 zz_table = vc1_simple_progressive_8x8_zz;
3129 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3133 block[zz_table[i++]] = value;
3136 /* apply AC prediction if needed */
3138 /* scale predictors if needed */
3140 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3141 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3143 if(dc_pred_dir) { //left
3144 for(k = 1; k < 8; k++)
3145 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3147 for(k = 1; k < 8; k++)
3148 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3151 if(dc_pred_dir) { //left
3152 for(k = 1; k < 8; k++)
3153 block[k << 3] += ac_val[k];
3155 for(k = 1; k < 8; k++)
3156 block[k] += ac_val[k + 8];
3160 /* save AC coeffs for further prediction */
3161 for(k = 1; k < 8; k++) {
3162 ac_val2[k] = block[k << 3];
3163 ac_val2[k + 8] = block[k];
3166 /* scale AC coeffs */
3167 for(k = 1; k < 64; k++)
3171 block[k] += (block[k] < 0) ? -mquant : mquant;
3174 if(use_pred) i = 63;
3175 } else { // no AC coeffs
3178 memset(ac_val2, 0, 16 * 2);
3179 if(dc_pred_dir) {//left
3181 memcpy(ac_val2, ac_val, 8 * 2);
3183 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3184 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3185 for(k = 1; k < 8; k++)
3186 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3191 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3193 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3194 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3195 for(k = 1; k < 8; k++)
3196 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3201 /* apply AC prediction if needed */
3203 if(dc_pred_dir) { //left
3204 for(k = 1; k < 8; k++) {
3205 block[k << 3] = ac_val2[k] * scale;
3206 if(!v->pquantizer && block[k << 3])
3207 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
3210 for(k = 1; k < 8; k++) {
3211 block[k] = ac_val2[k + 8] * scale;
3212 if(!v->pquantizer && block[k])
3213 block[k] += (block[k] < 0) ? -mquant : mquant;
3219 s->block_last_index[n] = i;
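/* Illustrative sketch (assumption, not part of the decoder): the DC
 * differential decoding above uses a VLC, refines small-quantizer values
 * with 2 (quantizer 1) or 1 (quantizer 2) extra bits, and switches to a
 * fixed-length escape code of 10/9/8 bits for quantizer 1/2/>=3 when the
 * escape index 119 is read. The escape read, in isolation: */
static inline int read_dc_diff_escape(GetBitContext *gb, int quant)
{
    if (quant == 1) return get_bits(gb, 10);
    if (quant == 2) return get_bits(gb, 9);
    return get_bits(gb, 8);
}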
3226 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block)
3228 MpegEncContext *s = &v->s;
3229 GetBitContext *gb = &s->gb;
3232 int scale, off, idx, last, skip, value;
3233 int ttblk = ttmb & 7;
3236 ttblk = ttblk_to_tt[v->tt_index][get_vlc2(gb, vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3238 if(ttblk == TT_4X4) {
3239 subblkpat = ~(get_vlc2(gb, vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3241 if((ttblk != TT_8X8 && ttblk != TT_4X4) && (v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))) {
3242 subblkpat = decode012(gb);
3243 if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits
3244 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4;
3245 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8;
3247 scale = 2 * mquant + v->halfpq;
3249 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3250 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3251 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3254 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3255 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
3263 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3267 idx = vc1_simple_progressive_8x8_zz[i++];
3268 block[idx] = value * scale;
3270 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3272 s->dsp.vc1_inv_trans_8x8(block);
3275 for(j = 0; j < 4; j++) {
3276 last = subblkpat & (1 << (3 - j));
3278 off = (j & 1) * 4 + (j & 2) * 16;
3280 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3284 idx = vc1_simple_progressive_4x4_zz[i++];
3285 block[idx + off] = value * scale;
3287 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3289 if(!(subblkpat & (1 << (3 - j))))
3290 s->dsp.vc1_inv_trans_4x4(block, j);
3294 for(j = 0; j < 2; j++) {
3295 last = subblkpat & (1 << (1 - j));
3299 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3303 if(v->profile < PROFILE_ADVANCED)
3304 idx = vc1_simple_progressive_8x4_zz[i++];
3306 idx = vc1_adv_progressive_8x4_zz[i++];
3307 block[idx + off] = value * scale;
3309 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3311 if(!(subblkpat & (1 << (1 - j))))
3312 s->dsp.vc1_inv_trans_8x4(block, j);
3316 for(j = 0; j < 2; j++) {
3317 last = subblkpat & (1 << (1 - j));
3321 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3325 if(v->profile < PROFILE_ADVANCED)
3326 idx = vc1_simple_progressive_4x8_zz[i++];
3328 idx = vc1_adv_progressive_4x8_zz[i++];
3329 block[idx + off] = value * scale;
3331 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3333 if(!(subblkpat & (1 << (1 - j))))
3334 s->dsp.vc1_inv_trans_4x8(block, j);
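/* Illustrative sketch (assumption, not part of the decoder): for the
 * TT_4X4 case above, off = (j & 1) * 4 + (j & 2) * 16 places the four
 * 4x4 sub-blocks at offsets 0, 4, 32 and 36 of the row-major 8x8 block
 * (top-left, top-right, bottom-left, bottom-right): */
static inline int subblock_offset_4x4(int j)
{
    return (j & 1) * 4 + (j & 2) * 16;
}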
3342 /** Decode one P-frame MB (in Simple/Main profile)
3344 static int vc1_decode_p_mb(VC1Context *v)
3346 MpegEncContext *s = &v->s;
3347 GetBitContext *gb = &s->gb;
3349 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3350 int cbp; /* coded block pattern */
3351 int mqdiff, mquant; /* MB quantization */
3352 int ttmb = v->ttfrm; /* MB Transform type */
3355 static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3356 offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3357 int mb_has_coeffs = 1; /* last_flag */
3358 int dmv_x, dmv_y; /* Differential MV components */
3359 int index, index1; /* LUT indices */
3360 int val, sign; /* temp values */
3361 int first_block = 1;
3363 int skipped, fourmv;
3365 mquant = v->pq; /* Provisional initialization, may be overridden below */
3367 if (v->mv_type_is_raw)
3368 fourmv = get_bits1(gb);
3370 fourmv = v->mv_type_mb_plane[mb_pos];
3372 skipped = get_bits1(gb);
3374 skipped = v->s.mbskip_table[mb_pos];
3376 s->dsp.clear_blocks(s->block[0]);
3378 if (!fourmv) /* 1MV mode */
3382 GET_MVDATA(dmv_x, dmv_y);
3385 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3386 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3388 s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3389 vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3391 /* FIXME Set DC val for inter block ? */
3392 if (s->mb_intra && !mb_has_coeffs)
3395 s->ac_pred = get_bits(gb, 1);
3398 else if (mb_has_coeffs)
3400 if (s->mb_intra) s->ac_pred = get_bits(gb, 1);
3401 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3409 s->current_picture.qscale_table[mb_pos] = mquant;
3411 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3412 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table,
3413 VC1_TTMB_VLC_BITS, 2);
3414 if(!s->mb_intra) vc1_mc_1mv(v, 0);
3418 s->dc_val[0][s->block_index[i]] = 0;
3420 val = ((cbp >> (5 - i)) & 1);
3421 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3422 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3424 /* check if prediction blocks A and C are available */
3425 v->a_avail = v->c_avail = 0;
3426 if(i == 2 || i == 3 || !s->first_slice_line)
3427 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3428 if(i == 1 || i == 3 || s->mb_x)
3429 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3431 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3432 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3433 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3434 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3435 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3436 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3437 if(v->pq >= 9 && v->overlap) {
3439 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3441 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3444 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3445 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3447 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3448 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3455 for(i = 0; i < 6; i++) {
3456 v->mb_type[0][s->block_index[i]] = 0;
3457 s->dc_val[0][s->block_index[i]] = 0;
3459 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3460 s->current_picture.qscale_table[mb_pos] = 0;
3461 vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
3468 if (!skipped /* unskipped MB */)
3470 int intra_count = 0, coded_inter = 0;
3471 int is_intra[6], is_coded[6];
3473 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3476 val = ((cbp >> (5 - i)) & 1);
3477 s->dc_val[0][s->block_index[i]] = 0;
3484 GET_MVDATA(dmv_x, dmv_y);
3486 vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3487 if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
3488 intra_count += s->mb_intra;
3489 is_intra[i] = s->mb_intra;
3490 is_coded[i] = mb_has_coeffs;
3493 is_intra[i] = (intra_count >= 3);
3496 if(i == 4) vc1_mc_4mv_chroma(v);
3497 v->mb_type[0][s->block_index[i]] = is_intra[i];
3498 if(!coded_inter) coded_inter = !is_intra[i] & is_coded[i];
3500 // if there are no coded blocks then don't do anything more
3501 if(!intra_count && !coded_inter) return 0;
3504 s->current_picture.qscale_table[mb_pos] = mquant;
3505 /* test if block is intra and has pred */
3510 if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3511 || ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) {
3516 if(intrapred)s->ac_pred = get_bits(gb, 1);
3517 else s->ac_pred = 0;
3519 if (!v->ttmbf && coded_inter)
3520 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3524 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3525 s->mb_intra = is_intra[i];
3527 /* check if prediction blocks A and C are available */
3528 v->a_avail = v->c_avail = 0;
3529 if(i == 2 || i == 3 || !s->first_slice_line)
3530 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3531 if(i == 1 || i == 3 || s->mb_x)