2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2006 Konstantin Shishkov
4 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
32 #include "mpegvideo.h"
34 #include "vc1acdata.h"
39 extern const uint32_t ff_table0_dc_lum[120][2], ff_table1_dc_lum[120][2];
40 extern const uint32_t ff_table0_dc_chroma[120][2], ff_table1_dc_chroma[120][2];
41 extern VLC ff_msmp4_dc_luma_vlc[2], ff_msmp4_dc_chroma_vlc[2];
42 #define MB_INTRA_VLC_BITS 9
43 extern VLC ff_msmp4_mb_i_vlc;
44 extern const uint16_t ff_msmp4_mb_i_table[64][2];
47 static const uint16_t table_mb_intra[64][2];
50 /** Available Profiles */
55 PROFILE_COMPLEX, ///< TODO: WMV9 specific
60 /** Sequence quantizer mode */
63 QUANT_FRAME_IMPLICIT, ///< Implicitly specified at frame level
64 QUANT_FRAME_EXPLICIT, ///< Explicitly specified at frame level
65 QUANT_NON_UNIFORM, ///< Non-uniform quant used for all frames
66 QUANT_UNIFORM ///< Uniform quant used for all frames
70 /** Where quant can be changed */
74 DQPROFILE_DOUBLE_EDGES,
75 DQPROFILE_SINGLE_EDGE,
80 /** @name Where quant can be changed
91 /** Which pair of edges is quantized with ALTPQUANT */
94 DQDOUBLE_BEDGE_TOPLEFT,
95 DQDOUBLE_BEDGE_TOPRIGHT,
96 DQDOUBLE_BEDGE_BOTTOMRIGHT,
97 DQDOUBLE_BEDGE_BOTTOMLEFT
101 /** MV modes for P frames */
104 MV_PMODE_1MV_HPEL_BILIN,
108 MV_PMODE_INTENSITY_COMP
112 /** @name MV types for B frames */
117 BMV_TYPE_INTERPOLATED
121 /** @name Block types for P/B frames */
123 enum TransformTypes {
127 TT_8X4, //Both halves
130 TT_4X8, //Both halves
135 /** Table for conversion between TTBLK and TTMB */
136 static const int ttblk_to_tt[3][8] = {
137 { TT_8X4, TT_4X8, TT_8X8, TT_4X4, TT_8X4_TOP, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT },
138 { TT_8X8, TT_4X8_RIGHT, TT_4X8_LEFT, TT_4X4, TT_8X4, TT_4X8, TT_8X4_BOTTOM, TT_8X4_TOP },
139 { TT_8X8, TT_4X8, TT_4X4, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT, TT_8X4, TT_8X4_TOP }
142 static const int ttfrm_to_tt[4] = { TT_8X8, TT_8X4, TT_4X8, TT_4X4 };
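/* Illustrative sketch (not decoder code) of how the remap tables above are
 * meant to be consumed, assuming a 2-bit TTFRM value read elsewhere:
 *     int ttfrm_bits = get_bits(gb, 2);         // 0..3
 *     int tt         = ttfrm_to_tt[ttfrm_bits]; // e.g. 2 -> TT_4X8
 * ttblk_to_tt[] works the same way for the per-block TTBLK index, with the
 * outer dimension selected by the PQUANT-dependent tt_index. */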
144 /** MV P mode - the 5th element is only used for mode 1 */
145 static const uint8_t mv_pmode_table[2][5] = {
146 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_MIXED_MV },
147 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_1MV_HPEL_BILIN }
149 static const uint8_t mv_pmode_table2[2][4] = {
150 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_MIXED_MV },
151 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_1MV_HPEL_BILIN }
154 /** One more frame type */
157 static const int fps_nr[5] = { 24, 25, 30, 50, 60 },
158 fps_dr[2] = { 1000, 1001 };
159 static const uint8_t pquant_table[3][32] = {
160 { /* Implicit quantizer */
161 0, 1, 2, 3, 4, 5, 6, 7, 8, 6, 7, 8, 9, 10, 11, 12,
162 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 29, 31
164 { /* Explicit quantizer, pquantizer uniform */
165 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
166 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
168 { /* Explicit quantizer, pquantizer non-uniform */
169 0, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
170 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31
174 /** @name VC-1 VLC tables and defines
175 * @todo TODO move this into the context
178 #define VC1_BFRACTION_VLC_BITS 7
179 static VLC vc1_bfraction_vlc;
180 #define VC1_IMODE_VLC_BITS 4
181 static VLC vc1_imode_vlc;
182 #define VC1_NORM2_VLC_BITS 3
183 static VLC vc1_norm2_vlc;
184 #define VC1_NORM6_VLC_BITS 9
185 static VLC vc1_norm6_vlc;
186 /* Could be optimized, one table only needs 8 bits */
187 #define VC1_TTMB_VLC_BITS 9 //12
188 static VLC vc1_ttmb_vlc[3];
189 #define VC1_MV_DIFF_VLC_BITS 9 //15
190 static VLC vc1_mv_diff_vlc[4];
191 #define VC1_CBPCY_P_VLC_BITS 9 //14
192 static VLC vc1_cbpcy_p_vlc[4];
193 #define VC1_4MV_BLOCK_PATTERN_VLC_BITS 6
194 static VLC vc1_4mv_block_pattern_vlc[4];
195 #define VC1_TTBLK_VLC_BITS 5
196 static VLC vc1_ttblk_vlc[3];
197 #define VC1_SUBBLKPAT_VLC_BITS 6
198 static VLC vc1_subblkpat_vlc[3];
200 static VLC vc1_ac_coeff_table[8];
204 CS_HIGH_MOT_INTRA = 0,
214 /** @name Overlap conditions for Advanced Profile */
225 * @fixme Change size wherever another size is more efficient
226 * Many members are only used for Advanced Profile
228 typedef struct VC1Context{
233 /** Simple/Main Profile sequence header */
235 int res_sm; ///< reserved, 2b
236 int res_x8; ///< reserved
237 int multires; ///< frame-level RESPIC syntax element present
238 int res_fasttx; ///< reserved, always 1
239 int res_transtab; ///< reserved, always 0
240 int rangered; ///< RANGEREDFRM (range reduction) syntax element present
242 int res_rtm_flag; ///< reserved, set to 1
243 int reserved; ///< reserved
246 /** Advanced Profile */
248 int level; ///< 3bits, for Advanced/Simple Profile, provided by TS layer
249 int chromaformat; ///< 2bits, 1=4:2:0 (the only value defined)
250 int postprocflag; ///< Per-frame processing suggestion flag present
251 int broadcast; ///< TFF/RFF present
252 int interlace; ///< Progressive/interlaced (RPTFTM syntax element)
253 int tfcntrflag; ///< TFCNTR present
254 int panscanflag; ///< NUMPANSCANWIN, TOPLEFT{X,Y}, BOTRIGHT{X,Y} present
255 int extended_dmv; ///< Additional extended dmv range at P/B frame-level
256 int color_prim; ///< 8bits, chromaticity coordinates of the color primaries
257 int transfer_char; ///< 8bits, Opto-electronic transfer characteristics
258 int matrix_coef; ///< 8bits, Color primaries->YCbCr transform matrix
259 int hrd_param_flag; ///< Presence of Hypothetical Reference
260 ///< Decoder parameters
261 int psf; ///< Progressive Segmented Frame
264 /** Sequence header data for all Profiles
265 * TODO: choose between ints, uint8_ts and monobit flags
268 int profile; ///< 2bits, Profile
269 int frmrtq_postproc; ///< 3bits, quantized framerate-based postprocessing strength
270 int bitrtq_postproc; ///< 5bits, quantized bitrate-based postprocessing strength
271 int fastuvmc; ///< Rounding of qpel vector to hpel ? (not in Simple)
272 int extended_mv; ///< Ext MV in P/B (not in Simple)
273 int dquant; ///< How qscale varies with MBs, 2bits (not in Simple)
274 int vstransform; ///< variable-size [4|8]x[4|8] transform type + info
275 int overlap; ///< overlapped transforms in use
276 int quantizer_mode; ///< 2bits, quantizer mode used for sequence, see QUANT_*
277 int finterpflag; ///< INTERPFRM present
280 /** Frame decoding info for all profiles */
282 uint8_t mv_mode; ///< MV coding mode
283 uint8_t mv_mode2; ///< Secondary MV coding mode (B frames)
284 int k_x; ///< Number of bits for MVs (depends on MV range)
285 int k_y; ///< Number of bits for MVs (depends on MV range)
286 int range_x, range_y; ///< MV range
287 uint8_t pq, altpq; ///< Current/alternate frame quantizer scale
288 /** pquant parameters */
295 /** AC coding set indexes
296 * @see 8.1.1.10, p(1)10
299 int c_ac_table_index; ///< Chroma index from ACFRM element
300 int y_ac_table_index; ///< Luma index from AC2FRM element
302 int ttfrm; ///< Transform type for the whole frame, valid when coded at frame level
303 uint8_t ttmbf; ///< Transform type coded at frame level (TTMBF) flag
304 uint8_t ttblk4x4; ///< Value of ttblk which indicates a 4x4 transform
305 int codingset; ///< index of current table set from 11.8 to use for luma block decoding
306 int codingset2; ///< index of current table set from 11.8 to use for chroma block decoding
307 int pqindex; ///< raw pqindex used in coding set selection
308 int a_avail, c_avail;
309 uint8_t *mb_type_base, *mb_type[3];
312 /** Luma compensation parameters */
317 int16_t bfraction; ///< Relative position % anchors=> how to scale MVs
318 uint8_t halfpq; ///< Uniform quant over image and qp+.5
319 uint8_t respic; ///< Frame-level flag for resized images
320 int buffer_fullness; ///< HRD info
322 * -# 0 -> [-64, 63.f] x [-32, 31.f]
323 * -# 1 -> [-128, 127.f] x [-64, 63.f]
324 * -# 2 -> [-512, 511.f] x [-128, 127.f]
325 * -# 3 -> [-1024, 1023.f] x [-256, 255.f]
328 uint8_t pquantizer; ///< Uniform (over sequence) quantizer in use
329 VLC *cbpcy_vlc; ///< CBPCY VLC table
330 int tt_index; ///< Index for Transform Type tables
331 uint8_t* mv_type_mb_plane; ///< bitplane for mv_type == (4MV)
332 uint8_t* direct_mb_plane; ///< bitplane for "direct" MBs
333 int mv_type_is_raw; ///< mv type mb plane is not coded
334 int dmb_is_raw; ///< direct mb plane is raw
335 int skip_is_raw; ///< skip mb plane is not coded
336 uint8_t luty[256], lutuv[256]; // lookup tables used for intensity compensation
337 int use_ic; ///< use intensity compensation in B-frames
338 int rnd; ///< rounding control
340 /** Frame decoding info for S/M profiles only */
342 uint8_t rangeredfrm; ///< out_sample = CLIP((in_sample-128)*2+128)
346 /** Frame decoding info for Advanced profile */
348 uint8_t fcm; ///< 0->Progressive, 2->Frame-Interlace, 3->Field-Interlace
349 uint8_t numpanscanwin;
351 uint8_t rptfrm, tff, rff;
354 uint16_t bottomrightx;
355 uint16_t bottomrighty;
358 int hrd_num_leaky_buckets;
359 uint8_t bit_rate_exponent;
360 uint8_t buffer_size_exponent;
361 uint8_t* acpred_plane; ///< AC prediction flags bitplane
363 uint8_t* over_flags_plane; ///< Overflags bitplane
366 uint16_t *hrd_rate, *hrd_buffer;
367 uint8_t *hrd_fullness;
368 uint8_t range_mapy_flag;
369 uint8_t range_mapuv_flag;
379 * Get unary code of limited length
380 * @fixme FIXME Slow and ugly
381 * @param gb GetBitContext
382 * @param[in] stop The bitstop value (unary code of 1's or 0's)
383 * @param[in] len Maximum length
384 * @return Unary length/index
386 static int get_prefix(GetBitContext *gb, int stop, int len)
391 for(i = 0; i < len && get_bits1(gb) != stop; i++);
393 /* int i = 0, tmp = !stop;
395 while (i != len && tmp != stop)
397 tmp = get_bits(gb, 1);
400 if (i == len && tmp != stop) return len+1;
407 UPDATE_CACHE(re, gb);
408 buf=GET_CACHE(re, gb); //Still not sure
409 if (stop) buf = ~buf;
411 log= av_log2(-buf); //FIXME: -?
413 LAST_SKIP_BITS(re, gb, log+1);
414 CLOSE_READER(re, gb);
418 LAST_SKIP_BITS(re, gb, limit);
419 CLOSE_READER(re, gb);
424 static inline int decode210(GetBitContext *gb){
430 return 2 - get_bits1(gb);
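/* Worked examples for the two small bitstream readers above (illustrative):
 *   get_prefix(gb, 0, 3) on bits "110..." -> 2  (two 1s before the stop bit)
 *   get_prefix(gb, 0, 3) on bits "111..." -> 3  (length limit reached)
 *   decode210(gb): bit "1" -> 0, bits "01" -> 1, bits "00" -> 2 */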
434 * Init VC-1 specific tables and VC1Context members
435 * @param v The VC1Context to initialize
438 static int vc1_init_common(VC1Context *v)
443 v->hrd_rate = v->hrd_buffer = NULL;
449 init_vlc(&vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
450 vc1_bfraction_bits, 1, 1,
451 vc1_bfraction_codes, 1, 1, 1);
452 init_vlc(&vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
453 vc1_norm2_bits, 1, 1,
454 vc1_norm2_codes, 1, 1, 1);
455 init_vlc(&vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
456 vc1_norm6_bits, 1, 1,
457 vc1_norm6_codes, 2, 2, 1);
458 init_vlc(&vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
459 vc1_imode_bits, 1, 1,
460 vc1_imode_codes, 1, 1, 1);
463 init_vlc(&vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
464 vc1_ttmb_bits[i], 1, 1,
465 vc1_ttmb_codes[i], 2, 2, 1);
466 init_vlc(&vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
467 vc1_ttblk_bits[i], 1, 1,
468 vc1_ttblk_codes[i], 1, 1, 1);
469 init_vlc(&vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
470 vc1_subblkpat_bits[i], 1, 1,
471 vc1_subblkpat_codes[i], 1, 1, 1);
475 init_vlc(&vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
476 vc1_4mv_block_pattern_bits[i], 1, 1,
477 vc1_4mv_block_pattern_codes[i], 1, 1, 1);
478 init_vlc(&vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
479 vc1_cbpcy_p_bits[i], 1, 1,
480 vc1_cbpcy_p_codes[i], 2, 2, 1);
481 init_vlc(&vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
482 vc1_mv_diff_bits[i], 1, 1,
483 vc1_mv_diff_codes[i], 2, 2, 1);
486 init_vlc(&vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
487 &vc1_ac_tables[i][0][1], 8, 4,
488 &vc1_ac_tables[i][0][0], 8, 4, 1);
489 init_vlc(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64,
490 &ff_msmp4_mb_i_table[0][1], 4, 2,
491 &ff_msmp4_mb_i_table[0][0], 4, 2, 1);
496 v->mvrange = 0; /* 7.1.1.18, p80 */
501 /***********************************************************************/
503 * @defgroup bitplane VC-1 Bitplane decoding
508 /** @addtogroup bitplane
521 /** @} */ //imode defines
523 /** Decode rows by checking if they are skipped
524 * @param plane Buffer to store decoded bits
525 * @param[in] width Width of this buffer
526 * @param[in] height Height of this buffer
527 * @param[in] stride Stride of this buffer
529 static void decode_rowskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
532 for (y=0; y<height; y++){
533 if (!get_bits(gb, 1)) //rowskip
534 memset(plane, 0, width);
536 for (x=0; x<width; x++)
537 plane[x] = get_bits(gb, 1);
542 /** Decode columns by checking if they are skipped
543 * @param plane Buffer to store decoded bits
544 * @param[in] width Width of this buffer
545 * @param[in] height Height of this buffer
546 * @param[in] stride Stride of this buffer
547 * @fixme FIXME: Optimize
549 static void decode_colskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
552 for (x=0; x<width; x++){
553 if (!get_bits(gb, 1)) //colskip
554 for (y=0; y<height; y++)
557 for (y=0; y<height; y++)
558 plane[y*stride] = get_bits(gb, 1);
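/* Bitstream layout consumed by the two helpers above (illustrative summary):
 *   decode_rowskip(): one ROWSKIP bit per row; 0 -> the row is all zero,
 *                     1 -> 'width' raw bits follow for that row
 *   decode_colskip(): the same scheme per column, reading 'height' raw bits
 *                     down the column when the COLSKIP bit is 1 */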
563 /** Decode a bitplane's bits
564 * @param data Buffer to store the decoded bits
565 * @param v VC-1 context for bit reading and logging
567 * @fixme FIXME: Optimize
569 static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
571 GetBitContext *gb = &v->s.gb;
573 int imode, x, y, code, offset;
574 uint8_t invert, *planep = data;
575 int width, height, stride;
577 width = v->s.mb_width;
578 height = v->s.mb_height;
579 stride = v->s.mb_stride;
580 invert = get_bits(gb, 1);
581 imode = get_vlc2(gb, vc1_imode_vlc.table, VC1_IMODE_VLC_BITS, 1);
587 //Data is actually read in the MB layer (same for all tests == "raw")
588 *raw_flag = 1; //invert ignored
592 if ((height * width) & 1)
594 *planep++ = get_bits(gb, 1);
598 // decode bitplane as one long line
599 for (y = offset; y < height * width; y += 2) {
600 code = get_vlc2(gb, vc1_norm2_vlc.table, VC1_NORM2_VLC_BITS, 1);
601 *planep++ = code & 1;
603 if(offset == width) {
605 planep += stride - width;
607 *planep++ = code >> 1;
609 if(offset == width) {
611 planep += stride - width;
617 if(!(height % 3) && (width % 3)) { // use 2x3 decoding
618 for(y = 0; y < height; y+= 3) {
619 for(x = width & 1; x < width; x += 2) {
620 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
622 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
625 planep[x + 0] = (code >> 0) & 1;
626 planep[x + 1] = (code >> 1) & 1;
627 planep[x + 0 + stride] = (code >> 2) & 1;
628 planep[x + 1 + stride] = (code >> 3) & 1;
629 planep[x + 0 + stride * 2] = (code >> 4) & 1;
630 planep[x + 1 + stride * 2] = (code >> 5) & 1;
632 planep += stride * 3;
634 if(width & 1) decode_colskip(data, 1, height, stride, &v->s.gb);
636 planep += (height & 1) * stride;
637 for(y = height & 1; y < height; y += 2) {
638 for(x = width % 3; x < width; x += 3) {
639 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
641 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
644 planep[x + 0] = (code >> 0) & 1;
645 planep[x + 1] = (code >> 1) & 1;
646 planep[x + 2] = (code >> 2) & 1;
647 planep[x + 0 + stride] = (code >> 3) & 1;
648 planep[x + 1 + stride] = (code >> 4) & 1;
649 planep[x + 2 + stride] = (code >> 5) & 1;
651 planep += stride * 2;
654 if(x) decode_colskip(data , x, height , stride, &v->s.gb);
655 if(height & 1) decode_rowskip(data+x, width - x, 1, stride, &v->s.gb);
659 decode_rowskip(data, width, height, stride, &v->s.gb);
662 decode_colskip(data, width, height, stride, &v->s.gb);
667 /* Applying diff operator */
668 if (imode == IMODE_DIFF2 || imode == IMODE_DIFF6)
672 for (x=1; x<width; x++)
673 planep[x] ^= planep[x-1];
674 for (y=1; y<height; y++)
677 planep[0] ^= planep[-stride];
678 for (x=1; x<width; x++)
680 if (planep[x-1] != planep[x-stride]) planep[x] ^= invert;
681 else planep[x] ^= planep[x-1];
688 for (x=0; x<stride*height; x++) planep[x] = !planep[x]; //FIXME stride
690 return (imode<<1) + invert;
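/* The value returned above packs the decoded coding mode and the INVERT bit
 * together: callers recover them as Imode = status >> 1, Invert = status & 1,
 * so e.g. an inverted NORM-2 plane yields (IMODE_NORM2 << 1) | 1
 * (illustrative; see the av_log() calls in the frame header parsers). */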
693 /** @} */ //Bitplane group
695 /***********************************************************************/
696 /** VOP Dquant decoding
697 * @param v VC-1 Context
699 static int vop_dquant_decoding(VC1Context *v)
701 GetBitContext *gb = &v->s.gb;
707 pqdiff = get_bits(gb, 3);
708 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
709 else v->altpq = v->pq + pqdiff + 1;
713 v->dquantfrm = get_bits(gb, 1);
716 v->dqprofile = get_bits(gb, 2);
717 switch (v->dqprofile)
719 case DQPROFILE_SINGLE_EDGE:
720 case DQPROFILE_DOUBLE_EDGES:
721 v->dqsbedge = get_bits(gb, 2);
723 case DQPROFILE_ALL_MBS:
724 v->dqbilevel = get_bits(gb, 1);
725 default: break; //Forbidden ?
727 if (v->dqbilevel || v->dqprofile != DQPROFILE_ALL_MBS)
729 pqdiff = get_bits(gb, 3);
730 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
731 else v->altpq = v->pq + pqdiff + 1;
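/* Worked example for the PQDIFF handling above (illustrative): with
 * v->pq == 10, PQDIFF == 3 gives v->altpq = 10 + 3 + 1 = 14, while
 * PQDIFF == 7 means a 5-bit absolute ALTPQUANT value follows instead. */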
738 /** Put block onto picture
740 static void vc1_put_block(VC1Context *v, DCTELEM block[6][64])
744 DSPContext *dsp = &v->s.dsp;
748 for(k = 0; k < 6; k++)
749 for(j = 0; j < 8; j++)
750 for(i = 0; i < 8; i++)
751 block[k][i + j*8] = ((block[k][i + j*8] - 128) << 1) + 128;
754 ys = v->s.current_picture.linesize[0];
755 us = v->s.current_picture.linesize[1];
756 vs = v->s.current_picture.linesize[2];
759 dsp->put_pixels_clamped(block[0], Y, ys);
760 dsp->put_pixels_clamped(block[1], Y + 8, ys);
762 dsp->put_pixels_clamped(block[2], Y, ys);
763 dsp->put_pixels_clamped(block[3], Y + 8, ys);
765 if(!(v->s.flags & CODEC_FLAG_GRAY)) {
766 dsp->put_pixels_clamped(block[4], v->s.dest[1], us);
767 dsp->put_pixels_clamped(block[5], v->s.dest[2], vs);
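/* Range-reduction sketch (illustrative): when RANGEREDFRM is set, decoded
 * samples are expanded as out = ((in - 128) << 1) + 128, e.g. 100 -> 72 and
 * 160 -> 192; conversely, reference pixels fed to motion compensation are
 * shrunk with the inverse ((x - 128) >> 1) + 128 before prediction. */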
771 /** Do motion compensation over 1 macroblock
772 * Mostly adapted from hpel_motion() and qpel_motion() in mpegvideo.c
774 static void vc1_mc_1mv(VC1Context *v, int dir)
776 MpegEncContext *s = &v->s;
777 DSPContext *dsp = &v->s.dsp;
778 uint8_t *srcY, *srcU, *srcV;
779 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
781 if(!v->s.last_picture.data[0])return;
783 mx = s->mv[dir][0][0];
784 my = s->mv[dir][0][1];
786 // store motion vectors for further use in B frames
787 if(s->pict_type == P_TYPE) {
788 s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
789 s->current_picture.motion_val[1][s->block_index[0]][1] = my;
791 uvmx = (mx + ((mx & 3) == 3)) >> 1;
792 uvmy = (my + ((my & 3) == 3)) >> 1;
794 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
795 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
798 srcY = s->last_picture.data[0];
799 srcU = s->last_picture.data[1];
800 srcV = s->last_picture.data[2];
802 srcY = s->next_picture.data[0];
803 srcU = s->next_picture.data[1];
804 srcV = s->next_picture.data[2];
807 src_x = s->mb_x * 16 + (mx >> 2);
808 src_y = s->mb_y * 16 + (my >> 2);
809 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
810 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
812 src_x = clip( src_x, -16, s->mb_width * 16);
813 src_y = clip( src_y, -16, s->mb_height * 16);
814 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
815 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
817 srcY += src_y * s->linesize + src_x;
818 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
819 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
821 /* for grayscale we should not try to read from unknown area */
822 if(s->flags & CODEC_FLAG_GRAY) {
823 srcU = s->edge_emu_buffer + 18 * s->linesize;
824 srcV = s->edge_emu_buffer + 18 * s->linesize;
827 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
828 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
829 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
830 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
832 srcY -= s->mspel * (1 + s->linesize);
833 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
834 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
835 srcY = s->edge_emu_buffer;
836 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
837 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
838 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
839 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
842 /* if we deal with range reduction we need to scale source blocks */
848 for(j = 0; j < 17 + s->mspel*2; j++) {
849 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
852 src = srcU; src2 = srcV;
853 for(j = 0; j < 9; j++) {
854 for(i = 0; i < 9; i++) {
855 src[i] = ((src[i] - 128) >> 1) + 128;
856 src2[i] = ((src2[i] - 128) >> 1) + 128;
858 src += s->uvlinesize;
859 src2 += s->uvlinesize;
862 /* if we deal with intensity compensation we need to scale source blocks */
863 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
868 for(j = 0; j < 17 + s->mspel*2; j++) {
869 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]];
872 src = srcU; src2 = srcV;
873 for(j = 0; j < 9; j++) {
874 for(i = 0; i < 9; i++) {
875 src[i] = v->lutuv[src[i]];
876 src2[i] = v->lutuv[src2[i]];
878 src += s->uvlinesize;
879 src2 += s->uvlinesize;
882 srcY += s->mspel * (1 + s->linesize);
886 dxy = ((my & 3) << 2) | (mx & 3);
887 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
888 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
889 srcY += s->linesize * 8;
890 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
891 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
892 } else { // hpel mc - always used for luma
893 dxy = (my & 2) | ((mx & 2) >> 1);
896 dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
898 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
901 if(s->flags & CODEC_FLAG_GRAY) return;
902 /* Chroma MC always uses qpel bilinear */
903 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
907 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
908 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
910 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
911 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
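/* Worked example of the luma->chroma MV derivation used above (illustrative):
 * a quarter-pel luma component mx == 7 gives uvmx = (7 + 1) >> 1 == 4, i.e.
 * the "3/4-pel" phase is pushed up to the next half-pel; with FASTUVMC the
 * result is additionally rounded towards an even (half-pel) position, so an
 * odd value such as uvmx == 3 becomes 2. */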
915 /** Do motion compensation for 4-MV macroblock - luminance block
917 static void vc1_mc_4mv_luma(VC1Context *v, int n)
919 MpegEncContext *s = &v->s;
920 DSPContext *dsp = &v->s.dsp;
922 int dxy, mx, my, src_x, src_y;
925 if(!v->s.last_picture.data[0])return;
928 srcY = s->last_picture.data[0];
930 off = s->linesize * 4 * (n&2) + (n&1) * 8;
932 src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
933 src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);
935 src_x = clip( src_x, -16, s->mb_width * 16);
936 src_y = clip( src_y, -16, s->mb_height * 16);
938 srcY += src_y * s->linesize + src_x;
940 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
941 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
942 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
943 srcY -= s->mspel * (1 + s->linesize);
944 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
945 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
946 srcY = s->edge_emu_buffer;
947 /* if we deal with range reduction we need to scale source blocks */
953 for(j = 0; j < 9 + s->mspel*2; j++) {
954 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
958 /* if we deal with intensity compensation we need to scale source blocks */
959 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
964 for(j = 0; j < 9 + s->mspel*2; j++) {
965 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
969 srcY += s->mspel * (1 + s->linesize);
973 dxy = ((my & 3) << 2) | (mx & 3);
974 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
975 } else { // hpel mc - always used for luma
976 dxy = (my & 2) | ((mx & 2) >> 1);
978 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
980 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
984 static inline int median4(int a, int b, int c, int d)
987 if(c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
988 else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
990 if(c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
991 else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
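/* median4() averages the two middle values of its four arguments, e.g.
 * median4(1, 5, 3, 9) == (3 + 5) / 2 == 4 (illustrative example). */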
996 /** Do motion compensation for 4-MV macroblock - both chroma blocks
998 static void vc1_mc_4mv_chroma(VC1Context *v)
1000 MpegEncContext *s = &v->s;
1001 DSPContext *dsp = &v->s.dsp;
1002 uint8_t *srcU, *srcV;
1003 int uvdxy, uvmx, uvmy, uvsrc_x, uvsrc_y;
1004 int i, idx, tx = 0, ty = 0;
1005 int mvx[4], mvy[4], intra[4];
1006 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
1008 if(!v->s.last_picture.data[0])return;
1009 if(s->flags & CODEC_FLAG_GRAY) return;
1011 for(i = 0; i < 4; i++) {
1012 mvx[i] = s->mv[0][i][0];
1013 mvy[i] = s->mv[0][i][1];
1014 intra[i] = v->mb_type[0][s->block_index[i]];
1017 /* calculate chroma MV vector from four luma MVs */
1018 idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0];
1019 if(!idx) { // all blocks are inter
1020 tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
1021 ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
1022 } else if(count[idx] == 1) { // 3 inter blocks
1025 tx = mid_pred(mvx[1], mvx[2], mvx[3]);
1026 ty = mid_pred(mvy[1], mvy[2], mvy[3]);
1029 tx = mid_pred(mvx[0], mvx[2], mvx[3]);
1030 ty = mid_pred(mvy[0], mvy[2], mvy[3]);
1033 tx = mid_pred(mvx[0], mvx[1], mvx[3]);
1034 ty = mid_pred(mvy[0], mvy[1], mvy[3]);
1037 tx = mid_pred(mvx[0], mvx[1], mvx[2]);
1038 ty = mid_pred(mvy[0], mvy[1], mvy[2]);
1041 } else if(count[idx] == 2) {
1043 for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;}
1044 for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;}
1045 tx = (mvx[t1] + mvx[t2]) / 2;
1046 ty = (mvy[t1] + mvy[t2]) / 2;
1048 return; //no need to do MC for intra blocks
1050 s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
1051 s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
1052 uvmx = (tx + ((tx&3) == 3)) >> 1;
1053 uvmy = (ty + ((ty&3) == 3)) >> 1;
1055 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
1056 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
1059 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1060 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1062 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
1063 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
1064 srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1065 srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1066 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1067 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
1068 || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
1069 ff_emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize, 8+1, 8+1,
1070 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1071 ff_emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1,
1072 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1073 srcU = s->edge_emu_buffer;
1074 srcV = s->edge_emu_buffer + 16;
1076 /* if we deal with range reduction we need to scale source blocks */
1077 if(v->rangeredfrm) {
1079 uint8_t *src, *src2;
1081 src = srcU; src2 = srcV;
1082 for(j = 0; j < 9; j++) {
1083 for(i = 0; i < 9; i++) {
1084 src[i] = ((src[i] - 128) >> 1) + 128;
1085 src2[i] = ((src2[i] - 128) >> 1) + 128;
1087 src += s->uvlinesize;
1088 src2 += s->uvlinesize;
1091 /* if we deal with intensity compensation we need to scale source blocks */
1092 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1094 uint8_t *src, *src2;
1096 src = srcU; src2 = srcV;
1097 for(j = 0; j < 9; j++) {
1098 for(i = 0; i < 9; i++) {
1099 src[i] = v->lutuv[src[i]];
1100 src2[i] = v->lutuv[src2[i]];
1102 src += s->uvlinesize;
1103 src2 += s->uvlinesize;
1108 /* Chroma MC always uses qpel bilinear */
1109 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
1113 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1114 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1116 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1117 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1121 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb);
1124 * Decode Simple/Main Profiles sequence header
1125 * @see Figure 7-8, p16-17
1126 * @param avctx Codec context
1127 * @param gb GetBit context initialized from Codec context extra_data
1130 static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
1132 VC1Context *v = avctx->priv_data;
1134 av_log(avctx, AV_LOG_DEBUG, "Header: %0X\n", show_bits(gb, 32));
1135 v->profile = get_bits(gb, 2);
1136 if (v->profile == 2)
1138 av_log(avctx, AV_LOG_ERROR, "Profile value 2 is forbidden (and WMV3 Complex Profile is unsupported)\n");
1142 if (v->profile == PROFILE_ADVANCED)
1144 return decode_sequence_header_adv(v, gb);
1148 v->res_sm = get_bits(gb, 2); //reserved
1151 av_log(avctx, AV_LOG_ERROR,
1152 "Reserved RES_SM=%i is forbidden\n", v->res_sm);
1158 v->frmrtq_postproc = get_bits(gb, 3); //common
1159 // (bitrate-32kbps)/64kbps
1160 v->bitrtq_postproc = get_bits(gb, 5); //common
1161 v->s.loop_filter = get_bits(gb, 1); //common
1162 if(v->s.loop_filter == 1 && v->profile == PROFILE_SIMPLE)
1164 av_log(avctx, AV_LOG_ERROR,
1165 "LOOPFILTER shell not be enabled in simple profile\n");
1168 v->res_x8 = get_bits(gb, 1); //reserved
1171 av_log(avctx, AV_LOG_ERROR,
1172 "1 for reserved RES_X8 is forbidden\n");
1175 v->multires = get_bits(gb, 1);
1176 v->res_fasttx = get_bits(gb, 1);
1179 av_log(avctx, AV_LOG_ERROR,
1180 "0 for reserved RES_FASTTX is forbidden\n");
1184 v->fastuvmc = get_bits(gb, 1); //common
1185 if (!v->profile && !v->fastuvmc)
1187 av_log(avctx, AV_LOG_ERROR,
1188 "FASTUVMC unavailable in Simple Profile\n");
1191 v->extended_mv = get_bits(gb, 1); //common
1192 if (!v->profile && v->extended_mv)
1194 av_log(avctx, AV_LOG_ERROR,
1195 "Extended MVs unavailable in Simple Profile\n");
1198 v->dquant = get_bits(gb, 2); //common
1199 v->vstransform = get_bits(gb, 1); //common
1201 v->res_transtab = get_bits(gb, 1);
1202 if (v->res_transtab)
1204 av_log(avctx, AV_LOG_ERROR,
1205 "1 for reserved RES_TRANSTAB is forbidden\n");
1209 v->overlap = get_bits(gb, 1); //common
1211 v->s.resync_marker = get_bits(gb, 1);
1212 v->rangered = get_bits(gb, 1);
1213 if (v->rangered && v->profile == PROFILE_SIMPLE)
1215 av_log(avctx, AV_LOG_INFO,
1216 "RANGERED should be set to 0 in simple profile\n");
1219 v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
1220 v->quantizer_mode = get_bits(gb, 2); //common
1222 v->finterpflag = get_bits(gb, 1); //common
1223 v->res_rtm_flag = get_bits(gb, 1); //reserved
1224 if (!v->res_rtm_flag)
1226 // av_log(avctx, AV_LOG_ERROR,
1227 // "0 for reserved RES_RTM_FLAG is forbidden\n");
1228 av_log(avctx, AV_LOG_ERROR,
1229 "Old WMV3 version detected, only I-frames will be decoded\n");
1232 av_log(avctx, AV_LOG_DEBUG,
1233 "Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
1234 "LoopFilter=%i, MultiRes=%i, FastUVMC=%i, Extended MV=%i\n"
1235 "Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
1236 "DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
1237 v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
1238 v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
1239 v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
1240 v->dquant, v->quantizer_mode, avctx->max_b_frames
1245 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
1247 v->res_rtm_flag = 1;
1248 v->level = get_bits(gb, 3);
1251 av_log(v->s.avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
1253 v->chromaformat = get_bits(gb, 2);
1254 if (v->chromaformat != 1)
1256 av_log(v->s.avctx, AV_LOG_ERROR,
1257 "Only 4:2:0 chroma format supported\n");
1262 v->frmrtq_postproc = get_bits(gb, 3); //common
1263 // (bitrate-32kbps)/64kbps
1264 v->bitrtq_postproc = get_bits(gb, 5); //common
1265 v->postprocflag = get_bits(gb, 1); //common
1267 v->s.avctx->coded_width = (get_bits(gb, 12) + 1) << 1;
1268 v->s.avctx->coded_height = (get_bits(gb, 12) + 1) << 1;
1269 v->broadcast = get_bits1(gb);
1270 v->interlace = get_bits1(gb);
1272 av_log(v->s.avctx, AV_LOG_ERROR, "Interlaced mode not supported (yet)\n");
1275 v->tfcntrflag = get_bits1(gb);
1276 v->finterpflag = get_bits1(gb);
1277 get_bits1(gb); // reserved
1278 v->psf = get_bits1(gb);
1279 if(v->psf) { //PsF, 6.1.13
1280 av_log(v->s.avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
1283 if(get_bits1(gb)) { //Display Info - decoding is not affected by it
1285 av_log(v->s.avctx, AV_LOG_INFO, "Display extended info:\n");
1286 w = get_bits(gb, 14);
1287 h = get_bits(gb, 14);
1288 av_log(v->s.avctx, AV_LOG_INFO, "Display dimensions: %ix%i\n", w, h);
1289 //TODO: store aspect ratio in AVCodecContext
1291 ar = get_bits(gb, 4);
1293 w = get_bits(gb, 8);
1294 h = get_bits(gb, 8);
1297 if(get_bits1(gb)){ //framerate stuff
1307 v->color_prim = get_bits(gb, 8);
1308 v->transfer_char = get_bits(gb, 8);
1309 v->matrix_coef = get_bits(gb, 8);
1313 v->hrd_param_flag = get_bits1(gb);
1314 if(v->hrd_param_flag) {
1316 v->hrd_num_leaky_buckets = get_bits(gb, 5);
1317 get_bits(gb, 4); //bitrate exponent
1318 get_bits(gb, 4); //buffer size exponent
1319 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1320 get_bits(gb, 16); //hrd_rate[n]
1321 get_bits(gb, 16); //hrd_buffer[n]
1327 static int decode_entry_point(AVCodecContext *avctx, GetBitContext *gb)
1329 VC1Context *v = avctx->priv_data;
1332 av_log(avctx, AV_LOG_DEBUG, "Entry point: %08X\n", show_bits_long(gb, 32));
1333 get_bits1(gb); // broken link
1334 avctx->max_b_frames = 1 - get_bits1(gb); // 'closed entry' also signals possible B-frames
1335 v->panscanflag = get_bits1(gb);
1336 get_bits1(gb); // refdist flag
1337 v->s.loop_filter = get_bits1(gb);
1338 v->fastuvmc = get_bits1(gb);
1339 v->extended_mv = get_bits1(gb);
1340 v->dquant = get_bits(gb, 2);
1341 v->vstransform = get_bits1(gb);
1342 v->overlap = get_bits1(gb);
1343 v->quantizer_mode = get_bits(gb, 2);
1345 if(v->hrd_param_flag){
1346 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1347 get_bits(gb, 8); //hrd_full[n]
1352 avctx->coded_width = (get_bits(gb, 12)+1)<<1;
1353 avctx->coded_height = (get_bits(gb, 12)+1)<<1;
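/* Illustrative example: a coded 12-bit value of 359 yields a dimension of
 * (359 + 1) << 1 == 720 samples; dimensions are stored as (size / 2) - 1. */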
1356 v->extended_dmv = get_bits1(gb);
1358 av_log(avctx, AV_LOG_ERROR, "Luma scaling is not supported, expect wrong picture\n");
1359 skip_bits(gb, 3); // Y range, ignored for now
1362 av_log(avctx, AV_LOG_ERROR, "Chroma scaling is not supported, expect wrong picture\n");
1363 skip_bits(gb, 3); // UV range, ignored for now
1369 static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
1371 int pqindex, lowquant, status;
1373 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
1374 skip_bits(gb, 2); //framecnt unused
1376 if (v->rangered) v->rangeredfrm = get_bits(gb, 1);
1377 v->s.pict_type = get_bits(gb, 1);
1378 if (v->s.avctx->max_b_frames) {
1379 if (!v->s.pict_type) {
1380 if (get_bits(gb, 1)) v->s.pict_type = I_TYPE;
1381 else v->s.pict_type = B_TYPE;
1382 } else v->s.pict_type = P_TYPE;
1383 } else v->s.pict_type = v->s.pict_type ? P_TYPE : I_TYPE;
1386 if(v->s.pict_type == B_TYPE) {
1387 v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1388 v->bfraction = vc1_bfraction_lut[v->bfraction];
1389 if(v->bfraction == 0) {
1390 v->s.pict_type = BI_TYPE;
1393 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1394 get_bits(gb, 7); // skip buffer fullness
1397 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1399 if(v->s.pict_type == P_TYPE)
1402 /* Quantizer stuff */
1403 pqindex = get_bits(gb, 5);
1404 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1405 v->pq = pquant_table[0][pqindex];
1407 v->pq = pquant_table[1][pqindex];
1410 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1411 v->pquantizer = pqindex < 9;
1412 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1414 v->pqindex = pqindex;
1415 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1417 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1418 v->pquantizer = get_bits(gb, 1);
1420 if (v->extended_mv == 1) v->mvrange = get_prefix(gb, 0, 3);
1421 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1422 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1423 v->range_x = 1 << (v->k_x - 1);
1424 v->range_y = 1 << (v->k_y - 1);
1425 if (v->profile == PROFILE_ADVANCED)
1427 if (v->postprocflag) v->postproc = get_bits(gb, 1);
1430 if (v->multires && v->s.pict_type != B_TYPE) v->respic = get_bits(gb, 2);
1432 //av_log(v->s.avctx, AV_LOG_INFO, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
1433 // (v->s.pict_type == P_TYPE) ? 'P' : ((v->s.pict_type == I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);
1435 if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0;
1437 switch(v->s.pict_type) {
1439 if (v->pq < 5) v->tt_index = 0;
1440 else if(v->pq < 13) v->tt_index = 1;
1441 else v->tt_index = 2;
1443 lowquant = (v->pq > 12) ? 0 : 1;
1444 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1445 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1447 int scale, shift, i;
1448 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1449 v->lumscale = get_bits(gb, 6);
1450 v->lumshift = get_bits(gb, 6);
1452 /* fill lookup tables for intensity compensation */
1455 shift = (255 - v->lumshift * 2) << 6;
1456 if(v->lumshift > 31)
1459 scale = v->lumscale + 32;
1460 if(v->lumshift > 31)
1461 shift = (v->lumshift - 64) << 6;
1463 shift = v->lumshift << 6;
1465 for(i = 0; i < 256; i++) {
1466 v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
1467 v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
1470 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1471 v->s.quarter_sample = 0;
1472 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1473 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1474 v->s.quarter_sample = 0;
1476 v->s.quarter_sample = 1;
1478 v->s.quarter_sample = 1;
1479 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
1481 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1482 v->mv_mode2 == MV_PMODE_MIXED_MV)
1483 || v->mv_mode == MV_PMODE_MIXED_MV)
1485 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1486 if (status < 0) return -1;
1487 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1488 "Imode: %i, Invert: %i\n", status>>1, status&1);
1490 v->mv_type_is_raw = 0;
1491 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1493 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1494 if (status < 0) return -1;
1495 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1496 "Imode: %i, Invert: %i\n", status>>1, status&1);
1498 /* Hopefully this is correct for P frames */
1499 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1500 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1504 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1505 vop_dquant_decoding(v);
1508 v->ttfrm = 0; //FIXME Is that so ?
1511 v->ttmbf = get_bits(gb, 1);
1514 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
1522 if (v->pq < 5) v->tt_index = 0;
1523 else if(v->pq < 13) v->tt_index = 1;
1524 else v->tt_index = 2;
1526 lowquant = (v->pq > 12) ? 0 : 1;
1527 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1528 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1529 v->s.mspel = v->s.quarter_sample;
1531 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1532 if (status < 0) return -1;
1533 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1534 "Imode: %i, Invert: %i\n", status>>1, status&1);
1535 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1536 if (status < 0) return -1;
1537 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1538 "Imode: %i, Invert: %i\n", status>>1, status&1);
1540 v->s.mv_table_index = get_bits(gb, 2);
1541 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1545 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1546 vop_dquant_decoding(v);
1552 v->ttmbf = get_bits(gb, 1);
1555 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
1565 v->c_ac_table_index = decode012(gb);
1566 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1568 v->y_ac_table_index = decode012(gb);
1571 v->s.dc_table_index = get_bits(gb, 1);
1573 if(v->s.pict_type == BI_TYPE) {
1574 v->s.pict_type = B_TYPE;
1580 static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
1583 int pqindex, lowquant;
1586 v->p_frame_skipped = 0;
1589 fcm = decode012(gb);
1590 switch(get_prefix(gb, 0, 4)) {
1592 v->s.pict_type = P_TYPE;
1595 v->s.pict_type = B_TYPE;
1598 v->s.pict_type = I_TYPE;
1601 v->s.pict_type = BI_TYPE;
1604 v->s.pict_type = P_TYPE; // skipped pic
1605 v->p_frame_skipped = 1;
1611 if(!v->interlace || v->panscanflag) {
1618 if(v->panscanflag) {
1621 v->rnd = get_bits1(gb);
1623 v->uvsamp = get_bits1(gb);
1624 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
1625 if(v->s.pict_type == B_TYPE) {
1626 v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1627 v->bfraction = vc1_bfraction_lut[v->bfraction];
1628 if(v->bfraction == 0) {
1629 v->s.pict_type = BI_TYPE; /* XXX: should not happen here */
1632 pqindex = get_bits(gb, 5);
1633 v->pqindex = pqindex;
1634 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1635 v->pq = pquant_table[0][pqindex];
1637 v->pq = pquant_table[1][pqindex];
1640 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1641 v->pquantizer = pqindex < 9;
1642 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1644 v->pqindex = pqindex;
1645 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1647 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1648 v->pquantizer = get_bits(gb, 1);
1650 switch(v->s.pict_type) {
1653 status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
1654 if (status < 0) return -1;
1655 av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
1656 "Imode: %i, Invert: %i\n", status>>1, status&1);
1657 v->condover = CONDOVER_NONE;
1658 if(v->overlap && v->pq <= 8) {
1659 v->condover = decode012(gb);
1660 if(v->condover == CONDOVER_SELECT) {
1661 status = bitplane_decoding(v->over_flags_plane, &v->overflg_is_raw, v);
1662 if (status < 0) return -1;
1663 av_log(v->s.avctx, AV_LOG_DEBUG, "CONDOVER plane encoding: "
1664 "Imode: %i, Invert: %i\n", status>>1, status&1);
1670 v->postproc = get_bits1(gb);
1671 if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
1672 else v->mvrange = 0;
1673 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1674 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1675 v->range_x = 1 << (v->k_x - 1);
1676 v->range_y = 1 << (v->k_y - 1);
1678 if (v->pq < 5) v->tt_index = 0;
1679 else if(v->pq < 13) v->tt_index = 1;
1680 else v->tt_index = 2;
1682 lowquant = (v->pq > 12) ? 0 : 1;
1683 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1684 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1686 int scale, shift, i;
1687 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1688 v->lumscale = get_bits(gb, 6);
1689 v->lumshift = get_bits(gb, 6);
1690 /* fill lookup tables for intensity compensation */
1693 shift = (255 - v->lumshift * 2) << 6;
1694 if(v->lumshift > 31)
1697 scale = v->lumscale + 32;
1698 if(v->lumshift > 31)
1699 shift = (v->lumshift - 64) << 6;
1701 shift = v->lumshift << 6;
1703 for(i = 0; i < 256; i++) {
1704 v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
1705 v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
1708 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1709 v->s.quarter_sample = 0;
1710 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1711 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1712 v->s.quarter_sample = 0;
1714 v->s.quarter_sample = 1;
1716 v->s.quarter_sample = 1;
1717 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
1719 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1720 v->mv_mode2 == MV_PMODE_MIXED_MV)
1721 || v->mv_mode == MV_PMODE_MIXED_MV)
1723 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1724 if (status < 0) return -1;
1725 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1726 "Imode: %i, Invert: %i\n", status>>1, status&1);
1728 v->mv_type_is_raw = 0;
1729 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1731 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1732 if (status < 0) return -1;
1733 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1734 "Imode: %i, Invert: %i\n", status>>1, status&1);
1736 /* Hopefully this is correct for P frames */
1737 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1738 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1741 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1742 vop_dquant_decoding(v);
1745 v->ttfrm = 0; //FIXME Is that so ?
1748 v->ttmbf = get_bits(gb, 1);
1751 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
1760 v->postproc = get_bits1(gb);
1761 if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
1762 else v->mvrange = 0;
1763 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1764 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1765 v->range_x = 1 << (v->k_x - 1);
1766 v->range_y = 1 << (v->k_y - 1);
1768 if (v->pq < 5) v->tt_index = 0;
1769 else if(v->pq < 13) v->tt_index = 1;
1770 else v->tt_index = 2;
1772 lowquant = (v->pq > 12) ? 0 : 1;
1773 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1774 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1775 v->s.mspel = v->s.quarter_sample;
1777 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1778 if (status < 0) return -1;
1779 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1780 "Imode: %i, Invert: %i\n", status>>1, status&1);
1781 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1782 if (status < 0) return -1;
1783 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1784 "Imode: %i, Invert: %i\n", status>>1, status&1);
1786 v->s.mv_table_index = get_bits(gb, 2);
1787 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1791 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1792 vop_dquant_decoding(v);
1798 v->ttmbf = get_bits(gb, 1);
1801 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
1811 v->c_ac_table_index = decode012(gb);
1812 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1814 v->y_ac_table_index = decode012(gb);
1817 v->s.dc_table_index = get_bits(gb, 1);
1818 if (v->s.pict_type == I_TYPE && v->dquant) {
1819 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1820 vop_dquant_decoding(v);
1824 if(v->s.pict_type == BI_TYPE) {
1825 v->s.pict_type = B_TYPE;
1831 /***********************************************************************/
1833 * @defgroup block VC-1 Block-level functions
1834 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1840 * @brief Get macroblock-level quantizer scale
1842 #define GET_MQUANT() \
1846 if (v->dqprofile == DQPROFILE_ALL_MBS) \
1850 mquant = (get_bits(gb, 1)) ? v->altpq : v->pq; \
1854 mqdiff = get_bits(gb, 3); \
1855 if (mqdiff != 7) mquant = v->pq + mqdiff; \
1856 else mquant = get_bits(gb, 5); \
1859 if(v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1860 edges = 1 << v->dqsbedge; \
1861 else if(v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1862 edges = (3 << v->dqsbedge) % 15; \
1863 else if(v->dqprofile == DQPROFILE_FOUR_EDGES) \
1865 if((edges&1) && !s->mb_x) \
1866 mquant = v->altpq; \
1867 if((edges&2) && s->first_slice_line) \
1868 mquant = v->altpq; \
1869 if((edges&4) && s->mb_x == (s->mb_width - 1)) \
1870 mquant = v->altpq; \
1871 if((edges&8) && s->mb_y == (s->mb_height - 1)) \
1872 mquant = v->altpq; \
1876 * @def GET_MVDATA(_dmv_x, _dmv_y)
1877 * @brief Get MV differentials
1878 * @see MVDATA decoding from 8.3.5.2, p(1)20
1879 * @param _dmv_x Horizontal differential for decoded MV
1880 * @param _dmv_y Vertical differential for decoded MV
1882 #define GET_MVDATA(_dmv_x, _dmv_y) \
1883 index = 1 + get_vlc2(gb, vc1_mv_diff_vlc[s->mv_table_index].table,\
1884 VC1_MV_DIFF_VLC_BITS, 2); \
1887 mb_has_coeffs = 1; \
1890 else mb_has_coeffs = 0; \
1892 if (!index) { _dmv_x = _dmv_y = 0; } \
1893 else if (index == 35) \
1895 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1896 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1898 else if (index == 36) \
1907 if (!s->quarter_sample && index1 == 5) val = 1; \
1909 if(size_table[index1] - val > 0) \
1910 val = get_bits(gb, size_table[index1] - val); \
1912 sign = 0 - (val&1); \
1913 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1916 if (!s->quarter_sample && index1 == 5) val = 1; \
1918 if(size_table[index1] - val > 0) \
1919 val = get_bits(gb, size_table[index1] - val); \
1921 sign = 0 - (val&1); \
1922 _dmv_y = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1925 /** Predict and set motion vector
1927 static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t* is_intra)
1929 int xy, wrap, off = 0;
1934 /* scale MV difference to be quad-pel */
1935 dmv_x <<= 1 - s->quarter_sample;
1936 dmv_y <<= 1 - s->quarter_sample;
1938 wrap = s->b8_stride;
1939 xy = s->block_index[n];
1942 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
1943 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
1944 if(mv1) { /* duplicate motion data for 1-MV block */
1945 s->current_picture.motion_val[0][xy + 1][0] = 0;
1946 s->current_picture.motion_val[0][xy + 1][1] = 0;
1947 s->current_picture.motion_val[0][xy + wrap][0] = 0;
1948 s->current_picture.motion_val[0][xy + wrap][1] = 0;
1949 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
1950 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
1955 C = s->current_picture.motion_val[0][xy - 1];
1956 A = s->current_picture.motion_val[0][xy - wrap];
1958 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1960 //in 4-MV mode different blocks have different B predictor position
1963 off = (s->mb_x > 0) ? -1 : 1;
1966 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1975 B = s->current_picture.motion_val[0][xy - wrap + off];
1977 if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
1978 if(s->mb_width == 1) {
1982 px = mid_pred(A[0], B[0], C[0]);
1983 py = mid_pred(A[1], B[1], C[1]);
1985 } else if(s->mb_x || (n==1 || n==3)) { // predictor C is not out of bounds
1991 /* Pullback MV as specified in 8.3.5.3.4 */
1994 qx = (s->mb_x << 6) + ((n==1 || n==3) ? 32 : 0);
1995 qy = (s->mb_y << 6) + ((n==2 || n==3) ? 32 : 0);
1996 X = (s->mb_width << 6) - 4;
1997 Y = (s->mb_height << 6) - 4;
1999 if(qx + px < -60) px = -60 - qx;
2000 if(qy + py < -60) py = -60 - qy;
2002 if(qx + px < -28) px = -28 - qx;
2003 if(qy + py < -28) py = -28 - qy;
2005 if(qx + px > X) px = X - qx;
2006 if(qy + py > Y) py = Y - qy;
2008 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2009 if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
2010 if(is_intra[xy - wrap])
2011 sum = FFABS(px) + FFABS(py);
2013 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2015 if(get_bits1(&s->gb)) {
2023 if(is_intra[xy - 1])
2024 sum = FFABS(px) + FFABS(py);
2026 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2028 if(get_bits1(&s->gb)) {
2038 /* store MV using signed modulus of MV range defined in 4.11 */
2039 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
2040 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
2041 if(mv1) { /* duplicate motion data for 1-MV block */
2042 s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
2043 s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
2044 s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0];
2045 s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1];
2046 s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
2047 s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
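/* Worked example of the signed modulus above (illustrative): with r_x == 256
 * (i.e. k_x == 9), px + dmv_x == 300 wraps to ((300 + 256) & 511) - 256 == -212,
 * keeping the stored component inside [-256, 255]. */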
2051 /** Motion compensation for direct or interpolated blocks in B-frames
2053 static void vc1_interp_mc(VC1Context *v)
2055 MpegEncContext *s = &v->s;
2056 DSPContext *dsp = &v->s.dsp;
2057 uint8_t *srcY, *srcU, *srcV;
2058 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
2060 if(!v->s.next_picture.data[0])return;
2062 mx = s->mv[1][0][0];
2063 my = s->mv[1][0][1];
2064 uvmx = (mx + ((mx & 3) == 3)) >> 1;
2065 uvmy = (my + ((my & 3) == 3)) >> 1;
2067 uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1));
2068 uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1));
2070 srcY = s->next_picture.data[0];
2071 srcU = s->next_picture.data[1];
2072 srcV = s->next_picture.data[2];
2074 src_x = s->mb_x * 16 + (mx >> 2);
2075 src_y = s->mb_y * 16 + (my >> 2);
2076 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
2077 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
2079 src_x = clip( src_x, -16, s->mb_width * 16);
2080 src_y = clip( src_y, -16, s->mb_height * 16);
2081 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
2082 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
2084 srcY += src_y * s->linesize + src_x;
2085 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
2086 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
2088 /* for grayscale we should not try to read from unknown area */
2089 if(s->flags & CODEC_FLAG_GRAY) {
2090 srcU = s->edge_emu_buffer + 18 * s->linesize;
2091 srcV = s->edge_emu_buffer + 18 * s->linesize;
2095 || (unsigned)src_x > s->h_edge_pos - (mx&3) - 16
2096 || (unsigned)src_y > s->v_edge_pos - (my&3) - 16){
2097 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
2099 srcY -= s->mspel * (1 + s->linesize);
2100 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
2101 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
2102 srcY = s->edge_emu_buffer;
2103 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
2104 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2105 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
2106 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2109 /* if we deal with range reduction we need to scale source blocks */
2110 if(v->rangeredfrm) {
2112 uint8_t *src, *src2;
2115 for(j = 0; j < 17 + s->mspel*2; j++) {
2116 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
2119 src = srcU; src2 = srcV;
2120 for(j = 0; j < 9; j++) {
2121 for(i = 0; i < 9; i++) {
2122 src[i] = ((src[i] - 128) >> 1) + 128;
2123 src2[i] = ((src2[i] - 128) >> 1) + 128;
2125 src += s->uvlinesize;
2126 src2 += s->uvlinesize;
2129 srcY += s->mspel * (1 + s->linesize);
2134 dxy = ((my & 1) << 1) | (mx & 1);
2136 dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
2138 if(s->flags & CODEC_FLAG_GRAY) return;
2139 /* Chroma MC always uses qpel bilinear */
2140 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
2143 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
2144 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
2147 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2151 #if B_FRACTION_DEN==256
2155 return 2 * ((value * n + 255) >> 9);
2156 return (value * n + 128) >> 8;
2159 n -= B_FRACTION_DEN;
2161 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2162 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
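/* Worked example for scale_mv() (illustrative, B_FRACTION_DEN == 256 path,
 * assuming bfraction is stored as numerator * 256 / denominator): with
 * bfrac == 128 (B frame halfway between its anchors), value == 20, qs == 1:
 *   forward  (inv == 0): (20 * 128 + 128) >> 8 == 10
 *   backward (inv == 1): n = 128 - 256 == -128, (20 * -128 + 128) >> 8 == -10 */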
2166 /** Reconstruct motion vector for B-frame and do motion compensation
2168 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
2171 v->mv_mode2 = v->mv_mode;
2172 v->mv_mode = MV_PMODE_INTENSITY_COMP;
2177 if(v->use_ic) v->mv_mode = v->mv_mode2;
2180 if(mode == BMV_TYPE_INTERPOLATED) {
2183 if(v->use_ic) v->mv_mode = v->mv_mode2;
2187 if(v->use_ic && (mode == BMV_TYPE_BACKWARD)) v->mv_mode = v->mv_mode2;
2188 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2189 if(v->use_ic) v->mv_mode = v->mv_mode2;
2192 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
2194 MpegEncContext *s = &v->s;
2195 int xy, wrap, off = 0;
2200 const uint8_t *is_intra = v->mb_type[0];
2204 /* scale MV difference to be quad-pel */
2205 dmv_x[0] <<= 1 - s->quarter_sample;
2206 dmv_y[0] <<= 1 - s->quarter_sample;
2207 dmv_x[1] <<= 1 - s->quarter_sample;
2208 dmv_y[1] <<= 1 - s->quarter_sample;
2210 wrap = s->b8_stride;
2211 xy = s->block_index[0];
2214 s->current_picture.motion_val[0][xy][0] =
2215 s->current_picture.motion_val[0][xy][1] =
2216 s->current_picture.motion_val[1][xy][0] =
2217 s->current_picture.motion_val[1][xy][1] = 0;
2220 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2221 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2222 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2223 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2225 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2226 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2227 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2228 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2232 if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2233 C = s->current_picture.motion_val[0][xy - 2];
2234 A = s->current_picture.motion_val[0][xy - wrap*2];
2235 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2236 B = s->current_picture.motion_val[0][xy - wrap*2 + off];
2238 if(!s->first_slice_line) { // predictor A is not out of bounds
2239 if(s->mb_width == 1) {
2243 px = mid_pred(A[0], B[0], C[0]);
2244 py = mid_pred(A[1], B[1], C[1]);
2246 } else if(s->mb_x) { // predictor C is not out of bounds
2252 /* Pullback MV as specified in 8.3.5.3.4 */
2255 if(v->profile < PROFILE_ADVANCED) {
2256 qx = (s->mb_x << 5);
2257 qy = (s->mb_y << 5);
2258 X = (s->mb_width << 5) - 4;
2259 Y = (s->mb_height << 5) - 4;
2260 if(qx + px < -28) px = -28 - qx;
2261 if(qy + py < -28) py = -28 - qy;
2262 if(qx + px > X) px = X - qx;
2263 if(qy + py > Y) py = Y - qy;
2265 qx = (s->mb_x << 6);
2266 qy = (s->mb_y << 6);
2267 X = (s->mb_width << 6) - 4;
2268 Y = (s->mb_height << 6) - 4;
2269 if(qx + px < -60) px = -60 - qx;
2270 if(qy + py < -60) py = -60 - qy;
2271 if(qx + px > X) px = X - qx;
2272 if(qy + py > Y) py = Y - qy;
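    /* Worked example (advanced profile): for the top-left MB qx = qy = 0, so a
     * candidate px = -100 is pulled back to -60; the predictor is thus kept within
     * a small margin of the picture, as 8.3.5.3.4 requires. */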
2275 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
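    /* Note: the hybrid-prediction branch below is compiled out (if(0 && ...)), so
     * it never overrides the prediction computed above. */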
2276 if(0 && !s->first_slice_line && s->mb_x) {
2277 if(is_intra[xy - wrap])
2278 sum = FFABS(px) + FFABS(py);
2280 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2282 if(get_bits1(&s->gb)) {
2290 if(is_intra[xy - 2])
2291 sum = FFABS(px) + FFABS(py);
2293 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2295 if(get_bits1(&s->gb)) {
2305 /* store MV using signed modulus of MV range defined in 4.11 */
2306 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2307 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
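    /* The masking wraps the sum into [-r_x, r_x - 1]. Example with an assumed
     * r_x = 256: px + dmv = 300 -> (300 + 256) & 511 = 44 -> 44 - 256 = -212. */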
2309 if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2310 C = s->current_picture.motion_val[1][xy - 2];
2311 A = s->current_picture.motion_val[1][xy - wrap*2];
2312 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2313 B = s->current_picture.motion_val[1][xy - wrap*2 + off];
2315 if(!s->first_slice_line) { // predictor A is not out of bounds
2316 if(s->mb_width == 1) {
2320 px = mid_pred(A[0], B[0], C[0]);
2321 py = mid_pred(A[1], B[1], C[1]);
2323 } else if(s->mb_x) { // predictor C is not out of bounds
2329 /* Pullback MV as specified in 8.3.5.3.4 */
2332 if(v->profile < PROFILE_ADVANCED) {
2333 qx = (s->mb_x << 5);
2334 qy = (s->mb_y << 5);
2335 X = (s->mb_width << 5) - 4;
2336 Y = (s->mb_height << 5) - 4;
2337 if(qx + px < -28) px = -28 - qx;
2338 if(qy + py < -28) py = -28 - qy;
2339 if(qx + px > X) px = X - qx;
2340 if(qy + py > Y) py = Y - qy;
2342 qx = (s->mb_x << 6);
2343 qy = (s->mb_y << 6);
2344 X = (s->mb_width << 6) - 4;
2345 Y = (s->mb_height << 6) - 4;
2346 if(qx + px < -60) px = -60 - qx;
2347 if(qy + py < -60) py = -60 - qy;
2348 if(qx + px > X) px = X - qx;
2349 if(qy + py > Y) py = Y - qy;
2352 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
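    /* As in the forward case, this hybrid-prediction branch is compiled out. */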
2353 if(0 && !s->first_slice_line && s->mb_x) {
2354 if(is_intra[xy - wrap])
2355 sum = FFABS(px) + FFABS(py);
2357 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2359 if(get_bits1(&s->gb)) {
2367 if(is_intra[xy - 2])
2368 sum = FFABS(px) + FFABS(py);
2370 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2372 if(get_bits1(&s->gb)) {
2382 /* store MV using signed modulus of MV range defined in 4.11 */
2384 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2385 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2387 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2388 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2389 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2390 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2393 /** Get predicted DC value for I-frames only
2394 * prediction dir: left=0, top=1
2395 * @param s MpegEncContext
2396 * @param[in] n block index in the current MB
2397 * @param dc_val_ptr Pointer to DC predictor
2398 * @param dir_ptr Prediction direction for use in AC prediction
2400 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2401 int16_t **dc_val_ptr, int *dir_ptr)
2403 int a, b, c, wrap, pred, scale;
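    /* dcpred[i] is approximately 1024 / i, i.e. the neutral DC predictor for DC
     * scale i, chosen so that predictor * scale stays close to the mid-level 1024. */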
2405 static const uint16_t dcpred[32] = {
2406 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2407 114, 102, 93, 85, 79, 73, 68, 64,
2408 60, 57, 54, 51, 49, 47, 45, 43,
2409 41, 39, 38, 37, 35, 34, 33
2412 /* find prediction - wmv3_dc_scale always used here in fact */
2413 if (n < 4) scale = s->y_dc_scale;
2414 else scale = s->c_dc_scale;
2416 wrap = s->block_wrap[n];
2417 dc_val= s->dc_val[0] + s->block_index[n];
2423 b = dc_val[ - 1 - wrap];
2424 a = dc_val[ - wrap];
2426 if (pq < 9 || !overlap)
2428 /* Set outer values */
2429 if (s->first_slice_line && (n!=2 && n!=3)) b=a=dcpred[scale];
2430 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=dcpred[scale];
2434 /* Set outer values */
2435 if (s->first_slice_line && (n!=2 && n!=3)) b=a=0;
2436 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=0;
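    /* a is the top neighbour, b the top-left and c the left one; the predictor is
     * taken from the direction whose DC gradient (|a - b| along the top row versus
     * |b - c| down the left column) is smoother. */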
2439 if (abs(a - b) <= abs(b - c)) {
2447 /* update predictor */
2448 *dc_val_ptr = &dc_val[0];
2453 /** Get predicted DC value
2454 * prediction dir: left=0, top=1
2455 * @param s MpegEncContext
2456 * @param[in] n block index in the current MB
2457 * @param dc_val_ptr Pointer to DC predictor
2458 * @param dir_ptr Prediction direction for use in AC prediction
2460 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2461 int a_avail, int c_avail,
2462 int16_t **dc_val_ptr, int *dir_ptr)
2464 int a, b, c, wrap, pred, scale;
2466 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2469 /* find prediction - wmv3_dc_scale always used here in fact */
2470 if (n < 4) scale = s->y_dc_scale;
2471 else scale = s->c_dc_scale;
2473 wrap = s->block_wrap[n];
2474 dc_val= s->dc_val[0] + s->block_index[n];
2480 b = dc_val[ - 1 - wrap];
2481 a = dc_val[ - wrap];
2482 /* scale predictors if needed */
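    /* vc1_dqscale[n - 1] holds a Q18 fixed-point reciprocal of n (roughly (1 << 18) / n),
     * so "x * scale(q2) * vc1_dqscale[scale(q1) - 1] + 0x20000 >> 18" rescales a
     * neighbouring predictor from the neighbour's quantizer scale to the current one
     * with rounding; the same trick is used for the AC predictors further below. */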
2483 q1 = s->current_picture.qscale_table[mb_pos];
2484 if(c_avail && (n!= 1 && n!=3)) {
2485 q2 = s->current_picture.qscale_table[mb_pos - 1];
2487 c = (c * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2489 if(a_avail && (n!= 2 && n!=3)) {
2490 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2492 a = (a * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2494 if(a_avail && c_avail && (n!=3)) {
2497 if(n != 2) off -= s->mb_stride;
2498 q2 = s->current_picture.qscale_table[off];
2500 b = (b * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2503 if(a_avail && c_avail) {
2504 if(abs(a - b) <= abs(b - c)) {
2511 } else if(a_avail) {
2514 } else if(c_avail) {
2522 /* update predictor */
2523 *dc_val_ptr = &dc_val[0];
2529 * @defgroup std_mb VC1 Macroblock-level functions in Simple/Main Profiles
2530 * @see 7.1.4, p91 and 8.1.1.7, p104
2534 static inline int vc1_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
2536 int xy, wrap, pred, a, b, c;
2538 xy = s->block_index[n];
2539 wrap = s->b8_stride;
2544 a = s->coded_block[xy - 1 ];
2545 b = s->coded_block[xy - 1 - wrap];
2546 c = s->coded_block[xy - wrap];
2555 *coded_block_ptr = &s->coded_block[xy];
2561 * Decode one AC coefficient
2562 * @param v The VC1 context
2563 * @param last Last coefficient
2564 * @param skip How many zero coefficients to skip
2565 * @param value Decoded AC coefficient value
 * @param codingset Set of VLC tables used to decode the data
2568 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
2570 GetBitContext *gb = &v->s.gb;
2571 int index, escape, run = 0, level = 0, lst = 0;
2573 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2574 if (index != vc1_ac_sizes[codingset] - 1) {
2575 run = vc1_index_decode_table[codingset][index][0];
2576 level = vc1_index_decode_table[codingset][index][1];
2577 lst = index >= vc1_last_decode_table[codingset];
2581 escape = decode210(gb);
2583 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2584 run = vc1_index_decode_table[codingset][index][0];
2585 level = vc1_index_decode_table[codingset][index][1];
2586 lst = index >= vc1_last_decode_table[codingset];
2589 level += vc1_last_delta_level_table[codingset][run];
2591 level += vc1_delta_level_table[codingset][run];
2594 run += vc1_last_delta_run_table[codingset][level] + 1;
2596 run += vc1_delta_run_table[codingset][level] + 1;
2602 lst = get_bits(gb, 1);
2603 if(v->s.esc3_level_length == 0) {
2604 if(v->pq < 8 || v->dquantfrm) { // table 59
2605 v->s.esc3_level_length = get_bits(gb, 3);
2606 if(!v->s.esc3_level_length)
2607 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2609 v->s.esc3_level_length = get_prefix(gb, 1, 6) + 2;
2611 v->s.esc3_run_length = 3 + get_bits(gb, 2);
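    /* Escape mode 3: the run and level field widths are decoded once (cached in
     * esc3_run_length / esc3_level_length) and then reused for the following
     * escape-coded coefficients. */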
2613 run = get_bits(gb, v->s.esc3_run_length);
2614 sign = get_bits(gb, 1);
2615 level = get_bits(gb, v->s.esc3_level_length);
2626 /** Decode intra block in intra frames - should be faster than decode_intra_block
2627 * @param v VC1Context
2628 * @param block block to decode
2629 * @param coded Whether AC coefficients are present for this block
2630 * @param codingset Set of VLC tables used to decode the data
2632 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
2634 GetBitContext *gb = &v->s.gb;
2635 MpegEncContext *s = &v->s;
2636 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2639 int16_t *ac_val, *ac_val2;
2642 /* Get DC differential */
2644 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2646 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2649 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2654 if (dcdiff == 119 /* ESC index value */)
2656 /* TODO: Optimize */
2657 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2658 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2659 else dcdiff = get_bits(gb, 8);
2664 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2665 else if (v->pq == 2)
2666 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2668 if (get_bits(gb, 1))
2673 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2676 /* Store the quantized DC coeff, used for prediction */
2678 block[0] = dcdiff * s->y_dc_scale;
2680 block[0] = dcdiff * s->c_dc_scale;
2693 int last = 0, skip, value;
2694 const int8_t *zz_table;
2698 scale = v->pq * 2 + v->halfpq;
2702 zz_table = vc1_horizontal_zz;
2704 zz_table = vc1_vertical_zz;
2706 zz_table = vc1_normal_zz;
2708 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2710 if(dc_pred_dir) //left
2713 ac_val -= 16 * s->block_wrap[n];
2716 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2720 block[zz_table[i++]] = value;
2723 /* apply AC prediction if needed */
2725 if(dc_pred_dir) { //left
2726 for(k = 1; k < 8; k++)
2727 block[k << 3] += ac_val[k];
2729 for(k = 1; k < 8; k++)
2730 block[k] += ac_val[k + 8];
2733 /* save AC coeffs for further prediction */
2734 for(k = 1; k < 8; k++) {
2735 ac_val2[k] = block[k << 3];
2736 ac_val2[k + 8] = block[k];
2739 /* scale AC coeffs */
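    /* Reconstruction is coefficient * scale, plus a +/- pq bias when the non-uniform
     * quantizer is in use. Example with pq = 4, halfpq = 0 (scale = 8): a level of 3
     * reconstructs to 3 * 8 + 4 = 28. */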
2740 for(k = 1; k < 64; k++)
2744 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2747 if(s->ac_pred) i = 63;
2753 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2756 scale = v->pq * 2 + v->halfpq;
2757 memset(ac_val2, 0, 16 * 2);
2758 if(dc_pred_dir) {//left
2761 memcpy(ac_val2, ac_val, 8 * 2);
2763 ac_val -= 16 * s->block_wrap[n];
2765 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2768 /* apply AC prediction if needed */
2770 if(dc_pred_dir) { //left
2771 for(k = 1; k < 8; k++) {
2772 block[k << 3] = ac_val[k] * scale;
2773 if(!v->pquantizer && block[k << 3])
2774 block[k << 3] += (block[k << 3] < 0) ? -v->pq : v->pq;
2777 for(k = 1; k < 8; k++) {
2778 block[k] = ac_val[k + 8] * scale;
2779 if(!v->pquantizer && block[k])
2780 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2786 s->block_last_index[n] = i;
2791 /** Decode intra block in intra frames - advanced profile version, should be faster than decode_intra_block
2792 * @param v VC1Context
2793 * @param block block to decode
2794 * @param coded Whether AC coefficients are present for this block
2795 * @param codingset Set of VLC tables used to decode the data
2797 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
2799 GetBitContext *gb = &v->s.gb;
2800 MpegEncContext *s = &v->s;
2801 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2804 int16_t *ac_val, *ac_val2;
2806 int a_avail = v->a_avail, c_avail = v->c_avail;
2807 int use_pred = s->ac_pred;
2810 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2812 /* Get DC differential */
2814 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2816 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2819 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2824 if (dcdiff == 119 /* ESC index value */)
2826 /* TODO: Optimize */
2827 if (mquant == 1) dcdiff = get_bits(gb, 10);
2828 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2829 else dcdiff = get_bits(gb, 8);
2834 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2835 else if (mquant == 2)
2836 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2838 if (get_bits(gb, 1))
2843 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2846 /* Store the quantized DC coeff, used for prediction */
2848 block[0] = dcdiff * s->y_dc_scale;
2850 block[0] = dcdiff * s->c_dc_scale;
2859 /* check if AC is needed at all and adjust direction if needed */
2860 if(!a_avail) dc_pred_dir = 1;
2861 if(!c_avail) dc_pred_dir = 0;
2862 if(!a_avail && !c_avail) use_pred = 0;
2863 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2866 scale = mquant * 2 + v->halfpq;
2868 if(dc_pred_dir) //left
2871 ac_val -= 16 * s->block_wrap[n];
2873 q1 = s->current_picture.qscale_table[mb_pos];
2874 if(dc_pred_dir && c_avail) q2 = s->current_picture.qscale_table[mb_pos - 1];
2875 if(!dc_pred_dir && a_avail) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2876 if(n && n<4) q2 = q1;
2879 int last = 0, skip, value;
2880 const int8_t *zz_table;
2885 zz_table = vc1_horizontal_zz;
2887 zz_table = vc1_vertical_zz;
2889 zz_table = vc1_normal_zz;
2892 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2896 block[zz_table[i++]] = value;
2899 /* apply AC prediction if needed */
2901 /* scale predictors if needed */
2903 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2904 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2906 if(dc_pred_dir) { //left
2907 for(k = 1; k < 8; k++)
2908 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2910 for(k = 1; k < 8; k++)
2911 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2914 if(dc_pred_dir) { //left
2915 for(k = 1; k < 8; k++)
2916 block[k << 3] += ac_val[k];
2918 for(k = 1; k < 8; k++)
2919 block[k] += ac_val[k + 8];
2923 /* save AC coeffs for further prediction */
2924 for(k = 1; k < 8; k++) {
2925 ac_val2[k] = block[k << 3];
2926 ac_val2[k + 8] = block[k];
2929 /* scale AC coeffs */
2930 for(k = 1; k < 64; k++)
2934 block[k] += (block[k] < 0) ? -mquant : mquant;
2937 if(use_pred) i = 63;
2938 } else { // no AC coeffs
2941 memset(ac_val2, 0, 16 * 2);
2942 if(dc_pred_dir) {//left
2944 memcpy(ac_val2, ac_val, 8 * 2);
2946 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2947 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2948 for(k = 1; k < 8; k++)
2949 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2954 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2956 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2957 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2958 for(k = 1; k < 8; k++)
2959 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2964 /* apply AC prediction if needed */
2966 if(dc_pred_dir) { //left
2967 for(k = 1; k < 8; k++) {
2968 block[k << 3] = ac_val2[k] * scale;
2969 if(!v->pquantizer && block[k << 3])
2970 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
2973 for(k = 1; k < 8; k++) {
2974 block[k] = ac_val2[k + 8] * scale;
2975 if(!v->pquantizer && block[k])
2976 block[k] += (block[k] < 0) ? -mquant : mquant;
2982 s->block_last_index[n] = i;
2987 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
2988 * @param v VC1Context
2989 * @param block block to decode
2990 * @param coded Whether AC coefficients are present for this block
2991 * @param mquant block quantizer
2992 * @param codingset Set of VLC tables used to decode the data
2994 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
2996 GetBitContext *gb = &v->s.gb;
2997 MpegEncContext *s = &v->s;
2998 int dc_pred_dir = 0; /* Direction of the DC prediction used */
3001 int16_t *ac_val, *ac_val2;
3003 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3004 int a_avail = v->a_avail, c_avail = v->c_avail;
3005 int use_pred = s->ac_pred;
3009 /* XXX: Guard against dumb values of mquant */
3010 mquant = (mquant < 1) ? 0 : ( (mquant>31) ? 31 : mquant );
3012 /* Set DC scale - y and c use the same */
3013 s->y_dc_scale = s->y_dc_scale_table[mquant];
3014 s->c_dc_scale = s->c_dc_scale_table[mquant];
3016 /* Get DC differential */
3018 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3020 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3023 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
3028 if (dcdiff == 119 /* ESC index value */)
3030 /* TODO: Optimize */
3031 if (mquant == 1) dcdiff = get_bits(gb, 10);
3032 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3033 else dcdiff = get_bits(gb, 8);
3038 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
3039 else if (mquant == 2)
3040 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
3042 if (get_bits(gb, 1))
3047 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3050 /* Store the quantized DC coeff, used for prediction */
3053 block[0] = dcdiff * s->y_dc_scale;
3055 block[0] = dcdiff * s->c_dc_scale;
3064 /* check if AC is needed at all and adjust direction if needed */
3065 if(!a_avail) dc_pred_dir = 1;
3066 if(!c_avail) dc_pred_dir = 0;
3067 if(!a_avail && !c_avail) use_pred = 0;
3068 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3071 scale = mquant * 2 + v->halfpq;
3073 if(dc_pred_dir) //left
3076 ac_val -= 16 * s->block_wrap[n];
3078 q1 = s->current_picture.qscale_table[mb_pos];
3079 if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
3080 if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3081 if(n && n<4) q2 = q1;
3084 int last = 0, skip, value;
3085 const int8_t *zz_table;
3088 zz_table = vc1_simple_progressive_8x8_zz;
3091 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3095 block[zz_table[i++]] = value;
3098 /* apply AC prediction if needed */
3100 /* scale predictors if needed */
3102 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3103 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3105 if(dc_pred_dir) { //left
3106 for(k = 1; k < 8; k++)
3107 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3109 for(k = 1; k < 8; k++)
3110 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3113 if(dc_pred_dir) { //left
3114 for(k = 1; k < 8; k++)
3115 block[k << 3] += ac_val[k];
3117 for(k = 1; k < 8; k++)
3118 block[k] += ac_val[k + 8];
3122 /* save AC coeffs for further prediction */
3123 for(k = 1; k < 8; k++) {
3124 ac_val2[k] = block[k << 3];
3125 ac_val2[k + 8] = block[k];
3128 /* scale AC coeffs */
3129 for(k = 1; k < 64; k++)
3133 block[k] += (block[k] < 0) ? -mquant : mquant;
3136 if(use_pred) i = 63;
3137 } else { // no AC coeffs
3140 memset(ac_val2, 0, 16 * 2);
3141 if(dc_pred_dir) {//left
3143 memcpy(ac_val2, ac_val, 8 * 2);
3145 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3146 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3147 for(k = 1; k < 8; k++)
3148 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3153 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3155 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3156 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3157 for(k = 1; k < 8; k++)
3158 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3163 /* apply AC prediction if needed */
3165 if(dc_pred_dir) { //left
3166 for(k = 1; k < 8; k++) {
3167 block[k << 3] = ac_val2[k] * scale;
3168 if(!v->pquantizer && block[k << 3])
3169 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
3172 for(k = 1; k < 8; k++) {
3173 block[k] = ac_val2[k + 8] * scale;
3174 if(!v->pquantizer && block[k])
3175 block[k] += (block[k] < 0) ? -mquant : mquant;
3181 s->block_last_index[n] = i;
3188 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block)
3190 MpegEncContext *s = &v->s;
3191 GetBitContext *gb = &s->gb;
3194 int scale, off, idx, last, skip, value;
3195 int ttblk = ttmb & 7;
3198 ttblk = ttblk_to_tt[v->tt_index][get_vlc2(gb, vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3200 if(ttblk == TT_4X4) {
3201 subblkpat = ~(get_vlc2(gb, vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3203 if((ttblk != TT_8X8 && ttblk != TT_4X4) && (v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))) {
3204 subblkpat = decode012(gb);
3205 if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits
3206 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4;
3207 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8;
3209 scale = 2 * mquant + v->halfpq;
3211 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3212 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3213 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3216 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3217 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
3225 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3229 idx = vc1_simple_progressive_8x8_zz[i++];
3230 block[idx] = value * scale;
3232 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3234 s->dsp.vc1_inv_trans_8x8(block);
3237 for(j = 0; j < 4; j++) {
3238 last = subblkpat & (1 << (3 - j));
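    /* off is the index of the top-left coefficient of 4x4 sub-block j inside the
     * 8x8 block (row stride 8): j = 0, 1, 2, 3 -> 0, 4, 32, 36. */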
3240 off = (j & 1) * 4 + (j & 2) * 16;
3242 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3246 idx = vc1_simple_progressive_4x4_zz[i++];
3247 block[idx + off] = value * scale;
3249 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3251 if(!(subblkpat & (1 << (3 - j))))
3252 s->dsp.vc1_inv_trans_4x4(block, j);
3256 for(j = 0; j < 2; j++) {
3257 last = subblkpat & (1 << (1 - j));
3261 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3265 if(v->profile < PROFILE_ADVANCED)
3266 idx = vc1_simple_progressive_8x4_zz[i++];
3268 idx = vc1_adv_progressive_8x4_zz[i++];
3269 block[idx + off] = value * scale;
3271 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3273 if(!(subblkpat & (1 << (1 - j))))
3274 s->dsp.vc1_inv_trans_8x4(block, j);
3278 for(j = 0; j < 2; j++) {
3279 last = subblkpat & (1 << (1 - j));
3283 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3287 if(v->profile < PROFILE_ADVANCED)
3288 idx = vc1_simple_progressive_4x8_zz[i++];
3290 idx = vc1_adv_progressive_4x8_zz[i++];
3291 block[idx + off] = value * scale;
3293 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3295 if(!(subblkpat & (1 << (1 - j))))
3296 s->dsp.vc1_inv_trans_4x8(block, j);
3304 /** Decode one P-frame MB (in Simple/Main profile)
3306 static int vc1_decode_p_mb(VC1Context *v)
3308 MpegEncContext *s = &v->s;
3309 GetBitContext *gb = &s->gb;
3311 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3312 int cbp; /* coded block pattern */
3313 int mqdiff, mquant; /* MB quantization */
3314 int ttmb = v->ttfrm; /* MB Transform type */
3317 static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3318 offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3319 int mb_has_coeffs = 1; /* last_flag */
3320 int dmv_x, dmv_y; /* Differential MV components */
3321 int index, index1; /* LUT indices */
3322 int val, sign; /* temp values */
3323 int first_block = 1;
3325 int skipped, fourmv;
3327 mquant = v->pq; /* initialize with the frame quantizer; may be overridden per MB */
3329 if (v->mv_type_is_raw)
3330 fourmv = get_bits1(gb);
3332 fourmv = v->mv_type_mb_plane[mb_pos];
3334 skipped = get_bits1(gb);
3336 skipped = v->s.mbskip_table[mb_pos];
3338 s->dsp.clear_blocks(s->block[0]);
3340 if (!fourmv) /* 1MV mode */
3344 GET_MVDATA(dmv_x, dmv_y);
3347 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3348 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3350 s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3351 vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3353 /* FIXME Set DC val for inter block ? */
3354 if (s->mb_intra && !mb_has_coeffs)
3357 s->ac_pred = get_bits(gb, 1);
3360 else if (mb_has_coeffs)
3362 if (s->mb_intra) s->ac_pred = get_bits(gb, 1);
3363 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3371 s->current_picture.qscale_table[mb_pos] = mquant;
3373 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3374 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table,
3375 VC1_TTMB_VLC_BITS, 2);
3376 if(!s->mb_intra) vc1_mc_1mv(v, 0);
3380 s->dc_val[0][s->block_index[i]] = 0;
3382 val = ((cbp >> (5 - i)) & 1);
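    /* Destination offset of block i: luma blocks 0..3 map to the four 8x8 quadrants
     * of the 16x16 macroblock (0, 8, 8*linesize, 8*linesize + 8); chroma blocks 4
     * and 5 start at offset 0 of their own planes. */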
3383 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3384 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3386 /* check if prediction blocks A and C are available */
3387 v->a_avail = v->c_avail = 0;
3388 if(i == 2 || i == 3 || !s->first_slice_line)
3389 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3390 if(i == 1 || i == 3 || s->mb_x)
3391 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3393 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3394 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3395 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3396 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3397 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3398 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3399 if(v->pq >= 9 && v->overlap) {
3401 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3403 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3406 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3407 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3409 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3410 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3417 for(i = 0; i < 6; i++) {
3418 v->mb_type[0][s->block_index[i]] = 0;
3419 s->dc_val[0][s->block_index[i]] = 0;
3421 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3422 s->current_picture.qscale_table[mb_pos] = 0;
3423 vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
3430 if (!skipped /* unskipped MB */)
3432 int intra_count = 0, coded_inter = 0;
3433 int is_intra[6], is_coded[6];
3435 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3438 val = ((cbp >> (5 - i)) & 1);
3439 s->dc_val[0][s->block_index[i]] = 0;
3446 GET_MVDATA(dmv_x, dmv_y);
3448 vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3449 if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
3450 intra_count += s->mb_intra;
3451 is_intra[i] = s->mb_intra;
3452 is_coded[i] = mb_has_coeffs;
3455 is_intra[i] = (intra_count >= 3);
3458 if(i == 4) vc1_mc_4mv_chroma(v);
3459 v->mb_type[0][s->block_index[i]] = is_intra[i];
3460 if(!coded_inter) coded_inter = !is_intra[i] & is_coded[i];
3462 // if there are no coded blocks then don't do anything more
3463 if(!intra_count && !coded_inter) return 0;
3466 s->current_picture.qscale_table[mb_pos] = mquant;
3467 /* test if block is intra and has pred */
3472 if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3473 || ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) {
3478 if(intrapred)s->ac_pred = get_bits(gb, 1);
3479 else s->ac_pred = 0;
3481 if (!v->ttmbf && coded_inter)
3482 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3486 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3487 s->mb_intra = is_intra[i];
3489 /* check if prediction blocks A and C are available */
3490 v->a_avail = v->c_avail = 0;
3491 if(i == 2 || i == 3 || !s->first_slice_line)
3492 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3493 if(i == 1 || i == 3 || s->mb_x)
3494 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3496 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
3497 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3498 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3499 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3500 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3501 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3502 if(v->pq >= 9 && v->overlap) {
3504 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3506 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3508 } else if(is_coded[i]) {
3509 status = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3510 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3512 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3513 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3521 s->current_picture.qscale_table[mb_pos] = 0;
3522 for (i=0; i<6; i++) {
3523 v->mb_type[0][s->block_index[i]] = 0;
3524 s->dc_val[0][s->block_index[i]] = 0;