/*
 * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file h264.c
 * H.264 / AVC / MPEG4 part10 codec.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */
#include "common.h"
#include "dsputil.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "h264data.h"
#include "golomb.h"
#include "cabac.h"

//#undef NDEBUG
#include <assert.h>
#define interlaced_dct interlaced_dct_is_a_bad_name
#define mb_intra mb_intra_isnt_initalized_see_mb_type

#define LUMA_DC_BLOCK_INDEX   25
#define CHROMA_DC_BLOCK_INDEX 26

#define CHROMA_DC_COEFF_TOKEN_VLC_BITS 8
#define COEFF_TOKEN_VLC_BITS           8
#define TOTAL_ZEROS_VLC_BITS           9
#define CHROMA_DC_TOTAL_ZEROS_VLC_BITS 3
#define RUN_VLC_BITS                   3
#define RUN7_VLC_BITS                  6

#define MAX_SPS_COUNT 32
#define MAX_PPS_COUNT 256

#define MAX_MMCO_COUNT 66
/* Compiling in interlaced support reduces the speed
 * of progressive decoding by about 2%. */
#define ALLOW_INTERLACE

#ifdef ALLOW_INTERLACE
#define MB_MBAFF h->mb_mbaff
#define MB_FIELD h->mb_field_decoding_flag
#define FRAME_MBAFF h->mb_aff_frame
#else
#define MB_MBAFF 0
#define MB_FIELD 0
#define FRAME_MBAFF 0
#undef  IS_INTERLACED
#define IS_INTERLACED(mb_type) 0
#endif
/**
 * Sequence parameter set
 */
typedef struct SPS{
    int profile_idc;
    int level_idc;
    int transform_bypass;              ///< qpprime_y_zero_transform_bypass_flag
    int log2_max_frame_num;            ///< log2_max_frame_num_minus4 + 4
    int poc_type;                      ///< pic_order_cnt_type
    int log2_max_poc_lsb;              ///< log2_max_pic_order_cnt_lsb_minus4
    int delta_pic_order_always_zero_flag;
    int offset_for_non_ref_pic;
    int offset_for_top_to_bottom_field;
    int poc_cycle_length;              ///< num_ref_frames_in_pic_order_cnt_cycle
    int ref_frame_count;               ///< num_ref_frames
    int gaps_in_frame_num_allowed_flag;
    int mb_width;                      ///< frame_width_in_mbs_minus1 + 1
    int mb_height;                     ///< frame_height_in_mbs_minus1 + 1
    int frame_mbs_only_flag;
    int mb_aff;                        ///< mb_adaptive_frame_field_flag
    int direct_8x8_inference_flag;
    int crop;                          ///< frame_cropping_flag
    int crop_left;                     ///< frame_cropping_rect_left_offset
    int crop_right;                    ///< frame_cropping_rect_right_offset
    int crop_top;                      ///< frame_cropping_rect_top_offset
    int crop_bottom;                   ///< frame_cropping_rect_bottom_offset
    int vui_parameters_present_flag;
    int timing_info_present_flag;
    uint32_t num_units_in_tick;
    int fixed_frame_rate_flag;
    short offset_for_ref_frame[256]; //FIXME dyn alloc?
    int bitstream_restriction_flag;
    int num_reorder_frames;
    int scaling_matrix_present;
    uint8_t scaling_matrix4[6][16];
    uint8_t scaling_matrix8[2][64];
}SPS;
/**
 * Picture parameter set
 */
typedef struct PPS{
    int sps_id;
    int cabac;                  ///< entropy_coding_mode_flag
    int pic_order_present;      ///< pic_order_present_flag
    int slice_group_count;      ///< num_slice_groups_minus1 + 1
    int mb_slice_group_map_type;
    unsigned int ref_count[2];  ///< num_ref_idx_l0/1_active_minus1 + 1
    int weighted_pred;          ///< weighted_pred_flag
    int weighted_bipred_idc;
    int init_qp;                ///< pic_init_qp_minus26 + 26
    int init_qs;                ///< pic_init_qs_minus26 + 26
    int chroma_qp_index_offset;
    int deblocking_filter_parameters_present; ///< deblocking_filter_parameters_present_flag
    int constrained_intra_pred;    ///< constrained_intra_pred_flag
    int redundant_pic_cnt_present; ///< redundant_pic_cnt_present_flag
    int transform_8x8_mode;        ///< transform_8x8_mode_flag
    uint8_t scaling_matrix4[6][16];
    uint8_t scaling_matrix8[2][64];
}PPS;
/**
 * Memory management control operation opcode.
 */
typedef enum MMCOOpcode{
    MMCO_END=0,
    MMCO_SHORT2UNUSED,
    MMCO_LONG2UNUSED,
    MMCO_SHORT2LONG,
    MMCO_SET_MAX_LONG,
    MMCO_RESET,
    MMCO_LONG,
} MMCOOpcode;

/**
 * Memory management control operation.
 */
typedef struct MMCO{
    MMCOOpcode opcode;
    int short_frame_num;
    int long_index;
} MMCO;
typedef struct H264Context{
    MpegEncContext s;
    int nal_ref_idc;
    int nal_unit_type;
    uint8_t *rbsp_buffer;
    unsigned int rbsp_buffer_size;

    /**
     * Used to parse the AVC variant of H.264.
     */
    int is_avc;          ///< this flag is != 0 if codec is avc1
    int got_avcC;        ///< flag used to parse avcC data only once
    int nal_length_size; ///< Number of bytes used for nal length (1, 2 or 4)

    int chroma_pred_mode;
    int intra16x16_pred_mode;

    int8_t intra4x4_pred_mode_cache[5*8];
    int8_t (*intra4x4_pred_mode)[8];
    void (*pred4x4  [9+3])(uint8_t *src, uint8_t *topright, int stride);//FIXME move to dsp?
    void (*pred8x8l [9+3])(uint8_t *src, int topleft, int topright, int stride);
    void (*pred8x8  [4+3])(uint8_t *src, int stride);
    void (*pred16x16[4+3])(uint8_t *src, int stride);
    unsigned int topleft_samples_available;
    unsigned int top_samples_available;
    unsigned int topright_samples_available;
    unsigned int left_samples_available;
    uint8_t (*top_borders[2])[16+2*8];
    uint8_t left_border[2*(17+2*9)];

    /**
     * non zero coeff count cache.
     * is 64 if not available.
     */
    DECLARE_ALIGNED_8(uint8_t, non_zero_count_cache[6*8]);
    uint8_t (*non_zero_count)[16];

    /**
     * Motion vector cache.
     */
    DECLARE_ALIGNED_8(int16_t, mv_cache[2][5*8][2]);
    DECLARE_ALIGNED_8(int8_t, ref_cache[2][5*8]);
#define LIST_NOT_USED -1 //FIXME rename?
#define PART_NOT_AVAILABLE -2

    /**
     * is 1 if the specific list MV&references are set to 0,0,-2.
     */
    int mv_cache_clean[2];

    /**
     * number of neighbors (top and/or left) that used 8x8 dct
     */
    int neighbor_transform_size;

    /**
     * block_offset[ 0..23] for frame macroblocks
     * block_offset[24..47] for field macroblocks
     */
    int block_offset[2*(16+8)];

    uint32_t *mb2b_xy;  //FIXME are these 4 a good idea?
    uint32_t *mb2b8_xy;
    int b_stride;       //FIXME use s->b4_stride
    int b8_stride;

    int mb_linesize;    ///< may be equal to s->linesize or s->linesize*2, for mbaff

    int top_mb_xy;
    int left_mb_xy[2];

    int unknown_svq3_flag;
    int next_slice_index;
    int x264_build;

    SPS sps_buffer[MAX_SPS_COUNT];
    SPS sps; ///< current sps

    PPS pps_buffer[MAX_PPS_COUNT];
    /**
     * current pps
     */
    PPS pps; //FIXME move to Picture perhaps? (->no) do we need that?

    uint32_t dequant4_buffer[6][52][16];
    uint32_t dequant8_buffer[2][52][64];
    uint32_t (*dequant4_coeff[6])[16];
    uint32_t (*dequant8_coeff[2])[64];
    int dequant_coeff_pps;  ///< reinit tables when pps changes

    uint8_t *slice_table_base;
    uint8_t *slice_table;   ///< slice_table_base + 2*mb_stride + 1
    int slice_num;
    int slice_type;
    int slice_type_fixed;

    //interlacing specific flags
    int mb_aff_frame;
    int mb_field_decoding_flag;
    int mb_mbaff;           ///< mb_aff_frame && mb_field_decoding_flag

    unsigned int sub_mb_type[4];

    //POC stuff
    int delta_poc_bottom;
    int prev_poc_msb;          ///< poc_msb of the last reference pic for POC type 0
    int prev_poc_lsb;          ///< poc_lsb of the last reference pic for POC type 0
    int frame_num_offset;      ///< for POC type 2
    int prev_frame_num_offset; ///< for POC type 2
    int prev_frame_num;        ///< frame_num of the last pic for POC type 1/2

    /**
     * frame_num for frames or 2*frame_num for field pics.
     */
    int frame_num;

    /**
     * max_frame_num or 2*max_frame_num for field pics.
     */
    int max_frame_num;

    //Weighted pred stuff
    int use_weight;
    int use_weight_chroma;
    int luma_log2_weight_denom;
    int chroma_log2_weight_denom;
    int luma_weight[2][48];
    int luma_offset[2][48];
    int chroma_weight[2][48][2];
    int chroma_offset[2][48][2];
    int implicit_weight[48][48];

    //deblock
    int deblocking_filter;  ///< disable_deblocking_filter_idc with 1<->0
    int slice_alpha_c0_offset;
    int slice_beta_offset;

    int redundant_pic_count;

    int direct_spatial_mv_pred;
    int dist_scale_factor[16];
    int dist_scale_factor_field[32];
    int map_col_to_list0[2][16];
    int map_col_to_list0_field[2][32];

    /**
     * num_ref_idx_l0/1_active_minus1 + 1
     */
    unsigned int ref_count[2];  ///< counts frames or fields, depending on current mb mode
    unsigned int list_count;
    Picture *short_ref[32];
    Picture *long_ref[32];
    Picture default_ref_list[2][32];
    Picture ref_list[2][48];    ///< 0..15: frame refs, 16..47: mbaff field refs
    Picture *delayed_pic[18];   //FIXME size?
    Picture *delayed_output_pic;

    /**
     * memory management control operations buffer.
     */
    MMCO mmco[MAX_MMCO_COUNT];

    int long_ref_count;   ///< number of actual long term references
    int short_ref_count;  ///< number of actual short term references

    GetBitContext intra_gb;
    GetBitContext inter_gb;
    GetBitContext *intra_gb_ptr;
    GetBitContext *inter_gb_ptr;

    DECLARE_ALIGNED_8(DCTELEM, mb[16*24]);
    DCTELEM mb_padding[256]; ///< as mb is addressed by scantable[i] and scantable is uint8_t we can either check that i is not too large or ensure that there is some unused stuff after mb

    uint8_t cabac_state[460];

    /* 0x100 -> non null luma_dc, 0x80/0x40 -> non null chroma_dc (cb/cr), 0x?0 -> chroma_cbp(0,1,2), 0x0? luma_cbp */
    uint16_t *cbp_table;
    int top_cbp;
    int left_cbp;
    /* chroma_pred_mode for i4x4 or i16x16, else 0 */
    uint8_t *chroma_pred_mode_table;
    int last_qscale_diff;
    int16_t (*mvd_table[2])[2];
    DECLARE_ALIGNED_8(int16_t, mvd_cache[2][5*8][2]);
    uint8_t *direct_table;
    uint8_t direct_cache[5*8];

    uint8_t zigzag_scan[16];
    uint8_t zigzag_scan8x8[64];
    uint8_t zigzag_scan8x8_cavlc[64];
    uint8_t field_scan[16];
    uint8_t field_scan8x8[64];
    uint8_t field_scan8x8_cavlc[64];
    const uint8_t *zigzag_scan_q0;
    const uint8_t *zigzag_scan8x8_q0;
    const uint8_t *zigzag_scan8x8_cavlc_q0;
    const uint8_t *field_scan_q0;
    const uint8_t *field_scan8x8_q0;
    const uint8_t *field_scan8x8_cavlc_q0;
}H264Context;
static VLC coeff_token_vlc[4];
static VLC chroma_dc_coeff_token_vlc;

static VLC total_zeros_vlc[15];
static VLC chroma_dc_total_zeros_vlc[3];

static VLC run_vlc[6];
static VLC run7_vlc;
static void svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp);
static void svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp, int dc);
static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize);
static void filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize);
static av_always_inline uint32_t pack16to32(int a, int b){
#ifdef WORDS_BIGENDIAN
    return (b&0xFFFF) + (a<<16);
#else
    return (a&0xFFFF) + (b<<16);
#endif
}
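/* Usage sketch (informal): pack16to32() packs an (mx,my) pair into one 32-bit
 * word whose in-memory layout matches two consecutive int16_t values (hence
 * the big-endian swap above), so a whole mv_cache row can be written with a
 * single store, e.g.
 *     fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx,my), 4);
 */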
const uint8_t ff_rem6[52]={
0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
};

const uint8_t ff_div6[52]={
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8,
};
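/* Sanity-check sketch (not compiled in; illustrates the intent of the two
 * tables above): ff_rem6[qp]==qp%6 and ff_div6[qp]==qp/6 for 0<=qp<52, so
 * dequant table setup can avoid an integer division per lookup. */
#if 0
static void check_qp_tables(void){
    int qp;
    for(qp=0; qp<52; qp++){
        assert(ff_rem6[qp] == qp%6);
        assert(ff_div6[qp] == qp/6);
    }
}
#endif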
/**
 * @param h height of the rectangle, should be a constant
 * @param w width of the rectangle, should be a constant
 * @param size the size of val (1 or 4), should be a constant
 */
static av_always_inline void fill_rectangle(void *vp, int w, int h, int stride, uint32_t val, int size){
    uint8_t *p= (uint8_t*)vp;
    assert(size==1 || size==4);
    assert(w<=4);

    w      *= size;
    stride *= size;

    assert((((long)vp)&(FFMIN(w, STRIDE_ALIGN)-1)) == 0);
    assert((stride&(w-1))==0);
    if(w==2){
        const uint16_t v= size==4 ? val : val*0x0101;
        *(uint16_t*)(p + 0*stride)= v;
        if(h==1) return;
        *(uint16_t*)(p + 1*stride)= v;
        if(h==2) return;
        *(uint16_t*)(p + 2*stride)=
        *(uint16_t*)(p + 3*stride)= v;
    }else if(w==4){
        const uint32_t v= size==4 ? val : val*0x01010101;
        *(uint32_t*)(p + 0*stride)= v;
        if(h==1) return;
        *(uint32_t*)(p + 1*stride)= v;
        if(h==2) return;
        *(uint32_t*)(p + 2*stride)=
        *(uint32_t*)(p + 3*stride)= v;
    }else if(w==8){
    //gcc can't optimize 64bit math on x86_32
#if defined(ARCH_X86_64) || (defined(MP_WORDSIZE) && MP_WORDSIZE >= 64)
        const uint64_t v= val*0x0100000001ULL;
        *(uint64_t*)(p + 0*stride)= v;
        if(h==1) return;
        *(uint64_t*)(p + 1*stride)= v;
        if(h==2) return;
        *(uint64_t*)(p + 2*stride)=
        *(uint64_t*)(p + 3*stride)= v;
    }else if(w==16){
        const uint64_t v= val*0x0100000001ULL;
        *(uint64_t*)(p + 0+0*stride)=
        *(uint64_t*)(p + 8+0*stride)=
        *(uint64_t*)(p + 0+1*stride)=
        *(uint64_t*)(p + 8+1*stride)= v;
        if(h==2) return;
        *(uint64_t*)(p + 0+2*stride)=
        *(uint64_t*)(p + 8+2*stride)=
        *(uint64_t*)(p + 0+3*stride)=
        *(uint64_t*)(p + 8+3*stride)= v;
#else
        *(uint32_t*)(p + 0+0*stride)=
        *(uint32_t*)(p + 4+0*stride)= val;
        if(h==1) return;
        *(uint32_t*)(p + 0+1*stride)=
        *(uint32_t*)(p + 4+1*stride)= val;
        if(h==2) return;
        *(uint32_t*)(p + 0+2*stride)=
        *(uint32_t*)(p + 4+2*stride)=
        *(uint32_t*)(p + 0+3*stride)=
        *(uint32_t*)(p + 4+3*stride)= val;
    }else if(w==16){
        *(uint32_t*)(p + 0+0*stride)=
        *(uint32_t*)(p + 4+0*stride)=
        *(uint32_t*)(p + 8+0*stride)=
        *(uint32_t*)(p +12+0*stride)=
        *(uint32_t*)(p + 0+1*stride)=
        *(uint32_t*)(p + 4+1*stride)=
        *(uint32_t*)(p + 8+1*stride)=
        *(uint32_t*)(p +12+1*stride)= val;
        if(h==2) return;
        *(uint32_t*)(p + 0+2*stride)=
        *(uint32_t*)(p + 4+2*stride)=
        *(uint32_t*)(p + 8+2*stride)=
        *(uint32_t*)(p +12+2*stride)=
        *(uint32_t*)(p + 0+3*stride)=
        *(uint32_t*)(p + 4+3*stride)=
        *(uint32_t*)(p + 8+3*stride)=
        *(uint32_t*)(p +12+3*stride)= val;
#endif
    }else
        assert(0);
}
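/* Typical use: broadcast one value over a w x h sub-block of one of the
 * 8-entry-wide caches, e.g. marking a 4x4 block region of ref_cache as
 * unused for a list (as fill_caches() below does):
 *     fill_rectangle(&h->ref_cache[list][scan8[0]], 4, 4, 8, (uint8_t)LIST_NOT_USED, 1);
 * w, h and size should be compile-time constants so that the branches above
 * fold away after inlining. */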
static void fill_caches(H264Context *h, int mb_type, int for_deblock){
    MpegEncContext * const s = &h->s;
    const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
    int topleft_xy, top_xy, topright_xy, left_xy[2];
    int topleft_type, top_type, topright_type, left_type[2];
    int left_block[8];
    int i;

    //FIXME deblocking could skip the intra and nnz parts.
    if(for_deblock && (h->slice_num == 1 || h->slice_table[mb_xy] == h->slice_table[mb_xy-s->mb_stride]) && !FRAME_MBAFF)
        return;

    //wow what a mess, why didn't they simplify the interlacing&intra stuff, i can't imagine that these complex rules are worth it

    top_xy     = mb_xy  - s->mb_stride;
    topleft_xy = top_xy - 1;
    topright_xy= top_xy + 1;
    left_xy[1] = left_xy[0] = mb_xy-1;
    if(FRAME_MBAFF){
        const int pair_xy          = s->mb_x     + (s->mb_y & ~1)*s->mb_stride;
        const int top_pair_xy      = pair_xy     - s->mb_stride;
        const int topleft_pair_xy  = top_pair_xy - 1;
        const int topright_pair_xy = top_pair_xy + 1;
        const int topleft_mb_frame_flag  = !IS_INTERLACED(s->current_picture.mb_type[topleft_pair_xy]);
        const int top_mb_frame_flag      = !IS_INTERLACED(s->current_picture.mb_type[top_pair_xy]);
        const int topright_mb_frame_flag = !IS_INTERLACED(s->current_picture.mb_type[topright_pair_xy]);
        const int left_mb_frame_flag     = !IS_INTERLACED(s->current_picture.mb_type[pair_xy-1]);
        const int curr_mb_frame_flag     = !IS_INTERLACED(mb_type);
        const int bottom = (s->mb_y & 1);
        tprintf("fill_caches: curr_mb_frame_flag:%d, left_mb_frame_flag:%d, topleft_mb_frame_flag:%d, top_mb_frame_flag:%d, topright_mb_frame_flag:%d\n", curr_mb_frame_flag, left_mb_frame_flag, topleft_mb_frame_flag, top_mb_frame_flag, topright_mb_frame_flag);
        if (bottom
                ? !curr_mb_frame_flag // bottom macroblock
                : (!curr_mb_frame_flag && !top_mb_frame_flag) // top macroblock
                ) {
            top_xy -= s->mb_stride;
        }
        if (bottom
                ? !curr_mb_frame_flag // bottom macroblock
                : (!curr_mb_frame_flag && !topleft_mb_frame_flag) // top macroblock
                ) {
            topleft_xy -= s->mb_stride;
        }
        if (bottom
                ? !curr_mb_frame_flag // bottom macroblock
                : (!curr_mb_frame_flag && !topright_mb_frame_flag) // top macroblock
                ) {
            topright_xy -= s->mb_stride;
        }
        if (left_mb_frame_flag != curr_mb_frame_flag) {
            left_xy[1] = left_xy[0] = pair_xy - 1;
            if (curr_mb_frame_flag) {
                left_xy[1] += s->mb_stride;
            }
        }
    }
    h->top_mb_xy = top_xy;
    h->left_mb_xy[0] = left_xy[0];
    h->left_mb_xy[1] = left_xy[1];

    if(for_deblock){
        topleft_type = 0;
        topright_type = 0;
        top_type     = h->slice_table[top_xy     ] < 255 ? s->current_picture.mb_type[top_xy]     : 0;
        left_type[0] = h->slice_table[left_xy[0] ] < 255 ? s->current_picture.mb_type[left_xy[0]] : 0;
        left_type[1] = h->slice_table[left_xy[1] ] < 255 ? s->current_picture.mb_type[left_xy[1]] : 0;
        if(FRAME_MBAFF && !IS_INTRA(mb_type)){
            int list;
            int v = *(uint16_t*)&h->non_zero_count[mb_xy][14];
            for(i=0; i<16; i++)
                h->non_zero_count_cache[scan8[i]] = (v>>i)&1;
            for(list=0; list<h->list_count; list++){
                if(USES_LIST(mb_type,list)){
                    uint32_t *src = (uint32_t*)s->current_picture.motion_val[list][h->mb2b_xy[mb_xy]];
                    uint32_t *dst = (uint32_t*)h->mv_cache[list][scan8[0]];
                    int8_t *ref = &s->current_picture.ref_index[list][h->mb2b8_xy[mb_xy]];
                    for(i=0; i<4; i++, dst+=8, src+=h->b_stride){
                        dst[0] = src[0];
                        dst[1] = src[1];
                        dst[2] = src[2];
                        dst[3] = src[3];
                    }
                    *(uint32_t*)&h->ref_cache[list][scan8[ 0]] =
                    *(uint32_t*)&h->ref_cache[list][scan8[ 2]] = pack16to32(ref[0],ref[1])*0x0101;
                    ref += h->b8_stride;
                    *(uint32_t*)&h->ref_cache[list][scan8[ 8]] =
                    *(uint32_t*)&h->ref_cache[list][scan8[10]] = pack16to32(ref[0],ref[1])*0x0101;
                }else{
                    fill_rectangle(&h-> mv_cache[list][scan8[ 0]], 4, 4, 8, 0, 4);
                    fill_rectangle(&h->ref_cache[list][scan8[ 0]], 4, 4, 8, (uint8_t)LIST_NOT_USED, 1);
                }
            }
        }
    }else{
        topleft_type = h->slice_table[topleft_xy ] == h->slice_num ? s->current_picture.mb_type[topleft_xy] : 0;
        top_type     = h->slice_table[top_xy     ] == h->slice_num ? s->current_picture.mb_type[top_xy]     : 0;
        topright_type= h->slice_table[topright_xy] == h->slice_num ? s->current_picture.mb_type[topright_xy]: 0;
        left_type[0] = h->slice_table[left_xy[0] ] == h->slice_num ? s->current_picture.mb_type[left_xy[0]] : 0;
        left_type[1] = h->slice_table[left_xy[1] ] == h->slice_num ? s->current_picture.mb_type[left_xy[1]] : 0;
    }
    if(IS_INTRA(mb_type)){
        h->topleft_samples_available=
        h->top_samples_available=
        h->left_samples_available= 0xFFFF;
        h->topright_samples_available= 0xEEEA;

        if(!IS_INTRA(top_type) && (top_type==0 || h->pps.constrained_intra_pred)){
            h->topleft_samples_available= 0xB3FF;
            h->top_samples_available= 0x33FF;
            h->topright_samples_available= 0x26EA;
        }
        for(i=0; i<2; i++){
            if(!IS_INTRA(left_type[i]) && (left_type[i]==0 || h->pps.constrained_intra_pred)){
                h->topleft_samples_available&= 0xDF5F;
                h->left_samples_available&= 0x5F5F;
            }
        }

        if(!IS_INTRA(topleft_type) && (topleft_type==0 || h->pps.constrained_intra_pred))
            h->topleft_samples_available&= 0x7FFF;

        if(!IS_INTRA(topright_type) && (topright_type==0 || h->pps.constrained_intra_pred))
            h->topright_samples_available&= 0xFBFF;
        if(IS_INTRA4x4(mb_type)){
            if(IS_INTRA4x4(top_type)){
                h->intra4x4_pred_mode_cache[4+8*0]= h->intra4x4_pred_mode[top_xy][4];
                h->intra4x4_pred_mode_cache[5+8*0]= h->intra4x4_pred_mode[top_xy][5];
                h->intra4x4_pred_mode_cache[6+8*0]= h->intra4x4_pred_mode[top_xy][6];
                h->intra4x4_pred_mode_cache[7+8*0]= h->intra4x4_pred_mode[top_xy][3];
            }else{
                int pred;
                if(!top_type || (IS_INTER(top_type) && h->pps.constrained_intra_pred))
                    pred= -1;
                else
                    pred= 2;
                h->intra4x4_pred_mode_cache[4+8*0]=
                h->intra4x4_pred_mode_cache[5+8*0]=
                h->intra4x4_pred_mode_cache[6+8*0]=
                h->intra4x4_pred_mode_cache[7+8*0]= pred;
            }
            for(i=0; i<2; i++){
                if(IS_INTRA4x4(left_type[i])){
                    h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[0+2*i]];
                    h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[1+2*i]];
                }else{
                    int pred;
                    if(!left_type[i] || (IS_INTER(left_type[i]) && h->pps.constrained_intra_pred))
                        pred= -1;
                    else
                        pred= 2;
                    h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]=
                    h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= pred;
                }
            }
        }
    }
//FIXME constrained_intra_pred & partitioning & nnz (let's hope this is just a typo in the spec)
    if(top_type){
        h->non_zero_count_cache[4+8*0]= h->non_zero_count[top_xy][4];
        h->non_zero_count_cache[5+8*0]= h->non_zero_count[top_xy][5];
        h->non_zero_count_cache[6+8*0]= h->non_zero_count[top_xy][6];
        h->non_zero_count_cache[7+8*0]= h->non_zero_count[top_xy][3];

        h->non_zero_count_cache[1+8*0]= h->non_zero_count[top_xy][9];
        h->non_zero_count_cache[2+8*0]= h->non_zero_count[top_xy][8];

        h->non_zero_count_cache[1+8*3]= h->non_zero_count[top_xy][12];
        h->non_zero_count_cache[2+8*3]= h->non_zero_count[top_xy][11];
    }else{
        h->non_zero_count_cache[4+8*0]=
        h->non_zero_count_cache[5+8*0]=
        h->non_zero_count_cache[6+8*0]=
        h->non_zero_count_cache[7+8*0]=

        h->non_zero_count_cache[1+8*0]=
        h->non_zero_count_cache[2+8*0]=

        h->non_zero_count_cache[1+8*3]=
        h->non_zero_count_cache[2+8*3]= h->pps.cabac && !IS_INTRA(mb_type) ? 0 : 64;
    }
    for (i=0; i<2; i++) {
        if(left_type[i]){
            h->non_zero_count_cache[3+8*1 + 2*8*i]= h->non_zero_count[left_xy[i]][left_block[0+2*i]];
            h->non_zero_count_cache[3+8*2 + 2*8*i]= h->non_zero_count[left_xy[i]][left_block[1+2*i]];
            h->non_zero_count_cache[0+8*1 +   8*i]= h->non_zero_count[left_xy[i]][left_block[4+2*i]];
            h->non_zero_count_cache[0+8*4 +   8*i]= h->non_zero_count[left_xy[i]][left_block[5+2*i]];
        }else{
            h->non_zero_count_cache[3+8*1 + 2*8*i]=
            h->non_zero_count_cache[3+8*2 + 2*8*i]=
            h->non_zero_count_cache[0+8*1 +   8*i]=
            h->non_zero_count_cache[0+8*4 +   8*i]= h->pps.cabac && !IS_INTRA(mb_type) ? 0 : 64;
        }
    }
    if( h->pps.cabac ) {
        // top_cbp
        if(top_type) {
            h->top_cbp = h->cbp_table[top_xy];
        } else if(IS_INTRA(mb_type)) {
            h->top_cbp = 0x1C0;
        } else {
            h->top_cbp = 0;
        }
        // left_cbp
        if (left_type[0]) {
            h->left_cbp = h->cbp_table[left_xy[0]] & 0x1f0;
        } else if(IS_INTRA(mb_type)) {
            h->left_cbp = 0x1C0;
        } else {
            h->left_cbp = 0;
        }
        if (left_type[0]) {
            h->left_cbp |= ((h->cbp_table[left_xy[0]]>>((left_block[0]&(~1))+1))&0x1) << 1;
        }
        if (left_type[1]) {
            h->left_cbp |= ((h->cbp_table[left_xy[1]]>>((left_block[2]&(~1))+1))&0x1) << 3;
        }
    }
    if(IS_INTER(mb_type) || IS_DIRECT(mb_type)){
        int list;
        for(list=0; list<h->list_count; list++){
            if(!USES_LIST(mb_type, list) && !IS_DIRECT(mb_type) && !h->deblocking_filter){
                /*if(!h->mv_cache_clean[list]){
                    memset(h->mv_cache [list],  0, 8*5*2*sizeof(int16_t)); //FIXME clean only input? clean at all?
                    memset(h->ref_cache[list], PART_NOT_AVAILABLE, 8*5*sizeof(int8_t));
                    h->mv_cache_clean[list]= 1;
                }*/
                continue;
            }
            h->mv_cache_clean[list]= 0;

            if(USES_LIST(top_type, list)){
                const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
                const int b8_xy= h->mb2b8_xy[top_xy] + h->b8_stride;
                *(uint32_t*)h->mv_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 0];
                *(uint32_t*)h->mv_cache[list][scan8[0] + 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 1];
                *(uint32_t*)h->mv_cache[list][scan8[0] + 2 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 2];
                *(uint32_t*)h->mv_cache[list][scan8[0] + 3 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 3];
                h->ref_cache[list][scan8[0] + 0 - 1*8]=
                h->ref_cache[list][scan8[0] + 1 - 1*8]= s->current_picture.ref_index[list][b8_xy + 0];
                h->ref_cache[list][scan8[0] + 2 - 1*8]=
                h->ref_cache[list][scan8[0] + 3 - 1*8]= s->current_picture.ref_index[list][b8_xy + 1];
            }else{
                *(uint32_t*)h->mv_cache [list][scan8[0] + 0 - 1*8]=
                *(uint32_t*)h->mv_cache [list][scan8[0] + 1 - 1*8]=
                *(uint32_t*)h->mv_cache [list][scan8[0] + 2 - 1*8]=
                *(uint32_t*)h->mv_cache [list][scan8[0] + 3 - 1*8]= 0;
                *(uint32_t*)&h->ref_cache[list][scan8[0] + 0 - 1*8]= ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101;
            }
            //FIXME unify cleanup or something
            if(USES_LIST(left_type[0], list)){
                const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
                const int b8_xy= h->mb2b8_xy[left_xy[0]] + 1;
                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0]];
                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[1]];
                h->ref_cache[list][scan8[0] - 1 + 0*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[0]>>1)];
                h->ref_cache[list][scan8[0] - 1 + 1*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[1]>>1)];
            }else{
                *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 0*8]=
                *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 1*8]= 0;
                h->ref_cache[list][scan8[0] - 1 + 0*8]=
                h->ref_cache[list][scan8[0] - 1 + 1*8]= left_type[0] ? LIST_NOT_USED : PART_NOT_AVAILABLE;
            }

            if(USES_LIST(left_type[1], list)){
                const int b_xy= h->mb2b_xy[left_xy[1]] + 3;
                const int b8_xy= h->mb2b8_xy[left_xy[1]] + 1;
                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[2]];
                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[3]];
                h->ref_cache[list][scan8[0] - 1 + 2*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[2]>>1)];
                h->ref_cache[list][scan8[0] - 1 + 3*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[3]>>1)];
            }else{
                *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 2*8]=
                *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 3*8]= 0;
                h->ref_cache[list][scan8[0] - 1 + 2*8]=
                h->ref_cache[list][scan8[0] - 1 + 3*8]= left_type[0] ? LIST_NOT_USED : PART_NOT_AVAILABLE;
                assert((!left_type[0]) == (!left_type[1]));
            }
            if((for_deblock || (IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred)) && !FRAME_MBAFF)
                continue;

            if(USES_LIST(topleft_type, list)){
                const int b_xy = h->mb2b_xy[topleft_xy] + 3 + 3*h->b_stride;
                const int b8_xy= h->mb2b8_xy[topleft_xy] + 1 + h->b8_stride;
                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
                h->ref_cache[list][scan8[0] - 1 - 1*8]= s->current_picture.ref_index[list][b8_xy];
            }else{
                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= 0;
                h->ref_cache[list][scan8[0] - 1 - 1*8]= topleft_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
            }

            if(USES_LIST(topright_type, list)){
                const int b_xy= h->mb2b_xy[topright_xy] + 3*h->b_stride;
                const int b8_xy= h->mb2b8_xy[topright_xy] + h->b8_stride;
                *(uint32_t*)h->mv_cache[list][scan8[0] + 4 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
                h->ref_cache[list][scan8[0] + 4 - 1*8]= s->current_picture.ref_index[list][b8_xy];
            }else{
                *(uint32_t*)h->mv_cache [list][scan8[0] + 4 - 1*8]= 0;
                h->ref_cache[list][scan8[0] + 4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
            }
            if((IS_SKIP(mb_type) || IS_DIRECT(mb_type)) && !FRAME_MBAFF)
                continue;

            h->ref_cache[list][scan8[5 ]+1] =
            h->ref_cache[list][scan8[7 ]+1] =
            h->ref_cache[list][scan8[13]+1] =  //FIXME remove past 3 (init somewhere else)
            h->ref_cache[list][scan8[4 ]] =
            h->ref_cache[list][scan8[12]] = PART_NOT_AVAILABLE;
            *(uint32_t*)h->mv_cache [list][scan8[5 ]+1]=
            *(uint32_t*)h->mv_cache [list][scan8[7 ]+1]=
            *(uint32_t*)h->mv_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewhere else)
            *(uint32_t*)h->mv_cache [list][scan8[4 ]]=
            *(uint32_t*)h->mv_cache [list][scan8[12]]= 0;
            if( h->pps.cabac ) {
                /* XXX yuck, load mvd */
                if(USES_LIST(top_type, list)){
                    const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
                    *(uint32_t*)h->mvd_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 0];
                    *(uint32_t*)h->mvd_cache[list][scan8[0] + 1 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 1];
                    *(uint32_t*)h->mvd_cache[list][scan8[0] + 2 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 2];
                    *(uint32_t*)h->mvd_cache[list][scan8[0] + 3 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 3];
                }else{
                    *(uint32_t*)h->mvd_cache [list][scan8[0] + 0 - 1*8]=
                    *(uint32_t*)h->mvd_cache [list][scan8[0] + 1 - 1*8]=
                    *(uint32_t*)h->mvd_cache [list][scan8[0] + 2 - 1*8]=
                    *(uint32_t*)h->mvd_cache [list][scan8[0] + 3 - 1*8]= 0;
                }
                if(USES_LIST(left_type[0], list)){
                    const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[0]];
                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[1]];
                }else{
                    *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 0*8]=
                    *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 1*8]= 0;
                }
                if(USES_LIST(left_type[1], list)){
                    const int b_xy= h->mb2b_xy[left_xy[1]] + 3;
                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[2]];
                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[3]];
                }else{
                    *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 2*8]=
                    *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 3*8]= 0;
                }
                *(uint32_t*)h->mvd_cache [list][scan8[5 ]+1]=
                *(uint32_t*)h->mvd_cache [list][scan8[7 ]+1]=
                *(uint32_t*)h->mvd_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewhere else)
                *(uint32_t*)h->mvd_cache [list][scan8[4 ]]=
                *(uint32_t*)h->mvd_cache [list][scan8[12]]= 0;
                if(h->slice_type == B_TYPE){
                    fill_rectangle(&h->direct_cache[scan8[0]], 4, 4, 8, 0, 1);

                    if(IS_DIRECT(top_type)){
                        *(uint32_t*)&h->direct_cache[scan8[0] - 1*8]= 0x01010101;
                    }else if(IS_8X8(top_type)){
                        int b8_xy = h->mb2b8_xy[top_xy] + h->b8_stride;
                        h->direct_cache[scan8[0] + 0 - 1*8]= h->direct_table[b8_xy];
                        h->direct_cache[scan8[0] + 2 - 1*8]= h->direct_table[b8_xy + 1];
                    }else{
                        *(uint32_t*)&h->direct_cache[scan8[0] - 1*8]= 0;
                    }

                    if(IS_DIRECT(left_type[0]))
                        h->direct_cache[scan8[0] - 1 + 0*8]= 1;
                    else if(IS_8X8(left_type[0]))
                        h->direct_cache[scan8[0] - 1 + 0*8]= h->direct_table[h->mb2b8_xy[left_xy[0]] + 1 + h->b8_stride*(left_block[0]>>1)];
                    else
                        h->direct_cache[scan8[0] - 1 + 0*8]= 0;

                    if(IS_DIRECT(left_type[1]))
                        h->direct_cache[scan8[0] - 1 + 2*8]= 1;
                    else if(IS_8X8(left_type[1]))
                        h->direct_cache[scan8[0] - 1 + 2*8]= h->direct_table[h->mb2b8_xy[left_xy[1]] + 1 + h->b8_stride*(left_block[2]>>1)];
                    else
                        h->direct_cache[scan8[0] - 1 + 2*8]= 0;
                }
            }
            if(FRAME_MBAFF){
#define MAP_MVS\
                    MAP_F2F(scan8[0] - 1 - 1*8, topleft_type)\
                    MAP_F2F(scan8[0] + 0 - 1*8, top_type)\
                    MAP_F2F(scan8[0] + 1 - 1*8, top_type)\
                    MAP_F2F(scan8[0] + 2 - 1*8, top_type)\
                    MAP_F2F(scan8[0] + 3 - 1*8, top_type)\
                    MAP_F2F(scan8[0] + 4 - 1*8, topright_type)\
                    MAP_F2F(scan8[0] - 1 + 0*8, left_type[0])\
                    MAP_F2F(scan8[0] - 1 + 1*8, left_type[0])\
                    MAP_F2F(scan8[0] - 1 + 2*8, left_type[1])\
                    MAP_F2F(scan8[0] - 1 + 3*8, left_type[1])

                if(MB_FIELD){
#define MAP_F2F(idx, mb_type)\
                    if(!IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\
                        h->ref_cache[list][idx] <<= 1;\
                        h->mv_cache[list][idx][1] /= 2;\
                        h->mvd_cache[list][idx][1] /= 2;\
                    }
                    MAP_MVS
#undef MAP_F2F
                }else{
#define MAP_F2F(idx, mb_type)\
                    if(IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\
                        h->ref_cache[list][idx] >>= 1;\
                        h->mv_cache[list][idx][1] <<= 1;\
                        h->mvd_cache[list][idx][1] <<= 1;\
                    }
                    MAP_MVS
#undef MAP_F2F
                }
            }
        }
    }
    h->neighbor_transform_size= !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[0]);
}
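/* Layout note (informal): the various *_cache arrays filled above are 8
 * entries wide and 5 rows high (see e.g. mv_cache[2][5*8][2]); scan8[] maps
 * block indices 0..15 into the interior of that grid, so that row 0 carries
 * the bottom edge of the top neighbor and the column just left of scan8[0]
 * carries the right edge of the left neighbor. That is why "index - 8" means
 * "block above" and "index - 1" means "block to the left" throughout. */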
static inline void write_back_intra_pred_mode(H264Context *h){
    MpegEncContext * const s = &h->s;
    const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;

    h->intra4x4_pred_mode[mb_xy][0]= h->intra4x4_pred_mode_cache[7+8*1];
    h->intra4x4_pred_mode[mb_xy][1]= h->intra4x4_pred_mode_cache[7+8*2];
    h->intra4x4_pred_mode[mb_xy][2]= h->intra4x4_pred_mode_cache[7+8*3];
    h->intra4x4_pred_mode[mb_xy][3]= h->intra4x4_pred_mode_cache[7+8*4];
    h->intra4x4_pred_mode[mb_xy][4]= h->intra4x4_pred_mode_cache[4+8*4];
    h->intra4x4_pred_mode[mb_xy][5]= h->intra4x4_pred_mode_cache[5+8*4];
    h->intra4x4_pred_mode[mb_xy][6]= h->intra4x4_pred_mode_cache[6+8*4];
}
/**
 * Checks if the top & left blocks are available (if needed) and changes the DC mode so it only uses the available blocks.
 */
static inline int check_intra4x4_pred_mode(H264Context *h){
    MpegEncContext * const s = &h->s;
    static const int8_t top [12]= {-1, 0,LEFT_DC_PRED,-1,-1,-1,-1,-1, 0};
    static const int8_t left[12]= { 0,-1, TOP_DC_PRED, 0,-1,-1,-1, 0,-1,DC_128_PRED};
    int i;

    if(!(h->top_samples_available&0x8000)){
        for(i=0; i<4; i++){
            int status= top[ h->intra4x4_pred_mode_cache[scan8[0] + i] ];
            if(status<0){
                av_log(h->s.avctx, AV_LOG_ERROR, "top block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y);
                return -1;
            } else if(status){
                h->intra4x4_pred_mode_cache[scan8[0] + i]= status;
            }
        }
    }

    if(!(h->left_samples_available&0x8000)){
        for(i=0; i<4; i++){
            int status= left[ h->intra4x4_pred_mode_cache[scan8[0] + 8*i] ];
            if(status<0){
                av_log(h->s.avctx, AV_LOG_ERROR, "left block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y);
                return -1;
            } else if(status){
                h->intra4x4_pred_mode_cache[scan8[0] + 8*i]= status;
            }
        }
    }

    return 0;
} //FIXME cleanup like next
/**
 * Checks if the top & left blocks are available (if needed) and changes the DC mode so it only uses the available blocks.
 */
static inline int check_intra_pred_mode(H264Context *h, int mode){
    MpegEncContext * const s = &h->s;
    static const int8_t top [7]= {LEFT_DC_PRED8x8, 1,-1,-1};
    static const int8_t left[7]= { TOP_DC_PRED8x8,-1, 2,-1,DC_128_PRED8x8};

    if(mode > 6U){
        av_log(h->s.avctx, AV_LOG_ERROR, "out of range intra chroma pred mode at %d %d\n", s->mb_x, s->mb_y);
        return -1;
    }

    if(!(h->top_samples_available&0x8000)){
        mode= top[ mode ];
        if(mode<0){
            av_log(h->s.avctx, AV_LOG_ERROR, "top block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y);
            return -1;
        }
    }

    if(!(h->left_samples_available&0x8000)){
        mode= left[ mode ];
        if(mode<0){
            av_log(h->s.avctx, AV_LOG_ERROR, "left block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y);
            return -1;
        }
    }

    return mode;
}
/**
 * gets the predicted intra4x4 prediction mode.
 */
static inline int pred_intra_mode(H264Context *h, int n){
    const int index8= scan8[n];
    const int left= h->intra4x4_pred_mode_cache[index8 - 1];
    const int top = h->intra4x4_pred_mode_cache[index8 - 8];
    const int min= FFMIN(left, top);

    tprintf("mode:%d %d min:%d\n", left, top, min);

    if(min<0) return DC_PRED;
    else      return min;
}
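/* This is the spec's "most probable mode" rule: the prediction is the
 * smaller of the two neighboring modes, e.g. left=HOR_PRED(1) and
 * top=DC_PRED(2) predict mode 1. Unavailable neighbors are negative in the
 * cache, hence the DC_PRED fallback above. */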
static inline void write_back_non_zero_count(H264Context *h){
    MpegEncContext * const s = &h->s;
    const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;

    h->non_zero_count[mb_xy][0]= h->non_zero_count_cache[7+8*1];
    h->non_zero_count[mb_xy][1]= h->non_zero_count_cache[7+8*2];
    h->non_zero_count[mb_xy][2]= h->non_zero_count_cache[7+8*3];
    h->non_zero_count[mb_xy][3]= h->non_zero_count_cache[7+8*4];
    h->non_zero_count[mb_xy][4]= h->non_zero_count_cache[4+8*4];
    h->non_zero_count[mb_xy][5]= h->non_zero_count_cache[5+8*4];
    h->non_zero_count[mb_xy][6]= h->non_zero_count_cache[6+8*4];

    h->non_zero_count[mb_xy][9]= h->non_zero_count_cache[1+8*2];
    h->non_zero_count[mb_xy][8]= h->non_zero_count_cache[2+8*2];
    h->non_zero_count[mb_xy][7]= h->non_zero_count_cache[2+8*1];

    h->non_zero_count[mb_xy][12]=h->non_zero_count_cache[1+8*5];
    h->non_zero_count[mb_xy][11]=h->non_zero_count_cache[2+8*5];
    h->non_zero_count[mb_xy][10]=h->non_zero_count_cache[2+8*4];

    if(FRAME_MBAFF){
        // store all luma nnzs, for deblocking
        int v = 0, i;
        for(i=0; i<16; i++)
            v += (!!h->non_zero_count_cache[scan8[i]]) << i;
        *(uint16_t*)&h->non_zero_count[mb_xy][14] = v;
    }
}
/**
 * gets the predicted number of non-zero coefficients.
 * @param n block index
 */
static inline int pred_non_zero_count(H264Context *h, int n){
    const int index8= scan8[n];
    const int left= h->non_zero_count_cache[index8 - 1];
    const int top = h->non_zero_count_cache[index8 - 8];
    int i= left + top;

    if(i<64) i= (i+1)>>1;

    tprintf("pred_nnz L%X T%X n%d s%d P%X\n", left, top, n, scan8[n], i&31);

    return i&31;
}
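/* Unavailable neighbors are stored as 64 in the cache, so left+top stays
 * below 64 only when both are present; in that case the rounded average is
 * used. Otherwise the &31 strips the 64 bias, returning the single available
 * count, or 0 when neither neighbor exists (64+64 = 128, and 128&31 == 0). */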
static inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, int list, int part_width){
    const int topright_ref= h->ref_cache[list][ i - 8 + part_width ];

    /* there is no consistent mapping of mvs to neighboring locations that will
     * make mbaff happy, so we can't move all this logic to fill_caches */
    if(FRAME_MBAFF){
        MpegEncContext *s = &h->s;
        const uint32_t *mb_types = s->current_picture_ptr->mb_type;
        const int16_t *mv;
        *(uint32_t*)h->mv_cache[list][scan8[0]-2] = 0;
        *C = h->mv_cache[list][scan8[0]-2];

        if(!MB_FIELD
           && (s->mb_y&1) && i < scan8[0]+8 && topright_ref != PART_NOT_AVAILABLE){
            int topright_xy = s->mb_x + (s->mb_y-1)*s->mb_stride + (i == scan8[0]+3);
            if(IS_INTERLACED(mb_types[topright_xy])){
#define SET_DIAG_MV(MV_OP, REF_OP, X4, Y4)\
                const int x4 = X4, y4 = Y4;\
                const int mb_type = mb_types[(x4>>2)+(y4>>2)*s->mb_stride];\
                if(!USES_LIST(mb_type,list) && !IS_8X8(mb_type))\
                    return LIST_NOT_USED;\
                mv = s->current_picture_ptr->motion_val[list][x4 + y4*h->b_stride];\
                h->mv_cache[list][scan8[0]-2][0] = mv[0];\
                h->mv_cache[list][scan8[0]-2][1] = mv[1] MV_OP;\
                return s->current_picture_ptr->ref_index[list][(x4>>1) + (y4>>1)*h->b8_stride] REF_OP;

                SET_DIAG_MV(*2, >>1, s->mb_x*4+(i&7)-4+part_width, s->mb_y*4-1);
            }
        }
        if(topright_ref == PART_NOT_AVAILABLE
           && ((s->mb_y&1) || i >= scan8[0]+8) && (i&7)==4
           && h->ref_cache[list][scan8[0]-1] != PART_NOT_AVAILABLE){
            if(!MB_FIELD
               && IS_INTERLACED(mb_types[h->left_mb_xy[0]])){
                SET_DIAG_MV(*2, >>1, s->mb_x*4-1, (s->mb_y|1)*4+(s->mb_y&1)*2+(i>>4)-1);
            }
            if(MB_FIELD
               && !IS_INTERLACED(mb_types[h->left_mb_xy[0]])
               && i >= scan8[0]+8){
                // left shift will turn LIST_NOT_USED into PART_NOT_AVAILABLE, but that's OK.
                SET_DIAG_MV(>>1, <<1, s->mb_x*4-1, (s->mb_y&~1)*4 - 1 + ((i-scan8[0])>>3)*2);
            }
        }
#undef SET_DIAG_MV
    }

    if(topright_ref != PART_NOT_AVAILABLE){
        *C= h->mv_cache[list][ i - 8 + part_width ];
        return topright_ref;
    }else{
        tprintf("topright MV not available\n");

        *C= h->mv_cache[list][ i - 8 - 1 ];
        return h->ref_cache[list][ i - 8 - 1 ];
    }
}
/**
 * gets the predicted MV.
 * @param n the block index
 * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static inline void pred_motion(H264Context * const h, int n, int part_width, int list, int ref, int * const mx, int * const my){
    const int index8= scan8[n];
    const int top_ref=  h->ref_cache[list][ index8 - 8 ];
    const int left_ref= h->ref_cache[list][ index8 - 1 ];
    const int16_t * const A= h->mv_cache[list][ index8 - 1 ];
    const int16_t * const B= h->mv_cache[list][ index8 - 8 ];
    const int16_t * C;
    int diagonal_ref, match_count;

    assert(part_width==1 || part_width==2 || part_width==4);

    diagonal_ref= fetch_diagonal_mv(h, &C, index8, list, part_width);
    match_count= (diagonal_ref==ref) + (top_ref==ref) + (left_ref==ref);
    tprintf("pred_motion match_count=%d\n", match_count);
    if(match_count > 1){ //most common
        *mx= mid_pred(A[0], B[0], C[0]);
        *my= mid_pred(A[1], B[1], C[1]);
    }else if(match_count==1){
        if(left_ref==ref){
            *mx= A[0];
            *my= A[1];
        }else if(top_ref==ref){
            *mx= B[0];
            *my= B[1];
        }else{
            *mx= C[0];
            *my= C[1];
        }
    }else{
        if(top_ref == PART_NOT_AVAILABLE && diagonal_ref == PART_NOT_AVAILABLE && left_ref != PART_NOT_AVAILABLE){
            *mx= A[0];
            *my= A[1];
        }else{
            *mx= mid_pred(A[0], B[0], C[0]);
            *my= mid_pred(A[1], B[1], C[1]);
        }
    }

    tprintf("pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref, A[0], A[1], ref, *mx, *my, h->s.mb_x, h->s.mb_y, n, list);
}
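/* Median example: neighbors A=(2,0), B=(4,2), C=(0,6) predict
 * (mid_pred(2,4,0), mid_pred(0,2,6)) = (2,2); the two components are
 * predicted independently of each other. */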
/**
 * gets the directionally predicted 16x8 MV.
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static inline void pred_16x8_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
    if(n==0){
        const int top_ref=       h->ref_cache[list][ scan8[0] - 8 ];
        const int16_t * const B=  h->mv_cache[list][ scan8[0] - 8 ];

        tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], h->s.mb_x, h->s.mb_y, n, list);

        if(top_ref == ref){
            *mx= B[0];
            *my= B[1];
            return;
        }
    }else{
        const int left_ref=      h->ref_cache[list][ scan8[8] - 1 ];
        const int16_t * const A=  h->mv_cache[list][ scan8[8] - 1 ];

        tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);

        if(left_ref == ref){
            *mx= A[0];
            *my= A[1];
            return;
        }
    }

    //RARE
    pred_motion(h, n, 4, list, ref, mx, my);
}
/**
 * gets the directionally predicted 8x16 MV.
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static inline void pred_8x16_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
    if(n==0){
        const int left_ref=      h->ref_cache[list][ scan8[0] - 1 ];
        const int16_t * const A=  h->mv_cache[list][ scan8[0] - 1 ];

        tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);

        if(left_ref == ref){
            *mx= A[0];
            *my= A[1];
            return;
        }
    }else{
        const int16_t * C;
        int diagonal_ref;

        diagonal_ref= fetch_diagonal_mv(h, &C, scan8[4], list, 2);

        tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", diagonal_ref, C[0], C[1], h->s.mb_x, h->s.mb_y, n, list);

        if(diagonal_ref == ref){
            *mx= C[0];
            *my= C[1];
            return;
        }
    }

    //RARE
    pred_motion(h, n, 2, list, ref, mx, my);
}
static inline void pred_pskip_motion(H264Context * const h, int * const mx, int * const my){
    const int top_ref = h->ref_cache[0][ scan8[0] - 8 ];
    const int left_ref= h->ref_cache[0][ scan8[0] - 1 ];

    tprintf("pred_pskip: (%d) (%d) at %2d %2d\n", top_ref, left_ref, h->s.mb_x, h->s.mb_y);

    if(top_ref == PART_NOT_AVAILABLE || left_ref == PART_NOT_AVAILABLE
       || (top_ref == 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 8 ] == 0)
       || (left_ref == 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 1 ] == 0)){

        *mx = *my = 0;
        return;
    }

    pred_motion(h, 0, 4, 0, 0, mx, my);
}
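/* P_SKIP rule: the MV is forced to (0,0) when the MB sits at the left/top
 * edge (a neighbor is PART_NOT_AVAILABLE) or when either neighbor uses
 * reference 0 with a zero MV; only otherwise is the 16x16 median used. */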
static inline void direct_dist_scale_factor(H264Context * const h){
    const int poc  = h->s.current_picture_ptr->poc;
    const int poc1 = h->ref_list[1][0].poc;
    int i;
    for(i=0; i<h->ref_count[0]; i++){
        int poc0 = h->ref_list[0][i].poc;
        int td = clip(poc1 - poc0, -128, 127);
        if(td == 0 /* FIXME || pic0 is a long-term ref */){
            h->dist_scale_factor[i] = 256;
        }else{
            int tb = clip(poc - poc0, -128, 127);
            int tx = (16384 + (FFABS(td) >> 1)) / td;
            h->dist_scale_factor[i] = clip((tb*tx + 32) >> 6, -1024, 1023);
        }
    }
    if(FRAME_MBAFF){
        for(i=0; i<h->ref_count[0]; i++){
            h->dist_scale_factor_field[2*i] =
            h->dist_scale_factor_field[2*i+1] = h->dist_scale_factor[i];
        }
    }
}
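/* Worked example of the temporal-direct scale: poc=2, poc0=0, poc1=4 gives
 * tb=2, td=4, tx=(16384+2)/4=4096 and dist_scale_factor=(2*4096+32)>>6=128,
 * so list0 MVs get scaled by 128/256 = 1/2 -- matching a picture that lies
 * halfway between its two references. */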
static inline void direct_ref_list_init(H264Context * const h){
    MpegEncContext * const s = &h->s;
    Picture * const ref1 = &h->ref_list[1][0];
    Picture * const cur = s->current_picture_ptr;
    int list, i, j;
    if(cur->pict_type == I_TYPE)
        cur->ref_count[0] = 0;
    if(cur->pict_type != B_TYPE)
        cur->ref_count[1] = 0;
    for(list=0; list<2; list++){
        cur->ref_count[list] = h->ref_count[list];
        for(j=0; j<h->ref_count[list]; j++)
            cur->ref_poc[list][j] = h->ref_list[list][j].poc;
    }
    if(cur->pict_type != B_TYPE || h->direct_spatial_mv_pred)
        return;
    for(list=0; list<2; list++){
        for(i=0; i<ref1->ref_count[list]; i++){
            const int poc = ref1->ref_poc[list][i];
            h->map_col_to_list0[list][i] = 0; /* bogus; fills in for missing frames */
            for(j=0; j<h->ref_count[list]; j++)
                if(h->ref_list[list][j].poc == poc){
                    h->map_col_to_list0[list][i] = j;
                    break;
                }
        }
    }
    if(FRAME_MBAFF){
        for(list=0; list<2; list++){
            for(i=0; i<ref1->ref_count[list]; i++){
                j = h->map_col_to_list0[list][i];
                h->map_col_to_list0_field[list][2*i] = 2*j;
                h->map_col_to_list0_field[list][2*i+1] = 2*j+1;
            }
        }
    }
}
static inline void pred_direct_motion(H264Context * const h, int *mb_type){
    MpegEncContext * const s = &h->s;
    const int mb_xy =   s->mb_x +   s->mb_y*s->mb_stride;
    const int b8_xy = 2*s->mb_x + 2*s->mb_y*h->b8_stride;
    const int b4_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;
    const int mb_type_col = h->ref_list[1][0].mb_type[mb_xy];
    const int16_t (*l1mv0)[2] = (const int16_t (*)[2]) &h->ref_list[1][0].motion_val[0][b4_xy];
    const int16_t (*l1mv1)[2] = (const int16_t (*)[2]) &h->ref_list[1][0].motion_val[1][b4_xy];
    const int8_t *l1ref0 = &h->ref_list[1][0].ref_index[0][b8_xy];
    const int8_t *l1ref1 = &h->ref_list[1][0].ref_index[1][b8_xy];
    const int is_b8x8 = IS_8X8(*mb_type);
    unsigned int sub_mb_type;
    int i8, i4;

#define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16|MB_TYPE_INTRA4x4|MB_TYPE_INTRA16x16|MB_TYPE_INTRA_PCM)
    if(IS_8X8(mb_type_col) && !h->sps.direct_8x8_inference_flag){
        /* FIXME save sub mb types from previous frames (or derive from MVs)
         * so we know exactly what block size to use */
        sub_mb_type = MB_TYPE_8x8|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_4x4 */
        *mb_type =    MB_TYPE_8x8|MB_TYPE_L0L1;
    }else if(!is_b8x8 && (mb_type_col & MB_TYPE_16x16_OR_INTRA)){
        sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
        *mb_type =    MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_16x16 */
    }else{
        sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
        *mb_type =    MB_TYPE_8x8|MB_TYPE_L0L1;
    }
    if(!is_b8x8)
        *mb_type |= MB_TYPE_DIRECT2;
    if(MB_FIELD)
        *mb_type |= MB_TYPE_INTERLACED;

    tprintf("mb_type = %08x, sub_mb_type = %08x, is_b8x8 = %d, mb_type_col = %08x\n", *mb_type, sub_mb_type, is_b8x8, mb_type_col);
    if(h->direct_spatial_mv_pred){
        int ref[2];
        int mv[2][2];
        int list;

        /* FIXME interlacing + spatial direct uses wrong colocated block positions */

        /* ref = min(neighbors) */
        for(list=0; list<2; list++){
            int refa = h->ref_cache[list][scan8[0] - 1];
            int refb = h->ref_cache[list][scan8[0] - 8];
            int refc = h->ref_cache[list][scan8[0] - 8 + 4];
            if(refc == -2)
                refc = h->ref_cache[list][scan8[0] - 8 - 1];
            ref[list] = refa;
            if(ref[list] < 0 || (refb < ref[list] && refb >= 0))
                ref[list] = refb;
            if(ref[list] < 0 || (refc < ref[list] && refc >= 0))
                ref[list] = refc;
            if(ref[list] < 0)
                ref[list] = -1;
        }

        if(ref[0] < 0 && ref[1] < 0){
            ref[0] = ref[1] = 0;
            mv[0][0] = mv[0][1] =
            mv[1][0] = mv[1][1] = 0;
        }else{
            for(list=0; list<2; list++){
                if(ref[list] >= 0)
                    pred_motion(h, 0, 4, list, ref[list], &mv[list][0], &mv[list][1]);
                else
                    mv[list][0] = mv[list][1] = 0;
            }
        }

        if(ref[1] < 0){
            *mb_type &= ~MB_TYPE_P0L1;
            sub_mb_type &= ~MB_TYPE_P0L1;
        }else if(ref[0] < 0){
            *mb_type &= ~MB_TYPE_P0L0;
            sub_mb_type &= ~MB_TYPE_P0L0;
        }
        if(IS_16X16(*mb_type)){
            fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
            fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
            if(!IS_INTRA(mb_type_col)
               && (   (l1ref0[0] == 0 && FFABS(l1mv0[0][0]) <= 1 && FFABS(l1mv0[0][1]) <= 1)
                   || (l1ref0[0]  < 0 && l1ref1[0] == 0 && FFABS(l1mv1[0][0]) <= 1 && FFABS(l1mv1[0][1]) <= 1
                       && (h->x264_build>33 || !h->x264_build)))){
                if(ref[0] > 0)
                    fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv[0][0],mv[0][1]), 4);
                else
                    fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
                if(ref[1] > 0)
                    fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv[1][0],mv[1][1]), 4);
                else
                    fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, 0, 4);
            }else{
                fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv[0][0],mv[0][1]), 4);
                fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv[1][0],mv[1][1]), 4);
            }
        }else{
            for(i8=0; i8<4; i8++){
                const int x8 = i8&1;
                const int y8 = i8>>1;

                if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                    continue;
                h->sub_mb_type[i8] = sub_mb_type;

                fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mv[0][0],mv[0][1]), 4);
                fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mv[1][0],mv[1][1]), 4);
                fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[0], 1);
                fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[1], 1);

                if(!IS_INTRA(mb_type_col) && (   l1ref0[x8 + y8*h->b8_stride] == 0
                                              || (l1ref0[x8 + y8*h->b8_stride] < 0 && l1ref1[x8 + y8*h->b8_stride] == 0
                                                  && (h->x264_build>33 || !h->x264_build)))){
                    const int16_t (*l1mv)[2]= l1ref0[x8 + y8*h->b8_stride] == 0 ? l1mv0 : l1mv1;
                    if(IS_SUB_8X8(sub_mb_type)){
                        const int16_t *mv_col = l1mv[x8*3 + y8*3*h->b_stride];
                        if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
                            if(ref[0] == 0)
                                fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
                            if(ref[1] == 0)
                                fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
                        }
                    }else
                        for(i4=0; i4<4; i4++){
                            const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*h->b_stride];
                            if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
                                if(ref[0] == 0)
                                    *(uint32_t*)h->mv_cache[0][scan8[i8*4+i4]] = 0;
                                if(ref[1] == 0)
                                    *(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] = 0;
                            }
                        }
                }
            }
        }
    }else{ /* direct temporal mv pred */
        const int *map_col_to_list0[2] = {h->map_col_to_list0[0], h->map_col_to_list0[1]};
        const int *dist_scale_factor = h->dist_scale_factor;

        if(FRAME_MBAFF){
            if(IS_INTERLACED(*mb_type)){
                map_col_to_list0[0] = h->map_col_to_list0_field[0];
                map_col_to_list0[1] = h->map_col_to_list0_field[1];
                dist_scale_factor = h->dist_scale_factor_field;
            }
            if(IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col)){
                /* FIXME assumes direct_8x8_inference == 1 */
                const int pair_xy = s->mb_x + (s->mb_y&~1)*s->mb_stride;
                int mb_types_col[2];
                int y_shift;

                *mb_type = MB_TYPE_8x8|MB_TYPE_L0L1
                         | (is_b8x8 ? 0 : MB_TYPE_DIRECT2)
                         | (*mb_type & MB_TYPE_INTERLACED);
                sub_mb_type = MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2|MB_TYPE_16x16;

                if(IS_INTERLACED(*mb_type)){
                    /* frame to field scaling */
                    mb_types_col[0] = h->ref_list[1][0].mb_type[pair_xy];
                    mb_types_col[1] = h->ref_list[1][0].mb_type[pair_xy+s->mb_stride];
                    if(s->mb_y&1){
                        l1ref0 -= 2*h->b8_stride;
                        l1ref1 -= 2*h->b8_stride;
                        l1mv0  -=  4*h->b_stride;
                        l1mv1  -=  4*h->b_stride;
                    }
                    y_shift = 0;
                    if(   (mb_types_col[0] & MB_TYPE_16x16_OR_INTRA)
                       && (mb_types_col[1] & MB_TYPE_16x16_OR_INTRA)
                       && !is_b8x8)
                        *mb_type |= MB_TYPE_16x8;
                    else
                        *mb_type |= MB_TYPE_8x8;
                }else{
                    /* field to frame scaling */
                    /* col_mb_y = (mb_y&~1) + (topAbsDiffPOC < bottomAbsDiffPOC ? 0 : 1)
                     * but in MBAFF, top and bottom POC are equal */
                    int dy = (s->mb_y&1) ? 1 : 2;
                    mb_types_col[0] =
                    mb_types_col[1] = h->ref_list[1][0].mb_type[pair_xy+s->mb_stride];
                    l1ref0 += dy*h->b8_stride;
                    l1ref1 += dy*h->b8_stride;
                    l1mv0  += 2*dy*h->b_stride;
                    l1mv1  += 2*dy*h->b_stride;
                    y_shift = 2;

                    if(   (mb_types_col[0] & (MB_TYPE_16x16_OR_INTRA|MB_TYPE_16x8))
                       && !is_b8x8)
                        *mb_type |= MB_TYPE_16x16;
                    else
                        *mb_type |= MB_TYPE_8x8;
                }
                for(i8=0; i8<4; i8++){
                    const int x8 = i8&1;
                    const int y8 = i8>>1;
                    int ref0, scale;
                    const int16_t (*l1mv)[2]= l1mv0;

                    if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                        continue;
                    h->sub_mb_type[i8] = sub_mb_type;

                    fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
                    if(IS_INTRA(mb_types_col[y8])){
                        fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
                        fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
                        fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
                        continue;
                    }

                    ref0 = l1ref0[x8 + (y8*2>>y_shift)*h->b8_stride];
                    if(ref0 >= 0)
                        ref0 = map_col_to_list0[0][ref0*2>>y_shift];
                    else{
                        ref0 = map_col_to_list0[1][l1ref1[x8 + (y8*2>>y_shift)*h->b8_stride]*2>>y_shift];
                        l1mv= l1mv1;
                    }
                    scale = dist_scale_factor[ref0];
                    fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);

                    {
                        const int16_t *mv_col = l1mv[x8*3 + (y8*6>>y_shift)*h->b_stride];
                        int my_col = (mv_col[1]<<y_shift)/2;
                        int mx = (scale * mv_col[0] + 128) >> 8;
                        int my = (scale * my_col    + 128) >> 8;
                        fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4);
                        fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-my_col), 4);
                    }
                }
                return;
            }
        }
        /* one-to-one mv scaling */

        if(IS_16X16(*mb_type)){
            fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
            if(IS_INTRA(mb_type_col)){
                fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);
                fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
                fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, 0, 4);
            }else{
                const int ref0 = l1ref0[0] >= 0 ? map_col_to_list0[0][l1ref0[0]]
                                                : map_col_to_list0[1][l1ref1[0]];
                const int scale = dist_scale_factor[ref0];
                const int16_t *mv_col = l1ref0[0] >= 0 ? l1mv0[0] : l1mv1[0];
                int mv_l0[2];
                mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref0, 1);
                fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv_l0[0],mv_l0[1]), 4);
                fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]), 4);
            }
        }else{
            for(i8=0; i8<4; i8++){
                const int x8 = i8&1;
                const int y8 = i8>>1;
                int ref0, scale;
                const int16_t (*l1mv)[2]= l1mv0;

                if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                    continue;
                h->sub_mb_type[i8] = sub_mb_type;
                fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
                if(IS_INTRA(mb_type_col)){
                    fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
                    fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
                    fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
                    continue;
                }

                ref0 = l1ref0[x8 + y8*h->b8_stride];
                if(ref0 >= 0)
                    ref0 = map_col_to_list0[0][ref0];
                else{
                    ref0 = map_col_to_list0[1][l1ref1[x8 + y8*h->b8_stride]];
                    l1mv= l1mv1;
                }
                scale = dist_scale_factor[ref0];

                fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);
                if(IS_SUB_8X8(sub_mb_type)){
                    const int16_t *mv_col = l1mv[x8*3 + y8*3*h->b_stride];
                    int mx = (scale * mv_col[0] + 128) >> 8;
                    int my = (scale * mv_col[1] + 128) >> 8;
                    fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4);
                    fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-mv_col[1]), 4);
                }else
                    for(i4=0; i4<4; i4++){
                        const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*h->b_stride];
                        int16_t *mv_l0 = h->mv_cache[0][scan8[i8*4+i4]];
                        mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                        mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                        *(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] =
                            pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]);
                    }
            }
        }
    }
}
static inline void write_back_motion(H264Context *h, int mb_type){
    MpegEncContext * const s = &h->s;
    const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;
    const int b8_xy= 2*s->mb_x + 2*s->mb_y*h->b8_stride;
    int list;

    if(!USES_LIST(mb_type, 0))
        fill_rectangle(&s->current_picture.ref_index[0][b8_xy], 2, 2, h->b8_stride, (uint8_t)LIST_NOT_USED, 1);

    for(list=0; list<h->list_count; list++){
        int y;
        if(!USES_LIST(mb_type, list))
            continue;

        for(y=0; y<4; y++){
            *(uint64_t*)s->current_picture.motion_val[list][b_xy + 0 + y*h->b_stride]= *(uint64_t*)h->mv_cache[list][scan8[0]+0 + 8*y];
            *(uint64_t*)s->current_picture.motion_val[list][b_xy + 2 + y*h->b_stride]= *(uint64_t*)h->mv_cache[list][scan8[0]+2 + 8*y];
        }
        if( h->pps.cabac ) {
            if(IS_SKIP(mb_type))
                fill_rectangle(h->mvd_table[list][b_xy], 4, 4, h->b_stride, 0, 4);
            else
                for(y=0; y<4; y++){
                    *(uint64_t*)h->mvd_table[list][b_xy + 0 + y*h->b_stride]= *(uint64_t*)h->mvd_cache[list][scan8[0]+0 + 8*y];
                    *(uint64_t*)h->mvd_table[list][b_xy + 2 + y*h->b_stride]= *(uint64_t*)h->mvd_cache[list][scan8[0]+2 + 8*y];
                }
        }

        {
            int8_t *ref_index = &s->current_picture.ref_index[list][b8_xy];
            ref_index[0+0*h->b8_stride]= h->ref_cache[list][scan8[0]];
            ref_index[1+0*h->b8_stride]= h->ref_cache[list][scan8[4]];
            ref_index[0+1*h->b8_stride]= h->ref_cache[list][scan8[8]];
            ref_index[1+1*h->b8_stride]= h->ref_cache[list][scan8[12]];
        }
    }

    if(h->slice_type == B_TYPE && h->pps.cabac){
        if(IS_8X8(mb_type)){
            uint8_t *direct_table = &h->direct_table[b8_xy];
            direct_table[1+0*h->b8_stride] = IS_DIRECT(h->sub_mb_type[1]) ? 1 : 0;
            direct_table[0+1*h->b8_stride] = IS_DIRECT(h->sub_mb_type[2]) ? 1 : 0;
            direct_table[1+1*h->b8_stride] = IS_DIRECT(h->sub_mb_type[3]) ? 1 : 0;
        }
    }
}
/**
 * Decodes a network abstraction layer unit.
 * @param consumed is the number of bytes used as input
 * @param length is the length of the array
 * @param dst_length is the number of decoded bytes FIXME here or a decode rbsp trailing?
 * @returns decoded bytes, might be src+1 if no escapes
 */
static uint8_t *decode_nal(H264Context *h, uint8_t *src, int *dst_length, int *consumed, int length){
    int i, si, di;
    uint8_t *dst;

//    src[0]&0x80;                //forbidden bit
    h->nal_ref_idc= src[0]>>5;
    h->nal_unit_type= src[0]&0x1F;
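    /* The single NAL header byte is forbidden_zero_bit(1) | nal_ref_idc(2) |
     * nal_unit_type(5): ref_idc comes from the top three bits (the forbidden
     * bit is assumed to be 0) and the type from the low five bits. */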
#if 0
    for(i=0; i<length; i++)
        printf("%2X ", src[i]);
#endif
    for(i=0; i+1<length; i+=2){
        if(src[i]) continue;
        if(i>0 && src[i-1]==0) i--;
        if(i+2<length && src[i+1]==0 && src[i+2]<=3){
            if(src[i+2]!=3){
                /* startcode, so we must be past the end */
                length=i;
            }
            break;
        }
    }

    if(i>=length-1){ //no escaped 0
        *dst_length= length;
        *consumed= length+1; //+1 for the header
        return src+1;
    }

    h->rbsp_buffer= av_fast_realloc(h->rbsp_buffer, &h->rbsp_buffer_size, length);
    dst= h->rbsp_buffer;

    if (dst == NULL){
        return NULL;
    }

//printf("decoding esc\n");
    si=di=0;
    while(si<length){
        //remove escapes (very rare 1:2^22)
        if(si+2<length && src[si]==0 && src[si+1]==0 && src[si+2]<=3){
            if(src[si+2]==3){ //escape
                dst[di++]= 0;
                dst[di++]= 0;
                si+=3;
                continue;
            }else //next start code
                break;
        }

        dst[di++]= src[si++];
    }

    *dst_length= di;
    *consumed= si + 1;//+1 for the header
//FIXME store exact number of bits in the getbitcontext (it's needed for decoding)
    return dst;
}
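/* Escape-removal example: the RBSP bytes 00 00 03 01 become 00 00 01. The 03
 * is an emulation-prevention byte the encoder inserts so that the raw
 * patterns 00 00 00 through 00 00 03 (start codes and the escape itself)
 * never appear inside a NAL unit; the loop above strips it and copies
 * everything else verbatim. */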
/**
 * identifies the exact end of the bitstream
 * @return the length of the trailing bits, or 0 if damaged
 */
static int decode_rbsp_trailing(uint8_t *src){
    int v= *src;
    int r;

    tprintf("rbsp trailing %X\n", v);

    for(r=1; r<9; r++){
        if(v&1) return r;
        v>>=1;
    }
    return 0;
}
1830 * idct tranforms the 16 dc values and dequantize them.
1831 * @param qp quantization parameter
1833 static void h264_luma_dc_dequant_idct_c(DCTELEM *block, int qp, int qmul){
1836 int temp[16]; //FIXME check if this is a good idea
1837 static const int x_offset[4]={0, 1*stride, 4* stride, 5*stride};
1838 static const int y_offset[4]={0, 2*stride, 8* stride, 10*stride};
1840 //memset(block, 64, 2*256);
1843 const int offset= y_offset[i];
1844 const int z0= block[offset+stride*0] + block[offset+stride*4];
1845 const int z1= block[offset+stride*0] - block[offset+stride*4];
1846 const int z2= block[offset+stride*1] - block[offset+stride*5];
1847 const int z3= block[offset+stride*1] + block[offset+stride*5];
1856 const int offset= x_offset[i];
1857 const int z0= temp[4*0+i] + temp[4*2+i];
1858 const int z1= temp[4*0+i] - temp[4*2+i];
1859 const int z2= temp[4*1+i] - temp[4*3+i];
1860 const int z3= temp[4*1+i] + temp[4*3+i];
1862 block[stride*0 +offset]= ((((z0 + z3)*qmul + 128 ) >> 8)); //FIXME think about merging this into decode_residual
1863 block[stride*2 +offset]= ((((z1 + z2)*qmul + 128 ) >> 8));
1864 block[stride*8 +offset]= ((((z1 - z2)*qmul + 128 ) >> 8));
1865 block[stride*10+offset]= ((((z0 - z3)*qmul + 128 ) >> 8));
1871 * DCT transforms the 16 DC values.
1872 * @param qp quantization parameter ??? FIXME
1874 static void h264_luma_dc_dct_c(DCTELEM *block/*, int qp*/){
1875 // const int qmul= dequant_coeff[qp][0];
1877 int temp[16]; //FIXME check if this is a good idea
1878 static const int x_offset[4]={0, 1*stride, 4* stride, 5*stride};
1879 static const int y_offset[4]={0, 2*stride, 8* stride, 10*stride};
1882 const int offset= y_offset[i];
1883 const int z0= block[offset+stride*0] + block[offset+stride*4];
1884 const int z1= block[offset+stride*0] - block[offset+stride*4];
1885 const int z2= block[offset+stride*1] - block[offset+stride*5];
1886 const int z3= block[offset+stride*1] + block[offset+stride*5];
1895 const int offset= x_offset[i];
1896 const int z0= temp[4*0+i] + temp[4*2+i];
1897 const int z1= temp[4*0+i] - temp[4*2+i];
1898 const int z2= temp[4*1+i] - temp[4*3+i];
1899 const int z3= temp[4*1+i] + temp[4*3+i];
1901 block[stride*0 +offset]= (z0 + z3)>>1;
1902 block[stride*2 +offset]= (z1 + z2)>>1;
1903 block[stride*8 +offset]= (z1 - z2)>>1;
1904 block[stride*10+offset]= (z0 - z3)>>1;
1912 static void chroma_dc_dequant_idct_c(DCTELEM *block, int qp, int qmul){
1913 const int stride= 16*2;
1914 const int xStride= 16;
1917 a= block[stride*0 + xStride*0];
1918 b= block[stride*0 + xStride*1];
1919 c= block[stride*1 + xStride*0];
1920 d= block[stride*1 + xStride*1];
1927 block[stride*0 + xStride*0]= ((a+c)*qmul) >> 7;
1928 block[stride*0 + xStride*1]= ((e+b)*qmul) >> 7;
1929 block[stride*1 + xStride*0]= ((a-c)*qmul) >> 7;
1930 block[stride*1 + xStride*1]= ((e-b)*qmul) >> 7;
1934 static void chroma_dc_dct_c(DCTELEM *block){
1935 const int stride= 16*2;
1936 const int xStride= 16;
1939 a= block[stride*0 + xStride*0];
1940 b= block[stride*0 + xStride*1];
1941 c= block[stride*1 + xStride*0];
1942 d= block[stride*1 + xStride*1];
1949 block[stride*0 + xStride*0]= (a+c);
1950 block[stride*0 + xStride*1]= (e+b);
1951 block[stride*1 + xStride*0]= (a-c);
1952 block[stride*1 + xStride*1]= (e-b);
1957 * Gets the chroma QP from the luma QP (qscale) and the PPS chroma QP index offset.
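 *
 * The luma-to-chroma QP mapping is the identity below 30 and compresses above
 * it, saturating at 39; e.g. a clipped value of 51 maps to a chroma QP of 39.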
1959 static inline int get_chroma_qp(int chroma_qp_index_offset, int qscale){
1961 return chroma_qp[clip(qscale + chroma_qp_index_offset, 0, 51)];
1964 //FIXME need to check that this doesn't overflow signed 32 bit for low qp; I am not sure, it's very close
1965 //FIXME check that gcc inlines this (and optimizes intra & seperate_dc stuff away)
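// The quantizer computes level = (|coeff|*Q + bias) >> QUANT_SHIFT with the
// sign restored afterwards; bias is ~1/3 of a quantization step for intra and
// ~1/6 for inter blocks. The unsigned compare against threshold1/threshold2
// tests both signs at once, so coefficients that quantize to zero skip the
// add and shift entirely.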
1966 static inline int quantize_c(DCTELEM *block, uint8_t *scantable, int qscale, int intra, int seperate_dc){
1968 const int * const quant_table= quant_coeff[qscale];
1969 const int bias= intra ? (1<<QUANT_SHIFT)/3 : (1<<QUANT_SHIFT)/6;
1970 const unsigned int threshold1= (1<<QUANT_SHIFT) - bias - 1;
1971 const unsigned int threshold2= (threshold1<<1);
1977 const int dc_bias= intra ? (1<<(QUANT_SHIFT-2))/3 : (1<<(QUANT_SHIFT-2))/6;
1978 const unsigned int dc_threshold1= (1<<(QUANT_SHIFT-2)) - dc_bias - 1;
1979 const unsigned int dc_threshold2= (dc_threshold1<<1);
1981 int level= block[0]*quant_coeff[qscale+18][0];
1982 if(((unsigned)(level+dc_threshold1))>dc_threshold2){
1984 level= (dc_bias + level)>>(QUANT_SHIFT-2);
1987 level= (dc_bias - level)>>(QUANT_SHIFT-2);
1990 // last_non_zero = i;
1995 const int dc_bias= intra ? (1<<(QUANT_SHIFT+1))/3 : (1<<(QUANT_SHIFT+1))/6;
1996 const unsigned int dc_threshold1= (1<<(QUANT_SHIFT+1)) - dc_bias - 1;
1997 const unsigned int dc_threshold2= (dc_threshold1<<1);
1999 int level= block[0]*quant_table[0];
2000 if(((unsigned)(level+dc_threshold1))>dc_threshold2){
2002 level= (dc_bias + level)>>(QUANT_SHIFT+1);
2005 level= (dc_bias - level)>>(QUANT_SHIFT+1);
2008 // last_non_zero = i;
2021 const int j= scantable[i];
2022 int level= block[j]*quant_table[j];
2024 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
2025 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
2026 if(((unsigned)(level+threshold1))>threshold2){
2028 level= (bias + level)>>QUANT_SHIFT;
2031 level= (bias - level)>>QUANT_SHIFT;
2040 return last_non_zero;
2043 static void pred4x4_vertical_c(uint8_t *src, uint8_t *topright, int stride){
2044 const uint32_t a= ((uint32_t*)(src-stride))[0];
2045 ((uint32_t*)(src+0*stride))[0]= a;
2046 ((uint32_t*)(src+1*stride))[0]= a;
2047 ((uint32_t*)(src+2*stride))[0]= a;
2048 ((uint32_t*)(src+3*stride))[0]= a;
2051 static void pred4x4_horizontal_c(uint8_t *src, uint8_t *topright, int stride){
2052 ((uint32_t*)(src+0*stride))[0]= src[-1+0*stride]*0x01010101;
2053 ((uint32_t*)(src+1*stride))[0]= src[-1+1*stride]*0x01010101;
2054 ((uint32_t*)(src+2*stride))[0]= src[-1+2*stride]*0x01010101;
2055 ((uint32_t*)(src+3*stride))[0]= src[-1+3*stride]*0x01010101;
2058 static void pred4x4_dc_c(uint8_t *src, uint8_t *topright, int stride){
2059 const int dc= ( src[-stride] + src[1-stride] + src[2-stride] + src[3-stride]
2060 + src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 4) >>3;
2062 ((uint32_t*)(src+0*stride))[0]=
2063 ((uint32_t*)(src+1*stride))[0]=
2064 ((uint32_t*)(src+2*stride))[0]=
2065 ((uint32_t*)(src+3*stride))[0]= dc* 0x01010101;
2068 static void pred4x4_left_dc_c(uint8_t *src, uint8_t *topright, int stride){
2069 const int dc= ( src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 2) >>2;
2071 ((uint32_t*)(src+0*stride))[0]=
2072 ((uint32_t*)(src+1*stride))[0]=
2073 ((uint32_t*)(src+2*stride))[0]=
2074 ((uint32_t*)(src+3*stride))[0]= dc* 0x01010101;
2077 static void pred4x4_top_dc_c(uint8_t *src, uint8_t *topright, int stride){
2078 const int dc= ( src[-stride] + src[1-stride] + src[2-stride] + src[3-stride] + 2) >>2;
2080 ((uint32_t*)(src+0*stride))[0]=
2081 ((uint32_t*)(src+1*stride))[0]=
2082 ((uint32_t*)(src+2*stride))[0]=
2083 ((uint32_t*)(src+3*stride))[0]= dc* 0x01010101;
2086 static void pred4x4_128_dc_c(uint8_t *src, uint8_t *topright, int stride){
2087 ((uint32_t*)(src+0*stride))[0]=
2088 ((uint32_t*)(src+1*stride))[0]=
2089 ((uint32_t*)(src+2*stride))[0]=
2090 ((uint32_t*)(src+3*stride))[0]= 128U*0x01010101U;
2094 #define LOAD_TOP_RIGHT_EDGE\
2095 const int t4= topright[0];\
2096 const int t5= topright[1];\
2097 const int t6= topright[2];\
2098 const int t7= topright[3];\
2100 #define LOAD_LEFT_EDGE\
2101 const int l0= src[-1+0*stride];\
2102 const int l1= src[-1+1*stride];\
2103 const int l2= src[-1+2*stride];\
2104 const int l3= src[-1+3*stride];\
2106 #define LOAD_TOP_EDGE\
2107 const int t0= src[ 0-1*stride];\
2108 const int t1= src[ 1-1*stride];\
2109 const int t2= src[ 2-1*stride];\
2110 const int t3= src[ 3-1*stride];\
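/* Shorthand used by the 4x4 predictors below: l0..l3 are the left neighbours
 * top to bottom, t0..t3 the top neighbours left to right, t4..t7 the
 * top-right neighbours and lt the top-left corner sample. */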
2112 static void pred4x4_down_right_c(uint8_t *src, uint8_t *topright, int stride){
2113 const int lt= src[-1-1*stride];
2117 src[0+3*stride]=(l3 + 2*l2 + l1 + 2)>>2;
2119 src[1+3*stride]=(l2 + 2*l1 + l0 + 2)>>2;
2122 src[2+3*stride]=(l1 + 2*l0 + lt + 2)>>2;
2126 src[3+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
2129 src[3+2*stride]=(lt + 2*t0 + t1 + 2)>>2;
2131 src[3+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
2132 src[3+0*stride]=(t1 + 2*t2 + t3 + 2)>>2;
2135 static void pred4x4_down_left_c(uint8_t *src, uint8_t *topright, int stride){
2140 src[0+0*stride]=(t0 + t2 + 2*t1 + 2)>>2;
2142 src[0+1*stride]=(t1 + t3 + 2*t2 + 2)>>2;
2145 src[0+2*stride]=(t2 + t4 + 2*t3 + 2)>>2;
2149 src[0+3*stride]=(t3 + t5 + 2*t4 + 2)>>2;
2152 src[1+3*stride]=(t4 + t6 + 2*t5 + 2)>>2;
2154 src[2+3*stride]=(t5 + t7 + 2*t6 + 2)>>2;
2155 src[3+3*stride]=(t6 + 3*t7 + 2)>>2;
2158 static void pred4x4_vertical_right_c(uint8_t *src, uint8_t *topright, int stride){
2159 const int lt= src[-1-1*stride];
2162 const __attribute__((unused)) int unu= l3;
2165 src[1+2*stride]=(lt + t0 + 1)>>1;
2167 src[2+2*stride]=(t0 + t1 + 1)>>1;
2169 src[3+2*stride]=(t1 + t2 + 1)>>1;
2170 src[3+0*stride]=(t2 + t3 + 1)>>1;
2172 src[1+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
2174 src[2+3*stride]=(lt + 2*t0 + t1 + 2)>>2;
2176 src[3+3*stride]=(t0 + 2*t1 + t2 + 2)>>2;
2177 src[3+1*stride]=(t1 + 2*t2 + t3 + 2)>>2;
2178 src[0+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
2179 src[0+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
2182 static void pred4x4_vertical_left_c(uint8_t *src, uint8_t *topright, int stride){
2185 const __attribute__((unused)) int unu= t7;
2187 src[0+0*stride]=(t0 + t1 + 1)>>1;
2189 src[0+2*stride]=(t1 + t2 + 1)>>1;
2191 src[1+2*stride]=(t2 + t3 + 1)>>1;
2193 src[2+2*stride]=(t3 + t4+ 1)>>1;
2194 src[3+2*stride]=(t4 + t5+ 1)>>1;
2195 src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
2197 src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
2199 src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
2201 src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
2202 src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
2205 static void pred4x4_horizontal_up_c(uint8_t *src, uint8_t *topright, int stride){
2208 src[0+0*stride]=(l0 + l1 + 1)>>1;
2209 src[1+0*stride]=(l0 + 2*l1 + l2 + 2)>>2;
2211 src[0+1*stride]=(l1 + l2 + 1)>>1;
2213 src[1+1*stride]=(l1 + 2*l2 + l3 + 2)>>2;
2215 src[0+2*stride]=(l2 + l3 + 1)>>1;
2217 src[1+2*stride]=(l2 + 3*l3 + 2)>>2;
2226 static void pred4x4_horizontal_down_c(uint8_t *src, uint8_t *topright, int stride){
2227 const int lt= src[-1-1*stride];
2230 const __attribute__((unused)) int unu= t3;
2233 src[2+1*stride]=(lt + l0 + 1)>>1;
2235 src[3+1*stride]=(l0 + 2*lt + t0 + 2)>>2;
2236 src[2+0*stride]=(lt + 2*t0 + t1 + 2)>>2;
2237 src[3+0*stride]=(t0 + 2*t1 + t2 + 2)>>2;
2239 src[2+2*stride]=(l0 + l1 + 1)>>1;
2241 src[3+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
2243 src[2+3*stride]=(l1 + l2+ 1)>>1;
2245 src[3+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
2246 src[0+3*stride]=(l2 + l3 + 1)>>1;
2247 src[1+3*stride]=(l1 + 2*l2 + l3 + 2)>>2;
2250 void ff_pred16x16_vertical_c(uint8_t *src, int stride){
2252 const uint32_t a= ((uint32_t*)(src-stride))[0];
2253 const uint32_t b= ((uint32_t*)(src-stride))[1];
2254 const uint32_t c= ((uint32_t*)(src-stride))[2];
2255 const uint32_t d= ((uint32_t*)(src-stride))[3];
2257 for(i=0; i<16; i++){
2258 ((uint32_t*)(src+i*stride))[0]= a;
2259 ((uint32_t*)(src+i*stride))[1]= b;
2260 ((uint32_t*)(src+i*stride))[2]= c;
2261 ((uint32_t*)(src+i*stride))[3]= d;
2265 void ff_pred16x16_horizontal_c(uint8_t *src, int stride){
2268 for(i=0; i<16; i++){
2269 ((uint32_t*)(src+i*stride))[0]=
2270 ((uint32_t*)(src+i*stride))[1]=
2271 ((uint32_t*)(src+i*stride))[2]=
2272 ((uint32_t*)(src+i*stride))[3]= src[-1+i*stride]*0x01010101;
2276 void ff_pred16x16_dc_c(uint8_t *src, int stride){
2280 dc+= src[-1+i*stride];
2287 dc= 0x01010101*((dc + 16)>>5);
2289 for(i=0; i<16; i++){
2290 ((uint32_t*)(src+i*stride))[0]=
2291 ((uint32_t*)(src+i*stride))[1]=
2292 ((uint32_t*)(src+i*stride))[2]=
2293 ((uint32_t*)(src+i*stride))[3]= dc;
2297 static void pred16x16_left_dc_c(uint8_t *src, int stride){
2301 dc+= src[-1+i*stride];
2304 dc= 0x01010101*((dc + 8)>>4);
2306 for(i=0; i<16; i++){
2307 ((uint32_t*)(src+i*stride))[0]=
2308 ((uint32_t*)(src+i*stride))[1]=
2309 ((uint32_t*)(src+i*stride))[2]=
2310 ((uint32_t*)(src+i*stride))[3]= dc;
2314 static void pred16x16_top_dc_c(uint8_t *src, int stride){
2320 dc= 0x01010101*((dc + 8)>>4);
2322 for(i=0; i<16; i++){
2323 ((uint32_t*)(src+i*stride))[0]=
2324 ((uint32_t*)(src+i*stride))[1]=
2325 ((uint32_t*)(src+i*stride))[2]=
2326 ((uint32_t*)(src+i*stride))[3]= dc;
2330 void ff_pred16x16_128_dc_c(uint8_t *src, int stride){
2333 for(i=0; i<16; i++){
2334 ((uint32_t*)(src+i*stride))[0]=
2335 ((uint32_t*)(src+i*stride))[1]=
2336 ((uint32_t*)(src+i*stride))[2]=
2337 ((uint32_t*)(src+i*stride))[3]= 0x01010101U*128U;
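/* Plane prediction fits a linear ramp through the border samples:
 * pred(x,y) = clip((a + b*(x-7) + c*(y-7) + 16) >> 5), where the gradients b
 * and c come from the weighted edge sums H and V. The svq3 flag of the compat
 * function below selects SVQ3's slightly different gradient scaling. */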
2341 static inline void pred16x16_plane_compat_c(uint8_t *src, int stride, const int svq3){
2344 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
2345 const uint8_t * const src0 = src+7-stride;
2346 const uint8_t *src1 = src+8*stride-1;
2347 const uint8_t *src2 = src1-2*stride; // == src+6*stride-1;
2348 int H = src0[1] - src0[-1];
2349 int V = src1[0] - src2[ 0];
2350 for(k=2; k<=8; ++k) {
2351 src1 += stride; src2 -= stride;
2352 H += k*(src0[k] - src0[-k]);
2353 V += k*(src1[0] - src2[ 0]);
2356 H = ( 5*(H/4) ) / 16;
2357 V = ( 5*(V/4) ) / 16;
2359 /* required for 100% accuracy */
2360 i = H; H = V; V = i;
2362 H = ( 5*H+32 ) >> 6;
2363 V = ( 5*V+32 ) >> 6;
2366 a = 16*(src1[0] + src2[16] + 1) - 7*(V+H);
2367 for(j=16; j>0; --j) {
2370 for(i=-16; i<0; i+=4) {
2371 src[16+i] = cm[ (b ) >> 5 ];
2372 src[17+i] = cm[ (b+ H) >> 5 ];
2373 src[18+i] = cm[ (b+2*H) >> 5 ];
2374 src[19+i] = cm[ (b+3*H) >> 5 ];
2381 void ff_pred16x16_plane_c(uint8_t *src, int stride){
2382 pred16x16_plane_compat_c(src, stride, 0);
2385 void ff_pred8x8_vertical_c(uint8_t *src, int stride){
2387 const uint32_t a= ((uint32_t*)(src-stride))[0];
2388 const uint32_t b= ((uint32_t*)(src-stride))[1];
2391 ((uint32_t*)(src+i*stride))[0]= a;
2392 ((uint32_t*)(src+i*stride))[1]= b;
2396 void ff_pred8x8_horizontal_c(uint8_t *src, int stride){
2400 ((uint32_t*)(src+i*stride))[0]=
2401 ((uint32_t*)(src+i*stride))[1]= src[-1+i*stride]*0x01010101;
2405 void ff_pred8x8_128_dc_c(uint8_t *src, int stride){
2409 ((uint32_t*)(src+i*stride))[0]=
2410 ((uint32_t*)(src+i*stride))[1]= 0x01010101U*128U;
2414 static void pred8x8_left_dc_c(uint8_t *src, int stride){
2420 dc0+= src[-1+i*stride];
2421 dc2+= src[-1+(i+4)*stride];
2423 dc0= 0x01010101*((dc0 + 2)>>2);
2424 dc2= 0x01010101*((dc2 + 2)>>2);
2427 ((uint32_t*)(src+i*stride))[0]=
2428 ((uint32_t*)(src+i*stride))[1]= dc0;
2431 ((uint32_t*)(src+i*stride))[0]=
2432 ((uint32_t*)(src+i*stride))[1]= dc2;
2436 static void pred8x8_top_dc_c(uint8_t *src, int stride){
2442 dc0+= src[i-stride];
2443 dc1+= src[4+i-stride];
2445 dc0= 0x01010101*((dc0 + 2)>>2);
2446 dc1= 0x01010101*((dc1 + 2)>>2);
2449 ((uint32_t*)(src+i*stride))[0]= dc0;
2450 ((uint32_t*)(src+i*stride))[1]= dc1;
2453 ((uint32_t*)(src+i*stride))[0]= dc0;
2454 ((uint32_t*)(src+i*stride))[1]= dc1;
2459 void ff_pred8x8_dc_c(uint8_t *src, int stride){
2461 int dc0, dc1, dc2, dc3;
2465 dc0+= src[-1+i*stride] + src[i-stride];
2466 dc1+= src[4+i-stride];
2467 dc2+= src[-1+(i+4)*stride];
2469 dc3= 0x01010101*((dc1 + dc2 + 4)>>3);
2470 dc0= 0x01010101*((dc0 + 4)>>3);
2471 dc1= 0x01010101*((dc1 + 2)>>2);
2472 dc2= 0x01010101*((dc2 + 2)>>2);
2475 ((uint32_t*)(src+i*stride))[0]= dc0;
2476 ((uint32_t*)(src+i*stride))[1]= dc1;
2479 ((uint32_t*)(src+i*stride))[0]= dc2;
2480 ((uint32_t*)(src+i*stride))[1]= dc3;
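/* The chroma analogue of 16x16 plane prediction: the ramp is centred on (3,3)
 * and the gradients are scaled as (17*H+16)>>5 instead of (5*H+32)>>6. */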
2484 void ff_pred8x8_plane_c(uint8_t *src, int stride){
2487 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
2488 const uint8_t * const src0 = src+3-stride;
2489 const uint8_t *src1 = src+4*stride-1;
2490 const uint8_t *src2 = src1-2*stride; // == src+2*stride-1;
2491 int H = src0[1] - src0[-1];
2492 int V = src1[0] - src2[ 0];
2493 for(k=2; k<=4; ++k) {
2494 src1 += stride; src2 -= stride;
2495 H += k*(src0[k] - src0[-k]);
2496 V += k*(src1[0] - src2[ 0]);
2498 H = ( 17*H+16 ) >> 5;
2499 V = ( 17*V+16 ) >> 5;
2501 a = 16*(src1[0] + src2[8]+1) - 3*(V+H);
2502 for(j=8; j>0; --j) {
2505 src[0] = cm[ (b ) >> 5 ];
2506 src[1] = cm[ (b+ H) >> 5 ];
2507 src[2] = cm[ (b+2*H) >> 5 ];
2508 src[3] = cm[ (b+3*H) >> 5 ];
2509 src[4] = cm[ (b+4*H) >> 5 ];
2510 src[5] = cm[ (b+5*H) >> 5 ];
2511 src[6] = cm[ (b+6*H) >> 5 ];
2512 src[7] = cm[ (b+7*H) >> 5 ];
2517 #define SRC(x,y) src[(x)+(y)*stride]
2519 const int l##y = (SRC(-1,y-1) + 2*SRC(-1,y) + SRC(-1,y+1) + 2) >> 2;
2520 #define PREDICT_8x8_LOAD_LEFT \
2521 const int l0 = ((has_topleft ? SRC(-1,-1) : SRC(-1,0)) \
2522 + 2*SRC(-1,0) + SRC(-1,1) + 2) >> 2; \
2523 PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) \
2524 const int l7 attribute_unused = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2
2527 const int t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
2528 #define PREDICT_8x8_LOAD_TOP \
2529 const int t0 = ((has_topleft ? SRC(-1,-1) : SRC(0,-1)) \
2530 + 2*SRC(0,-1) + SRC(1,-1) + 2) >> 2; \
2531 PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) \
2532 const int t7 attribute_unused = ((has_topright ? SRC(8,-1) : SRC(7,-1)) \
2533 + 2*SRC(7,-1) + SRC(6,-1) + 2) >> 2
2536 t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
2537 #define PREDICT_8x8_LOAD_TOPRIGHT \
2538 int t8, t9, t10, t11, t12, t13, t14, t15; \
2539 if(has_topright) { \
2540 PTR(8) PTR(9) PTR(10) PTR(11) PTR(12) PTR(13) PTR(14) \
2541 t15 = (SRC(14,-1) + 3*SRC(15,-1) + 2) >> 2; \
2542 } else t8=t9=t10=t11=t12=t13=t14=t15= SRC(7,-1);
2544 #define PREDICT_8x8_LOAD_TOPLEFT \
2545 const int lt = (SRC(-1,0) + 2*SRC(-1,-1) + SRC(0,-1) + 2) >> 2
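/* Unlike the 4x4 case, 8x8 intra prediction low-pass filters its reference
 * samples first: the LOAD macros above fold a (1,2,1)/4 filter into
 * l0..l7, t0..t15 and lt, substituting edge samples where the top-left or
 * top-right neighbours are unavailable. */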
2547 #define PREDICT_8x8_DC(v) \
2549 for( y = 0; y < 8; y++ ) { \
2550 ((uint32_t*)src)[0] = \
2551 ((uint32_t*)src)[1] = v; \
2555 static void pred8x8l_128_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
2557 PREDICT_8x8_DC(0x80808080);
2559 static void pred8x8l_left_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
2561 PREDICT_8x8_LOAD_LEFT;
2562 const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7+4) >> 3) * 0x01010101;
2565 static void pred8x8l_top_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
2567 PREDICT_8x8_LOAD_TOP;
2568 const uint32_t dc = ((t0+t1+t2+t3+t4+t5+t6+t7+4) >> 3) * 0x01010101;
2571 static void pred8x8l_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
2573 PREDICT_8x8_LOAD_LEFT;
2574 PREDICT_8x8_LOAD_TOP;
2575 const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7
2576 +t0+t1+t2+t3+t4+t5+t6+t7+8) >> 4) * 0x01010101;
2579 static void pred8x8l_horizontal_c(uint8_t *src, int has_topleft, int has_topright, int stride)
2581 PREDICT_8x8_LOAD_LEFT;
2582 #define ROW(y) ((uint32_t*)(src+y*stride))[0] =\
2583 ((uint32_t*)(src+y*stride))[1] = 0x01010101 * l##y
2584 ROW(0); ROW(1); ROW(2); ROW(3); ROW(4); ROW(5); ROW(6); ROW(7);
2587 static void pred8x8l_vertical_c(uint8_t *src, int has_topleft, int has_topright, int stride)
2590 PREDICT_8x8_LOAD_TOP;
2599 for( y = 1; y < 8; y++ )
2600 *(uint64_t*)(src+y*stride) = *(uint64_t*)src;
2602 static void pred8x8l_down_left_c(uint8_t *src, int has_topleft, int has_topright, int stride)
2604 PREDICT_8x8_LOAD_TOP;
2605 PREDICT_8x8_LOAD_TOPRIGHT;
2606 SRC(0,0)= (t0 + 2*t1 + t2 + 2) >> 2;
2607 SRC(0,1)=SRC(1,0)= (t1 + 2*t2 + t3 + 2) >> 2;
2608 SRC(0,2)=SRC(1,1)=SRC(2,0)= (t2 + 2*t3 + t4 + 2) >> 2;
2609 SRC(0,3)=SRC(1,2)=SRC(2,1)=SRC(3,0)= (t3 + 2*t4 + t5 + 2) >> 2;
2610 SRC(0,4)=SRC(1,3)=SRC(2,2)=SRC(3,1)=SRC(4,0)= (t4 + 2*t5 + t6 + 2) >> 2;
2611 SRC(0,5)=SRC(1,4)=SRC(2,3)=SRC(3,2)=SRC(4,1)=SRC(5,0)= (t5 + 2*t6 + t7 + 2) >> 2;
2612 SRC(0,6)=SRC(1,5)=SRC(2,4)=SRC(3,3)=SRC(4,2)=SRC(5,1)=SRC(6,0)= (t6 + 2*t7 + t8 + 2) >> 2;
2613 SRC(0,7)=SRC(1,6)=SRC(2,5)=SRC(3,4)=SRC(4,3)=SRC(5,2)=SRC(6,1)=SRC(7,0)= (t7 + 2*t8 + t9 + 2) >> 2;
2614 SRC(1,7)=SRC(2,6)=SRC(3,5)=SRC(4,4)=SRC(5,3)=SRC(6,2)=SRC(7,1)= (t8 + 2*t9 + t10 + 2) >> 2;
2615 SRC(2,7)=SRC(3,6)=SRC(4,5)=SRC(5,4)=SRC(6,3)=SRC(7,2)= (t9 + 2*t10 + t11 + 2) >> 2;
2616 SRC(3,7)=SRC(4,6)=SRC(5,5)=SRC(6,4)=SRC(7,3)= (t10 + 2*t11 + t12 + 2) >> 2;
2617 SRC(4,7)=SRC(5,6)=SRC(6,5)=SRC(7,4)= (t11 + 2*t12 + t13 + 2) >> 2;
2618 SRC(5,7)=SRC(6,6)=SRC(7,5)= (t12 + 2*t13 + t14 + 2) >> 2;
2619 SRC(6,7)=SRC(7,6)= (t13 + 2*t14 + t15 + 2) >> 2;
2620 SRC(7,7)= (t14 + 3*t15 + 2) >> 2;
2622 static void pred8x8l_down_right_c(uint8_t *src, int has_topleft, int has_topright, int stride)
2624 PREDICT_8x8_LOAD_TOP;
2625 PREDICT_8x8_LOAD_LEFT;
2626 PREDICT_8x8_LOAD_TOPLEFT;
2627 SRC(0,7)= (l7 + 2*l6 + l5 + 2) >> 2;
2628 SRC(0,6)=SRC(1,7)= (l6 + 2*l5 + l4 + 2) >> 2;
2629 SRC(0,5)=SRC(1,6)=SRC(2,7)= (l5 + 2*l4 + l3 + 2) >> 2;
2630 SRC(0,4)=SRC(1,5)=SRC(2,6)=SRC(3,7)= (l4 + 2*l3 + l2 + 2) >> 2;
2631 SRC(0,3)=SRC(1,4)=SRC(2,5)=SRC(3,6)=SRC(4,7)= (l3 + 2*l2 + l1 + 2) >> 2;
2632 SRC(0,2)=SRC(1,3)=SRC(2,4)=SRC(3,5)=SRC(4,6)=SRC(5,7)= (l2 + 2*l1 + l0 + 2) >> 2;
2633 SRC(0,1)=SRC(1,2)=SRC(2,3)=SRC(3,4)=SRC(4,5)=SRC(5,6)=SRC(6,7)= (l1 + 2*l0 + lt + 2) >> 2;
2634 SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)=SRC(4,4)=SRC(5,5)=SRC(6,6)=SRC(7,7)= (l0 + 2*lt + t0 + 2) >> 2;
2635 SRC(1,0)=SRC(2,1)=SRC(3,2)=SRC(4,3)=SRC(5,4)=SRC(6,5)=SRC(7,6)= (lt + 2*t0 + t1 + 2) >> 2;
2636 SRC(2,0)=SRC(3,1)=SRC(4,2)=SRC(5,3)=SRC(6,4)=SRC(7,5)= (t0 + 2*t1 + t2 + 2) >> 2;
2637 SRC(3,0)=SRC(4,1)=SRC(5,2)=SRC(6,3)=SRC(7,4)= (t1 + 2*t2 + t3 + 2) >> 2;
2638 SRC(4,0)=SRC(5,1)=SRC(6,2)=SRC(7,3)= (t2 + 2*t3 + t4 + 2) >> 2;
2639 SRC(5,0)=SRC(6,1)=SRC(7,2)= (t3 + 2*t4 + t5 + 2) >> 2;
2640 SRC(6,0)=SRC(7,1)= (t4 + 2*t5 + t6 + 2) >> 2;
2641 SRC(7,0)= (t5 + 2*t6 + t7 + 2) >> 2;
2644 static void pred8x8l_vertical_right_c(uint8_t *src, int has_topleft, int has_topright, int stride)
2646 PREDICT_8x8_LOAD_TOP;
2647 PREDICT_8x8_LOAD_LEFT;
2648 PREDICT_8x8_LOAD_TOPLEFT;
2649 SRC(0,6)= (l5 + 2*l4 + l3 + 2) >> 2;
2650 SRC(0,7)= (l6 + 2*l5 + l4 + 2) >> 2;
2651 SRC(0,4)=SRC(1,6)= (l3 + 2*l2 + l1 + 2) >> 2;
2652 SRC(0,5)=SRC(1,7)= (l4 + 2*l3 + l2 + 2) >> 2;
2653 SRC(0,2)=SRC(1,4)=SRC(2,6)= (l1 + 2*l0 + lt + 2) >> 2;
2654 SRC(0,3)=SRC(1,5)=SRC(2,7)= (l2 + 2*l1 + l0 + 2) >> 2;
2655 SRC(0,1)=SRC(1,3)=SRC(2,5)=SRC(3,7)= (l0 + 2*lt + t0 + 2) >> 2;
2656 SRC(0,0)=SRC(1,2)=SRC(2,4)=SRC(3,6)= (lt + t0 + 1) >> 1;
2657 SRC(1,1)=SRC(2,3)=SRC(3,5)=SRC(4,7)= (lt + 2*t0 + t1 + 2) >> 2;
2658 SRC(1,0)=SRC(2,2)=SRC(3,4)=SRC(4,6)= (t0 + t1 + 1) >> 1;
2659 SRC(2,1)=SRC(3,3)=SRC(4,5)=SRC(5,7)= (t0 + 2*t1 + t2 + 2) >> 2;
2660 SRC(2,0)=SRC(3,2)=SRC(4,4)=SRC(5,6)= (t1 + t2 + 1) >> 1;
2661 SRC(3,1)=SRC(4,3)=SRC(5,5)=SRC(6,7)= (t1 + 2*t2 + t3 + 2) >> 2;
2662 SRC(3,0)=SRC(4,2)=SRC(5,4)=SRC(6,6)= (t2 + t3 + 1) >> 1;
2663 SRC(4,1)=SRC(5,3)=SRC(6,5)=SRC(7,7)= (t2 + 2*t3 + t4 + 2) >> 2;
2664 SRC(4,0)=SRC(5,2)=SRC(6,4)=SRC(7,6)= (t3 + t4 + 1) >> 1;
2665 SRC(5,1)=SRC(6,3)=SRC(7,5)= (t3 + 2*t4 + t5 + 2) >> 2;
2666 SRC(5,0)=SRC(6,2)=SRC(7,4)= (t4 + t5 + 1) >> 1;
2667 SRC(6,1)=SRC(7,3)= (t4 + 2*t5 + t6 + 2) >> 2;
2668 SRC(6,0)=SRC(7,2)= (t5 + t6 + 1) >> 1;
2669 SRC(7,1)= (t5 + 2*t6 + t7 + 2) >> 2;
2670 SRC(7,0)= (t6 + t7 + 1) >> 1;
2672 static void pred8x8l_horizontal_down_c(uint8_t *src, int has_topleft, int has_topright, int stride)
2674 PREDICT_8x8_LOAD_TOP;
2675 PREDICT_8x8_LOAD_LEFT;
2676 PREDICT_8x8_LOAD_TOPLEFT;
2677 SRC(0,7)= (l6 + l7 + 1) >> 1;
2678 SRC(1,7)= (l5 + 2*l6 + l7 + 2) >> 2;
2679 SRC(0,6)=SRC(2,7)= (l5 + l6 + 1) >> 1;
2680 SRC(1,6)=SRC(3,7)= (l4 + 2*l5 + l6 + 2) >> 2;
2681 SRC(0,5)=SRC(2,6)=SRC(4,7)= (l4 + l5 + 1) >> 1;
2682 SRC(1,5)=SRC(3,6)=SRC(5,7)= (l3 + 2*l4 + l5 + 2) >> 2;
2683 SRC(0,4)=SRC(2,5)=SRC(4,6)=SRC(6,7)= (l3 + l4 + 1) >> 1;
2684 SRC(1,4)=SRC(3,5)=SRC(5,6)=SRC(7,7)= (l2 + 2*l3 + l4 + 2) >> 2;
2685 SRC(0,3)=SRC(2,4)=SRC(4,5)=SRC(6,6)= (l2 + l3 + 1) >> 1;
2686 SRC(1,3)=SRC(3,4)=SRC(5,5)=SRC(7,6)= (l1 + 2*l2 + l3 + 2) >> 2;
2687 SRC(0,2)=SRC(2,3)=SRC(4,4)=SRC(6,5)= (l1 + l2 + 1) >> 1;
2688 SRC(1,2)=SRC(3,3)=SRC(5,4)=SRC(7,5)= (l0 + 2*l1 + l2 + 2) >> 2;
2689 SRC(0,1)=SRC(2,2)=SRC(4,3)=SRC(6,4)= (l0 + l1 + 1) >> 1;
2690 SRC(1,1)=SRC(3,2)=SRC(5,3)=SRC(7,4)= (lt + 2*l0 + l1 + 2) >> 2;
2691 SRC(0,0)=SRC(2,1)=SRC(4,2)=SRC(6,3)= (lt + l0 + 1) >> 1;
2692 SRC(1,0)=SRC(3,1)=SRC(5,2)=SRC(7,3)= (l0 + 2*lt + t0 + 2) >> 2;
2693 SRC(2,0)=SRC(4,1)=SRC(6,2)= (t1 + 2*t0 + lt + 2) >> 2;
2694 SRC(3,0)=SRC(5,1)=SRC(7,2)= (t2 + 2*t1 + t0 + 2) >> 2;
2695 SRC(4,0)=SRC(6,1)= (t3 + 2*t2 + t1 + 2) >> 2;
2696 SRC(5,0)=SRC(7,1)= (t4 + 2*t3 + t2 + 2) >> 2;
2697 SRC(6,0)= (t5 + 2*t4 + t3 + 2) >> 2;
2698 SRC(7,0)= (t6 + 2*t5 + t4 + 2) >> 2;
2700 static void pred8x8l_vertical_left_c(uint8_t *src, int has_topleft, int has_topright, int stride)
2702 PREDICT_8x8_LOAD_TOP;
2703 PREDICT_8x8_LOAD_TOPRIGHT;
2704 SRC(0,0)= (t0 + t1 + 1) >> 1;
2705 SRC(0,1)= (t0 + 2*t1 + t2 + 2) >> 2;
2706 SRC(0,2)=SRC(1,0)= (t1 + t2 + 1) >> 1;
2707 SRC(0,3)=SRC(1,1)= (t1 + 2*t2 + t3 + 2) >> 2;
2708 SRC(0,4)=SRC(1,2)=SRC(2,0)= (t2 + t3 + 1) >> 1;
2709 SRC(0,5)=SRC(1,3)=SRC(2,1)= (t2 + 2*t3 + t4 + 2) >> 2;
2710 SRC(0,6)=SRC(1,4)=SRC(2,2)=SRC(3,0)= (t3 + t4 + 1) >> 1;
2711 SRC(0,7)=SRC(1,5)=SRC(2,3)=SRC(3,1)= (t3 + 2*t4 + t5 + 2) >> 2;
2712 SRC(1,6)=SRC(2,4)=SRC(3,2)=SRC(4,0)= (t4 + t5 + 1) >> 1;
2713 SRC(1,7)=SRC(2,5)=SRC(3,3)=SRC(4,1)= (t4 + 2*t5 + t6 + 2) >> 2;
2714 SRC(2,6)=SRC(3,4)=SRC(4,2)=SRC(5,0)= (t5 + t6 + 1) >> 1;
2715 SRC(2,7)=SRC(3,5)=SRC(4,3)=SRC(5,1)= (t5 + 2*t6 + t7 + 2) >> 2;
2716 SRC(3,6)=SRC(4,4)=SRC(5,2)=SRC(6,0)= (t6 + t7 + 1) >> 1;
2717 SRC(3,7)=SRC(4,5)=SRC(5,3)=SRC(6,1)= (t6 + 2*t7 + t8 + 2) >> 2;
2718 SRC(4,6)=SRC(5,4)=SRC(6,2)=SRC(7,0)= (t7 + t8 + 1) >> 1;
2719 SRC(4,7)=SRC(5,5)=SRC(6,3)=SRC(7,1)= (t7 + 2*t8 + t9 + 2) >> 2;
2720 SRC(5,6)=SRC(6,4)=SRC(7,2)= (t8 + t9 + 1) >> 1;
2721 SRC(5,7)=SRC(6,5)=SRC(7,3)= (t8 + 2*t9 + t10 + 2) >> 2;
2722 SRC(6,6)=SRC(7,4)= (t9 + t10 + 1) >> 1;
2723 SRC(6,7)=SRC(7,5)= (t9 + 2*t10 + t11 + 2) >> 2;
2724 SRC(7,6)= (t10 + t11 + 1) >> 1;
2725 SRC(7,7)= (t10 + 2*t11 + t12 + 2) >> 2;
2727 static void pred8x8l_horizontal_up_c(uint8_t *src, int has_topleft, int has_topright, int stride)
2729 PREDICT_8x8_LOAD_LEFT;
2730 SRC(0,0)= (l0 + l1 + 1) >> 1;
2731 SRC(1,0)= (l0 + 2*l1 + l2 + 2) >> 2;
2732 SRC(0,1)=SRC(2,0)= (l1 + l2 + 1) >> 1;
2733 SRC(1,1)=SRC(3,0)= (l1 + 2*l2 + l3 + 2) >> 2;
2734 SRC(0,2)=SRC(2,1)=SRC(4,0)= (l2 + l3 + 1) >> 1;
2735 SRC(1,2)=SRC(3,1)=SRC(5,0)= (l2 + 2*l3 + l4 + 2) >> 2;
2736 SRC(0,3)=SRC(2,2)=SRC(4,1)=SRC(6,0)= (l3 + l4 + 1) >> 1;
2737 SRC(1,3)=SRC(3,2)=SRC(5,1)=SRC(7,0)= (l3 + 2*l4 + l5 + 2) >> 2;
2738 SRC(0,4)=SRC(2,3)=SRC(4,2)=SRC(6,1)= (l4 + l5 + 1) >> 1;
2739 SRC(1,4)=SRC(3,3)=SRC(5,2)=SRC(7,1)= (l4 + 2*l5 + l6 + 2) >> 2;
2740 SRC(0,5)=SRC(2,4)=SRC(4,3)=SRC(6,2)= (l5 + l6 + 1) >> 1;
2741 SRC(1,5)=SRC(3,4)=SRC(5,3)=SRC(7,2)= (l5 + 2*l6 + l7 + 2) >> 2;
2742 SRC(0,6)=SRC(2,5)=SRC(4,4)=SRC(6,3)= (l6 + l7 + 1) >> 1;
2743 SRC(1,6)=SRC(3,5)=SRC(5,4)=SRC(7,3)= (l6 + 3*l7 + 2) >> 2;
2744 SRC(0,7)=SRC(1,7)=SRC(2,6)=SRC(2,7)=SRC(3,6)=
2745 SRC(3,7)=SRC(4,5)=SRC(4,6)=SRC(4,7)=SRC(5,5)=
2746 SRC(5,6)=SRC(5,7)=SRC(6,4)=SRC(6,5)=SRC(6,6)=
2747 SRC(6,7)=SRC(7,4)=SRC(7,5)=SRC(7,6)=SRC(7,7)= l7;
2749 #undef PREDICT_8x8_LOAD_LEFT
2750 #undef PREDICT_8x8_LOAD_TOP
2751 #undef PREDICT_8x8_LOAD_TOPLEFT
2752 #undef PREDICT_8x8_LOAD_TOPRIGHT
2753 #undef PREDICT_8x8_DC
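/* Motion compensation helpers: mv_cache stores luma motion vectors in
 * quarter-pel units, so (mx&3, my&3) selects one of the 16 quarter-pel
 * interpolation functions and (mx>>2, my>>2) gives the full-pel position;
 * chroma is interpolated at eighth-pel resolution from (mx&7, my&7). */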
2759 static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, int chroma_height, int delta, int list,
2760 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2761 int src_x_offset, int src_y_offset,
2762 qpel_mc_func *qpix_op, h264_chroma_mc_func chroma_op){
2763 MpegEncContext * const s = &h->s;
2764 const int mx= h->mv_cache[list][ scan8[n] ][0] + src_x_offset*8;
2765 int my= h->mv_cache[list][ scan8[n] ][1] + src_y_offset*8;
2766 const int luma_xy= (mx&3) + ((my&3)<<2);
2767 uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*h->mb_linesize;
2768 uint8_t * src_cb, * src_cr;
2769 int extra_width= h->emu_edge_width;
2770 int extra_height= h->emu_edge_height;
2772 const int full_mx= mx>>2;
2773 const int full_my= my>>2;
2774 const int pic_width = 16*s->mb_width;
2775 const int pic_height = 16*s->mb_height >> MB_MBAFF;
2777 if(!pic->data[0]) //FIXME this is unacceptable, some sensible error concealment must be done for missing reference frames
2780 if(mx&7) extra_width -= 3;
2781 if(my&7) extra_height -= 3;
2783 if( full_mx < 0-extra_width
2784 || full_my < 0-extra_height
2785 || full_mx + 16/*FIXME*/ > pic_width + extra_width
2786 || full_my + 16/*FIXME*/ > pic_height + extra_height){
2787 ff_emulated_edge_mc(s->edge_emu_buffer, src_y - 2 - 2*h->mb_linesize, h->mb_linesize, 16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height);
2788 src_y= s->edge_emu_buffer + 2 + 2*h->mb_linesize;
2792 qpix_op[luma_xy](dest_y, src_y, h->mb_linesize); //FIXME try variable height perhaps?
2794 qpix_op[luma_xy](dest_y + delta, src_y + delta, h->mb_linesize);
2797 if(s->flags&CODEC_FLAG_GRAY) return;
2800 // chroma offset when predicting from a field of opposite parity
2801 my += 2 * ((s->mb_y & 1) - (h->ref_cache[list][scan8[n]] & 1));
2802 emu |= (my>>3) < 0 || (my>>3) + 8 >= (pic_height>>1);
2804 src_cb= pic->data[1] + (mx>>3) + (my>>3)*h->mb_uvlinesize;
2805 src_cr= pic->data[2] + (mx>>3) + (my>>3)*h->mb_uvlinesize;
2808 ff_emulated_edge_mc(s->edge_emu_buffer, src_cb, h->mb_uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
2809 src_cb= s->edge_emu_buffer;
2811 chroma_op(dest_cb, src_cb, h->mb_uvlinesize, chroma_height, mx&7, my&7);
2814 ff_emulated_edge_mc(s->edge_emu_buffer, src_cr, h->mb_uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
2815 src_cr= s->edge_emu_buffer;
2817 chroma_op(dest_cr, src_cr, h->mb_uvlinesize, chroma_height, mx&7, my&7);
2820 static inline void mc_part_std(H264Context *h, int n, int square, int chroma_height, int delta,
2821 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2822 int x_offset, int y_offset,
2823 qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put,
2824 qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg,
2825 int list0, int list1){
2826 MpegEncContext * const s = &h->s;
2827 qpel_mc_func *qpix_op= qpix_put;
2828 h264_chroma_mc_func chroma_op= chroma_put;
2830 dest_y += 2*x_offset + 2*y_offset*h-> mb_linesize;
2831 dest_cb += x_offset + y_offset*h->mb_uvlinesize;
2832 dest_cr += x_offset + y_offset*h->mb_uvlinesize;
2833 x_offset += 8*s->mb_x;
2834 y_offset += 8*(s->mb_y >> MB_MBAFF);
2837 Picture *ref= &h->ref_list[0][ h->ref_cache[0][ scan8[n] ] ];
2838 mc_dir_part(h, ref, n, square, chroma_height, delta, 0,
2839 dest_y, dest_cb, dest_cr, x_offset, y_offset,
2840 qpix_op, chroma_op);
2843 chroma_op= chroma_avg;
2847 Picture *ref= &h->ref_list[1][ h->ref_cache[1][ scan8[n] ] ];
2848 mc_dir_part(h, ref, n, square, chroma_height, delta, 1,
2849 dest_y, dest_cb, dest_cr, x_offset, y_offset,
2850 qpix_op, chroma_op);
2854 static inline void mc_part_weighted(H264Context *h, int n, int square, int chroma_height, int delta,
2855 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2856 int x_offset, int y_offset,
2857 qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put,
2858 h264_weight_func luma_weight_op, h264_weight_func chroma_weight_op,
2859 h264_biweight_func luma_weight_avg, h264_biweight_func chroma_weight_avg,
2860 int list0, int list1){
2861 MpegEncContext * const s = &h->s;
2863 dest_y += 2*x_offset + 2*y_offset*h-> mb_linesize;
2864 dest_cb += x_offset + y_offset*h->mb_uvlinesize;
2865 dest_cr += x_offset + y_offset*h->mb_uvlinesize;
2866 x_offset += 8*s->mb_x;
2867 y_offset += 8*(s->mb_y >> MB_MBAFF);
2870 /* don't optimize for luma-only case, since B-frames usually
2871 * use implicit weights => chroma too. */
2872 uint8_t *tmp_cb = s->obmc_scratchpad;
2873 uint8_t *tmp_cr = s->obmc_scratchpad + 8;
2874 uint8_t *tmp_y = s->obmc_scratchpad + 8*h->mb_uvlinesize;
2875 int refn0 = h->ref_cache[0][ scan8[n] ];
2876 int refn1 = h->ref_cache[1][ scan8[n] ];
2878 mc_dir_part(h, &h->ref_list[0][refn0], n, square, chroma_height, delta, 0,
2879 dest_y, dest_cb, dest_cr,
2880 x_offset, y_offset, qpix_put, chroma_put);
2881 mc_dir_part(h, &h->ref_list[1][refn1], n, square, chroma_height, delta, 1,
2882 tmp_y, tmp_cb, tmp_cr,
2883 x_offset, y_offset, qpix_put, chroma_put);
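/* use_weight == 2 means implicit weighted bipred: the weights are derived
 * from the POC distances of the two references and always sum to 64, so the
 * blend below is (p0*weight0 + p1*weight1 + 32) >> 6 with zero offset. */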
2885 if(h->use_weight == 2){
2886 int weight0 = h->implicit_weight[refn0][refn1];
2887 int weight1 = 64 - weight0;
2888 luma_weight_avg( dest_y, tmp_y, h-> mb_linesize, 5, weight0, weight1, 0);
2889 chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize, 5, weight0, weight1, 0);
2890 chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize, 5, weight0, weight1, 0);
2892 luma_weight_avg(dest_y, tmp_y, h->mb_linesize, h->luma_log2_weight_denom,
2893 h->luma_weight[0][refn0], h->luma_weight[1][refn1],
2894 h->luma_offset[0][refn0] + h->luma_offset[1][refn1]);
2895 chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize, h->chroma_log2_weight_denom,
2896 h->chroma_weight[0][refn0][0], h->chroma_weight[1][refn1][0],
2897 h->chroma_offset[0][refn0][0] + h->chroma_offset[1][refn1][0]);
2898 chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize, h->chroma_log2_weight_denom,
2899 h->chroma_weight[0][refn0][1], h->chroma_weight[1][refn1][1],
2900 h->chroma_offset[0][refn0][1] + h->chroma_offset[1][refn1][1]);
2903 int list = list1 ? 1 : 0;
2904 int refn = h->ref_cache[list][ scan8[n] ];
2905 Picture *ref= &h->ref_list[list][refn];
2906 mc_dir_part(h, ref, n, square, chroma_height, delta, list,
2907 dest_y, dest_cb, dest_cr, x_offset, y_offset,
2908 qpix_put, chroma_put);
2910 luma_weight_op(dest_y, h->mb_linesize, h->luma_log2_weight_denom,
2911 h->luma_weight[list][refn], h->luma_offset[list][refn]);
2912 if(h->use_weight_chroma){
2913 chroma_weight_op(dest_cb, h->mb_uvlinesize, h->chroma_log2_weight_denom,
2914 h->chroma_weight[list][refn][0], h->chroma_offset[list][refn][0]);
2915 chroma_weight_op(dest_cr, h->mb_uvlinesize, h->chroma_log2_weight_denom,
2916 h->chroma_weight[list][refn][1], h->chroma_offset[list][refn][1]);
2921 static inline void mc_part(H264Context *h, int n, int square, int chroma_height, int delta,
2922 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2923 int x_offset, int y_offset,
2924 qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put,
2925 qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg,
2926 h264_weight_func *weight_op, h264_biweight_func *weight_avg,
2927 int list0, int list1){
2928 if((h->use_weight==2 && list0 && list1
2929 && (h->implicit_weight[ h->ref_cache[0][scan8[n]] ][ h->ref_cache[1][scan8[n]] ] != 32))
2930 || h->use_weight==1)
2931 mc_part_weighted(h, n, square, chroma_height, delta, dest_y, dest_cb, dest_cr,
2932 x_offset, y_offset, qpix_put, chroma_put,
2933 weight_op[0], weight_op[3], weight_avg[0], weight_avg[3], list0, list1);
2935 mc_part_std(h, n, square, chroma_height, delta, dest_y, dest_cb, dest_cr,
2936 x_offset, y_offset, qpix_put, chroma_put, qpix_avg, chroma_avg, list0, list1);
2939 static inline void prefetch_motion(H264Context *h, int list){
2940 /* fetch pixels for the estimated mv 4 macroblocks ahead,
2941 * optimized for 64-byte cache lines */
2942 MpegEncContext * const s = &h->s;
2943 const int refn = h->ref_cache[list][scan8[0]];
2945 const int mx= (h->mv_cache[list][scan8[0]][0]>>2) + 16*s->mb_x + 8;
2946 const int my= (h->mv_cache[list][scan8[0]][1]>>2) + 16*s->mb_y;
2947 uint8_t **src= h->ref_list[list][refn].data;
2948 int off= mx + (my + (s->mb_x&3)*4)*h->mb_linesize + 64;
2949 s->dsp.prefetch(src[0]+off, s->linesize, 4);
2950 off= (mx>>1) + ((my>>1) + (s->mb_x&7))*s->uvlinesize + 64;
2951 s->dsp.prefetch(src[1]+off, src[2]-src[1], 2);
2955 static void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2956 qpel_mc_func (*qpix_put)[16], h264_chroma_mc_func (*chroma_put),
2957 qpel_mc_func (*qpix_avg)[16], h264_chroma_mc_func (*chroma_avg),
2958 h264_weight_func *weight_op, h264_biweight_func *weight_avg){
2959 MpegEncContext * const s = &h->s;
2960 const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
2961 const int mb_type= s->current_picture.mb_type[mb_xy];
2963 assert(IS_INTER(mb_type));
2965 prefetch_motion(h, 0);
2967 if(IS_16X16(mb_type)){
2968 mc_part(h, 0, 1, 8, 0, dest_y, dest_cb, dest_cr, 0, 0,
2969 qpix_put[0], chroma_put[0], qpix_avg[0], chroma_avg[0],
2970 &weight_op[0], &weight_avg[0],
2971 IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
2972 }else if(IS_16X8(mb_type)){
2973 mc_part(h, 0, 0, 4, 8, dest_y, dest_cb, dest_cr, 0, 0,
2974 qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
2975 &weight_op[1], &weight_avg[1],
2976 IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
2977 mc_part(h, 8, 0, 4, 8, dest_y, dest_cb, dest_cr, 0, 4,
2978 qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
2979 &weight_op[1], &weight_avg[1],
2980 IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
2981 }else if(IS_8X16(mb_type)){
2982 mc_part(h, 0, 0, 8, 8*h->mb_linesize, dest_y, dest_cb, dest_cr, 0, 0,
2983 qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
2984 &weight_op[2], &weight_avg[2],
2985 IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
2986 mc_part(h, 4, 0, 8, 8*h->mb_linesize, dest_y, dest_cb, dest_cr, 4, 0,
2987 qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
2988 &weight_op[2], &weight_avg[2],
2989 IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
2993 assert(IS_8X8(mb_type));
2996 const int sub_mb_type= h->sub_mb_type[i];
2998 int x_offset= (i&1)<<2;
2999 int y_offset= (i&2)<<1;
3001 if(IS_SUB_8X8(sub_mb_type)){
3002 mc_part(h, n, 1, 4, 0, dest_y, dest_cb, dest_cr, x_offset, y_offset,
3003 qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
3004 &weight_op[3], &weight_avg[3],
3005 IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
3006 }else if(IS_SUB_8X4(sub_mb_type)){
3007 mc_part(h, n , 0, 2, 4, dest_y, dest_cb, dest_cr, x_offset, y_offset,
3008 qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
3009 &weight_op[4], &weight_avg[4],
3010 IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
3011 mc_part(h, n+2, 0, 2, 4, dest_y, dest_cb, dest_cr, x_offset, y_offset+2,
3012 qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
3013 &weight_op[4], &weight_avg[4],
3014 IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
3015 }else if(IS_SUB_4X8(sub_mb_type)){
3016 mc_part(h, n , 0, 4, 4*h->mb_linesize, dest_y, dest_cb, dest_cr, x_offset, y_offset,
3017 qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
3018 &weight_op[5], &weight_avg[5],
3019 IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
3020 mc_part(h, n+1, 0, 4, 4*h->mb_linesize, dest_y, dest_cb, dest_cr, x_offset+2, y_offset,
3021 qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
3022 &weight_op[5], &weight_avg[5],
3023 IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
3026 assert(IS_SUB_4X4(sub_mb_type));
3028 int sub_x_offset= x_offset + 2*(j&1);
3029 int sub_y_offset= y_offset + (j&2);
3030 mc_part(h, n+j, 1, 2, 0, dest_y, dest_cb, dest_cr, sub_x_offset, sub_y_offset,
3031 qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
3032 &weight_op[6], &weight_avg[6],
3033 IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
3039 prefetch_motion(h, 1);
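/* Builds the static CAVLC tables; coeff_token, total_zeros and run_before
 * correspond to the code tables of the CAVLC residual parsing process. */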
3042 static void decode_init_vlc(void){
3043 static int done = 0;
3049 init_vlc(&chroma_dc_coeff_token_vlc, CHROMA_DC_COEFF_TOKEN_VLC_BITS, 4*5,
3050 &chroma_dc_coeff_token_len [0], 1, 1,
3051 &chroma_dc_coeff_token_bits[0], 1, 1, 1);
3054 init_vlc(&coeff_token_vlc[i], COEFF_TOKEN_VLC_BITS, 4*17,
3055 &coeff_token_len [i][0], 1, 1,
3056 &coeff_token_bits[i][0], 1, 1, 1);
3060 init_vlc(&chroma_dc_total_zeros_vlc[i], CHROMA_DC_TOTAL_ZEROS_VLC_BITS, 4,
3061 &chroma_dc_total_zeros_len [i][0], 1, 1,
3062 &chroma_dc_total_zeros_bits[i][0], 1, 1, 1);
3064 for(i=0; i<15; i++){
3065 init_vlc(&total_zeros_vlc[i], TOTAL_ZEROS_VLC_BITS, 16,
3066 &total_zeros_len [i][0], 1, 1,
3067 &total_zeros_bits[i][0], 1, 1, 1);
3071 init_vlc(&run_vlc[i], RUN_VLC_BITS, 7,
3072 &run_len [i][0], 1, 1,
3073 &run_bits[i][0], 1, 1, 1);
3075 init_vlc(&run7_vlc, RUN7_VLC_BITS, 16,
3076 &run_len [6][0], 1, 1,
3077 &run_bits[6][0], 1, 1, 1);
3082 * Sets the intra prediction function pointers.
3084 static void init_pred_ptrs(H264Context *h){
3085 // MpegEncContext * const s = &h->s;
3087 h->pred4x4[VERT_PRED ]= pred4x4_vertical_c;
3088 h->pred4x4[HOR_PRED ]= pred4x4_horizontal_c;
3089 h->pred4x4[DC_PRED ]= pred4x4_dc_c;
3090 h->pred4x4[DIAG_DOWN_LEFT_PRED ]= pred4x4_down_left_c;
3091 h->pred4x4[DIAG_DOWN_RIGHT_PRED]= pred4x4_down_right_c;
3092 h->pred4x4[VERT_RIGHT_PRED ]= pred4x4_vertical_right_c;
3093 h->pred4x4[HOR_DOWN_PRED ]= pred4x4_horizontal_down_c;
3094 h->pred4x4[VERT_LEFT_PRED ]= pred4x4_vertical_left_c;
3095 h->pred4x4[HOR_UP_PRED ]= pred4x4_horizontal_up_c;
3096 h->pred4x4[LEFT_DC_PRED ]= pred4x4_left_dc_c;
3097 h->pred4x4[TOP_DC_PRED ]= pred4x4_top_dc_c;
3098 h->pred4x4[DC_128_PRED ]= pred4x4_128_dc_c;
3100 h->pred8x8l[VERT_PRED ]= pred8x8l_vertical_c;
3101 h->pred8x8l[HOR_PRED ]= pred8x8l_horizontal_c;
3102 h->pred8x8l[DC_PRED ]= pred8x8l_dc_c;
3103 h->pred8x8l[DIAG_DOWN_LEFT_PRED ]= pred8x8l_down_left_c;
3104 h->pred8x8l[DIAG_DOWN_RIGHT_PRED]= pred8x8l_down_right_c;
3105 h->pred8x8l[VERT_RIGHT_PRED ]= pred8x8l_vertical_right_c;
3106 h->pred8x8l[HOR_DOWN_PRED ]= pred8x8l_horizontal_down_c;
3107 h->pred8x8l[VERT_LEFT_PRED ]= pred8x8l_vertical_left_c;
3108 h->pred8x8l[HOR_UP_PRED ]= pred8x8l_horizontal_up_c;
3109 h->pred8x8l[LEFT_DC_PRED ]= pred8x8l_left_dc_c;
3110 h->pred8x8l[TOP_DC_PRED ]= pred8x8l_top_dc_c;
3111 h->pred8x8l[DC_128_PRED ]= pred8x8l_128_dc_c;
3113 h->pred8x8[DC_PRED8x8 ]= ff_pred8x8_dc_c;
3114 h->pred8x8[VERT_PRED8x8 ]= ff_pred8x8_vertical_c;
3115 h->pred8x8[HOR_PRED8x8 ]= ff_pred8x8_horizontal_c;
3116 h->pred8x8[PLANE_PRED8x8 ]= ff_pred8x8_plane_c;
3117 h->pred8x8[LEFT_DC_PRED8x8]= pred8x8_left_dc_c;
3118 h->pred8x8[TOP_DC_PRED8x8 ]= pred8x8_top_dc_c;
3119 h->pred8x8[DC_128_PRED8x8 ]= ff_pred8x8_128_dc_c;
3121 h->pred16x16[DC_PRED8x8 ]= ff_pred16x16_dc_c;
3122 h->pred16x16[VERT_PRED8x8 ]= ff_pred16x16_vertical_c;
3123 h->pred16x16[HOR_PRED8x8 ]= ff_pred16x16_horizontal_c;
3124 h->pred16x16[PLANE_PRED8x8 ]= ff_pred16x16_plane_c;
3125 h->pred16x16[LEFT_DC_PRED8x8]= pred16x16_left_dc_c;
3126 h->pred16x16[TOP_DC_PRED8x8 ]= pred16x16_top_dc_c;
3127 h->pred16x16[DC_128_PRED8x8 ]= ff_pred16x16_128_dc_c;
3130 static void free_tables(H264Context *h){
3131 av_freep(&h->intra4x4_pred_mode);
3132 av_freep(&h->chroma_pred_mode_table);
3133 av_freep(&h->cbp_table);
3134 av_freep(&h->mvd_table[0]);
3135 av_freep(&h->mvd_table[1]);
3136 av_freep(&h->direct_table);
3137 av_freep(&h->non_zero_count);
3138 av_freep(&h->slice_table_base);
3139 av_freep(&h->top_borders[1]);
3140 av_freep(&h->top_borders[0]);
3141 h->slice_table= NULL;
3143 av_freep(&h->mb2b_xy);
3144 av_freep(&h->mb2b8_xy);
3146 av_freep(&h->s.obmc_scratchpad);
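/* Each dequant table entry is the scaling-matrix value multiplied by the base
 * coefficient for qp%6 and shifted left by qp/6; ff_rem6 and ff_div6 are
 * lookup tables for the modulo and the division. */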
3149 static void init_dequant8_coeff_table(H264Context *h){
3151 const int transpose = (h->s.dsp.h264_idct8_add != ff_h264_idct8_add_c); //FIXME ugly
3152 h->dequant8_coeff[0] = h->dequant8_buffer[0];
3153 h->dequant8_coeff[1] = h->dequant8_buffer[1];
3155 for(i=0; i<2; i++ ){
3156 if(i && !memcmp(h->pps.scaling_matrix8[0], h->pps.scaling_matrix8[1], 64*sizeof(uint8_t))){
3157 h->dequant8_coeff[1] = h->dequant8_buffer[0];
3161 for(q=0; q<52; q++){
3162 int shift = ff_div6[q];
3163 int idx = ff_rem6[q];
3165 h->dequant8_coeff[i][q][transpose ? (x>>3)|((x&7)<<3) : x] =
3166 ((uint32_t)dequant8_coeff_init[idx][ dequant8_coeff_init_scan[((x>>1)&12) | (x&3)] ] *
3167 h->pps.scaling_matrix8[i][x]) << shift;
3172 static void init_dequant4_coeff_table(H264Context *h){
3174 const int transpose = (h->s.dsp.h264_idct_add != ff_h264_idct_add_c); //FIXME ugly
3175 for(i=0; i<6; i++ ){
3176 h->dequant4_coeff[i] = h->dequant4_buffer[i];
3178 if(!memcmp(h->pps.scaling_matrix4[j], h->pps.scaling_matrix4[i], 16*sizeof(uint8_t))){
3179 h->dequant4_coeff[i] = h->dequant4_buffer[j];
3186 for(q=0; q<52; q++){
3187 int shift = ff_div6[q] + 2;
3188 int idx = ff_rem6[q];
3190 h->dequant4_coeff[i][q][transpose ? (x>>2)|((x<<2)&0xF) : x] =
3191 ((uint32_t)dequant4_coeff_init[idx][(x&1) + ((x>>2)&1)] *
3192 h->pps.scaling_matrix4[i][x]) << shift;
3197 static void init_dequant_tables(H264Context *h){
3199 init_dequant4_coeff_table(h);
3200 if(h->pps.transform_8x8_mode)
3201 init_dequant8_coeff_table(h);
3202 if(h->sps.transform_bypass){
3205 h->dequant4_coeff[i][0][x] = 1<<6;
3206 if(h->pps.transform_8x8_mode)
3209 h->dequant8_coeff[i][0][x] = 1<<6;
3216 * needs width/height
3218 static int alloc_tables(H264Context *h){
3219 MpegEncContext * const s = &h->s;
3220 const int big_mb_num= s->mb_stride * (s->mb_height+1);
3223 CHECKED_ALLOCZ(h->intra4x4_pred_mode, big_mb_num * 8 * sizeof(uint8_t))
3225 CHECKED_ALLOCZ(h->non_zero_count , big_mb_num * 16 * sizeof(uint8_t))
3226 CHECKED_ALLOCZ(h->slice_table_base , (big_mb_num+s->mb_stride) * sizeof(uint8_t))
3227 CHECKED_ALLOCZ(h->top_borders[0] , s->mb_width * (16+8+8) * sizeof(uint8_t))
3228 CHECKED_ALLOCZ(h->top_borders[1] , s->mb_width * (16+8+8) * sizeof(uint8_t))
3229 CHECKED_ALLOCZ(h->cbp_table, big_mb_num * sizeof(uint16_t))
3231 if( h->pps.cabac ) {
3232 CHECKED_ALLOCZ(h->chroma_pred_mode_table, big_mb_num * sizeof(uint8_t))
3233 CHECKED_ALLOCZ(h->mvd_table[0], 32*big_mb_num * sizeof(uint16_t));
3234 CHECKED_ALLOCZ(h->mvd_table[1], 32*big_mb_num * sizeof(uint16_t));
3235 CHECKED_ALLOCZ(h->direct_table, 32*big_mb_num * sizeof(uint8_t));
3238 memset(h->slice_table_base, -1, (big_mb_num+s->mb_stride) * sizeof(uint8_t));
3239 h->slice_table= h->slice_table_base + s->mb_stride*2 + 1;
3241 CHECKED_ALLOCZ(h->mb2b_xy , big_mb_num * sizeof(uint32_t));
3242 CHECKED_ALLOCZ(h->mb2b8_xy , big_mb_num * sizeof(uint32_t));
3243 for(y=0; y<s->mb_height; y++){
3244 for(x=0; x<s->mb_width; x++){
3245 const int mb_xy= x + y*s->mb_stride;
3246 const int b_xy = 4*x + 4*y*h->b_stride;
3247 const int b8_xy= 2*x + 2*y*h->b8_stride;
3249 h->mb2b_xy [mb_xy]= b_xy;
3250 h->mb2b8_xy[mb_xy]= b8_xy;
3254 s->obmc_scratchpad = NULL;
3256 if(!h->dequant4_coeff[0])
3257 init_dequant_tables(h);
3265 static void common_init(H264Context *h){
3266 MpegEncContext * const s = &h->s;
3268 s->width = s->avctx->width;
3269 s->height = s->avctx->height;
3270 s->codec_id= s->avctx->codec->id;
3274 h->dequant_coeff_pps= -1;
3275 s->unrestricted_mv=1;
3276 s->decode=1; //FIXME
3278 memset(h->pps.scaling_matrix4, 16, 6*16*sizeof(uint8_t));
3279 memset(h->pps.scaling_matrix8, 16, 2*64*sizeof(uint8_t));
3282 static int decode_init(AVCodecContext *avctx){
3283 H264Context *h= avctx->priv_data;
3284 MpegEncContext * const s = &h->s;
3286 MPV_decode_defaults(s);
3291 s->out_format = FMT_H264;
3292 s->workaround_bugs= avctx->workaround_bugs;
3295 // s->decode_mb= ff_h263_decode_mb;
3297 avctx->pix_fmt= PIX_FMT_YUV420P;
3301 if(avctx->extradata_size > 0 && avctx->extradata &&
3302 *(char *)avctx->extradata == 1){
3312 static int frame_start(H264Context *h){
3313 MpegEncContext * const s = &h->s;
3316 if(MPV_frame_start(s, s->avctx) < 0)
3318 ff_er_frame_start(s);
3320 assert(s->linesize && s->uvlinesize);
3322 for(i=0; i<16; i++){
3323 h->block_offset[i]= 4*((scan8[i] - scan8[0])&7) + 4*s->linesize*((scan8[i] - scan8[0])>>3);
3324 h->block_offset[24+i]= 4*((scan8[i] - scan8[0])&7) + 8*s->linesize*((scan8[i] - scan8[0])>>3);
3327 h->block_offset[16+i]=
3328 h->block_offset[20+i]= 4*((scan8[i] - scan8[0])&7) + 4*s->uvlinesize*((scan8[i] - scan8[0])>>3);
3329 h->block_offset[24+16+i]=
3330 h->block_offset[24+20+i]= 4*((scan8[i] - scan8[0])&7) + 8*s->uvlinesize*((scan8[i] - scan8[0])>>3);
3333 /* can't be in alloc_tables because linesize isn't known there.
3334 * FIXME: redo bipred weight to not require extra buffer? */
3335 if(!s->obmc_scratchpad)
3336 s->obmc_scratchpad = av_malloc(16*2*s->linesize + 8*2*s->uvlinesize);
3338 /* some macroblocks will be accessed before they're available */
3340 memset(h->slice_table, -1, (s->mb_height*s->mb_stride-1) * sizeof(uint8_t));
3342 // s->decode= (s->flags&CODEC_FLAG_PSNR) || !s->encoding || s->current_picture.reference /*|| h->contains_intra*/ || 1;
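/* The loop filter overwrites pixels that intra prediction of subsequent
 * macroblocks still needs, so the unfiltered top and left borders are saved
 * (backup_*_border) and temporarily swapped back in around prediction
 * (xchg_*_border). */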
3346 static inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize){
3347 MpegEncContext * const s = &h->s;
3351 src_cb -= uvlinesize;
3352 src_cr -= uvlinesize;
3354 // There are two lines saved, the line above the top macroblock of a pair,
3355 // and the line above the bottom macroblock
3356 h->left_border[0]= h->top_borders[0][s->mb_x][15];
3357 for(i=1; i<17; i++){
3358 h->left_border[i]= src_y[15+i* linesize];
3361 *(uint64_t*)(h->top_borders[0][s->mb_x]+0)= *(uint64_t*)(src_y + 16*linesize);
3362 *(uint64_t*)(h->top_borders[0][s->mb_x]+8)= *(uint64_t*)(src_y +8+16*linesize);
3364 if(!(s->flags&CODEC_FLAG_GRAY)){
3365 h->left_border[17 ]= h->top_borders[0][s->mb_x][16+7];
3366 h->left_border[17+9]= h->top_borders[0][s->mb_x][24+7];
3368 h->left_border[i+17 ]= src_cb[7+i*uvlinesize];
3369 h->left_border[i+17+9]= src_cr[7+i*uvlinesize];
3371 *(uint64_t*)(h->top_borders[0][s->mb_x]+16)= *(uint64_t*)(src_cb+8*uvlinesize);
3372 *(uint64_t*)(h->top_borders[0][s->mb_x]+24)= *(uint64_t*)(src_cr+8*uvlinesize);
3376 static inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg){
3377 MpegEncContext * const s = &h->s;
3380 int deblock_left = (s->mb_x > 0);
3381 int deblock_top = (s->mb_y > 0);
3383 src_y -= linesize + 1;
3384 src_cb -= uvlinesize + 1;
3385 src_cr -= uvlinesize + 1;
3387 #define XCHG(a,b,t,xchg)\
3394 for(i = !deblock_top; i<17; i++){
3395 XCHG(h->left_border[i ], src_y [i* linesize], temp8, xchg);
3400 XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+0), *(uint64_t*)(src_y +1), temp64, xchg);
3401 XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+8), *(uint64_t*)(src_y +9), temp64, 1);
3402 if(s->mb_x+1 < s->mb_width){
3403 XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x+1]), *(uint64_t*)(src_y +17), temp64, 1);
3407 if(!(s->flags&CODEC_FLAG_GRAY)){
3409 for(i = !deblock_top; i<9; i++){
3410 XCHG(h->left_border[i+17 ], src_cb[i*uvlinesize], temp8, xchg);
3411 XCHG(h->left_border[i+17+9], src_cr[i*uvlinesize], temp8, xchg);
3415 XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+16), *(uint64_t*)(src_cb+1), temp64, 1);
3416 XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+24), *(uint64_t*)(src_cr+1), temp64, 1);
3421 static inline void backup_pair_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize){
3422 MpegEncContext * const s = &h->s;
3425 src_y -= 2 * linesize;
3426 src_cb -= 2 * uvlinesize;
3427 src_cr -= 2 * uvlinesize;
3429 // There are two lines saved, the line above the top macroblock of a pair,
3430 // and the line above the bottom macroblock
3431 h->left_border[0]= h->top_borders[0][s->mb_x][15];
3432 h->left_border[1]= h->top_borders[1][s->mb_x][15];
3433 for(i=2; i<34; i++){
3434 h->left_border[i]= src_y[15+i* linesize];
3437 *(uint64_t*)(h->top_borders[0][s->mb_x]+0)= *(uint64_t*)(src_y + 32*linesize);
3438 *(uint64_t*)(h->top_borders[0][s->mb_x]+8)= *(uint64_t*)(src_y +8+32*linesize);
3439 *(uint64_t*)(h->top_borders[1][s->mb_x]+0)= *(uint64_t*)(src_y + 33*linesize);
3440 *(uint64_t*)(h->top_borders[1][s->mb_x]+8)= *(uint64_t*)(src_y +8+33*linesize);
3442 if(!(s->flags&CODEC_FLAG_GRAY)){
3443 h->left_border[34 ]= h->top_borders[0][s->mb_x][16+7];
3444 h->left_border[34+ 1]= h->top_borders[1][s->mb_x][16+7];
3445 h->left_border[34+18 ]= h->top_borders[0][s->mb_x][24+7];
3446 h->left_border[34+18+1]= h->top_borders[1][s->mb_x][24+7];
3447 for(i=2; i<18; i++){
3448 h->left_border[i+34 ]= src_cb[7+i*uvlinesize];
3449 h->left_border[i+34+18]= src_cr[7+i*uvlinesize];
3451 *(uint64_t*)(h->top_borders[0][s->mb_x]+16)= *(uint64_t*)(src_cb+16*uvlinesize);
3452 *(uint64_t*)(h->top_borders[0][s->mb_x]+24)= *(uint64_t*)(src_cr+16*uvlinesize);
3453 *(uint64_t*)(h->top_borders[1][s->mb_x]+16)= *(uint64_t*)(src_cb+17*uvlinesize);
3454 *(uint64_t*)(h->top_borders[1][s->mb_x]+24)= *(uint64_t*)(src_cr+17*uvlinesize);
3458 static inline void xchg_pair_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg){
3459 MpegEncContext * const s = &h->s;
3462 int deblock_left = (s->mb_x > 0);
3463 int deblock_top = (s->mb_y > 1);
3465 tprintf("xchg_pair_border: src_y:%p src_cb:%p src_cr:%p ls:%d uvls:%d\n", src_y, src_cb, src_cr, linesize, uvlinesize);
3467 src_y -= 2 * linesize + 1;
3468 src_cb -= 2 * uvlinesize + 1;
3469 src_cr -= 2 * uvlinesize + 1;
3471 #define XCHG(a,b,t,xchg)\
3478 for(i = (!deblock_top)<<1; i<34; i++){
3479 XCHG(h->left_border[i ], src_y [i* linesize], temp8, xchg);
3484 XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+0), *(uint64_t*)(src_y +1), temp64, xchg);
3485 XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+8), *(uint64_t*)(src_y +9), temp64, 1);
3486 XCHG(*(uint64_t*)(h->top_borders[1][s->mb_x]+0), *(uint64_t*)(src_y +1 +linesize), temp64, xchg);
3487 XCHG(*(uint64_t*)(h->top_borders[1][s->mb_x]+8), *(uint64_t*)(src_y +9 +linesize), temp64, 1);
3488 if(s->mb_x+1 < s->mb_width){
3489 XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x+1]), *(uint64_t*)(src_y +17), temp64, 1);
3490 XCHG(*(uint64_t*)(h->top_borders[1][s->mb_x+1]), *(uint64_t*)(src_y +17 +linesize), temp64, 1);
3494 if(!(s->flags&CODEC_FLAG_GRAY)){
3496 for(i = (!deblock_top) << 1; i<18; i++){
3497 XCHG(h->left_border[i+34 ], src_cb[i*uvlinesize], temp8, xchg);
3498 XCHG(h->left_border[i+34+18], src_cr[i*uvlinesize], temp8, xchg);
3502 XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+16), *(uint64_t*)(src_cb+1), temp64, 1);
3503 XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+24), *(uint64_t*)(src_cr+1), temp64, 1);
3504 XCHG(*(uint64_t*)(h->top_borders[1][s->mb_x]+16), *(uint64_t*)(src_cb+1 +uvlinesize), temp64, 1);
3505 XCHG(*(uint64_t*)(h->top_borders[1][s->mb_x]+24), *(uint64_t*)(src_cr+1 +uvlinesize), temp64, 1);
3510 static void hl_decode_mb(H264Context *h){
3511 MpegEncContext * const s = &h->s;
3512 const int mb_x= s->mb_x;
3513 const int mb_y= s->mb_y;
3514 const int mb_xy= mb_x + mb_y*s->mb_stride;
3515 const int mb_type= s->current_picture.mb_type[mb_xy];
3516 uint8_t *dest_y, *dest_cb, *dest_cr;
3517 int linesize, uvlinesize /*dct_offset*/;
3519 int *block_offset = &h->block_offset[0];
3520 const unsigned int bottom = mb_y & 1;
3521 const int transform_bypass = (s->qscale == 0 && h->sps.transform_bypass);
3522 void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride);
3523 void (*idct_dc_add)(uint8_t *dst, DCTELEM *block, int stride);
3528 dest_y = s->current_picture.data[0] + (mb_y * 16* s->linesize ) + mb_x * 16;
3529 dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
3530 dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
3532 s->dsp.prefetch(dest_y + (s->mb_x&3)*4*s->linesize + 64, s->linesize, 4);
3533 s->dsp.prefetch(dest_cb + (s->mb_x&7)*s->uvlinesize + 64, dest_cr - dest_cb, 2);
3536 linesize = h->mb_linesize = s->linesize * 2;
3537 uvlinesize = h->mb_uvlinesize = s->uvlinesize * 2;
3538 block_offset = &h->block_offset[24];
3539 if(mb_y&1){ //FIXME move out of this func?
3540 dest_y -= s->linesize*15;
3541 dest_cb-= s->uvlinesize*7;
3542 dest_cr-= s->uvlinesize*7;
3546 for(list=0; list<h->list_count; list++){
3547 if(!USES_LIST(mb_type, list))
3549 if(IS_16X16(mb_type)){
3550 int8_t *ref = &h->ref_cache[list][scan8[0]];
3551 fill_rectangle(ref, 4, 4, 8, 16+*ref^(s->mb_y&1), 1);
3553 for(i=0; i<16; i+=4){
3554 //FIXME can refs be smaller than 8x8 when !direct_8x8_inference ?
3555 int ref = h->ref_cache[list][scan8[i]];
3557 fill_rectangle(&h->ref_cache[list][scan8[i]], 2, 2, 8, 16+ref^(s->mb_y&1), 1);
3563 linesize = h->mb_linesize = s->linesize;
3564 uvlinesize = h->mb_uvlinesize = s->uvlinesize;
3565 // dct_offset = s->linesize * 16;
3568 if(transform_bypass){