[ffmpeg.git] / libavcodec / h264.c @ commit 6980304fa07562f950a533522e980953a5ebc1ff
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 /**
23  * @file h264.c
24  * H.264 / AVC / MPEG4 part10 codec.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27
28 #include "dsputil.h"
29 #include "avcodec.h"
30 #include "mpegvideo.h"
31 #include "h264.h"
32 #include "h264data.h"
33 #include "h264_parser.h"
34 #include "golomb.h"
35 #include "rectangle.h"
36
37 #include "cabac.h"
38 #ifdef ARCH_X86
39 #include "i386/h264_i386.h"
40 #endif
41
42 //#undef NDEBUG
43 #include <assert.h>
44
45 /**
46  * Value of Picture.reference when Picture is not a reference picture, but
47  * is held for delayed output.
48  */
49 #define DELAYED_PIC_REF 4
50
51 static VLC coeff_token_vlc[4];
52 static VLC_TYPE coeff_token_vlc_tables[520+332+280+256][2];
53 static const int coeff_token_vlc_tables_size[4]={520,332,280,256};
54
55 static VLC chroma_dc_coeff_token_vlc;
56 static VLC_TYPE chroma_dc_coeff_token_vlc_table[256][2];
57 static const int chroma_dc_coeff_token_vlc_table_size = 256;
58
59 static VLC total_zeros_vlc[15];
60 static VLC_TYPE total_zeros_vlc_tables[15][512][2];
61 static const int total_zeros_vlc_tables_size = 512;
62
63 static VLC chroma_dc_total_zeros_vlc[3];
64 static VLC_TYPE chroma_dc_total_zeros_vlc_tables[3][8][2];
65 static const int chroma_dc_total_zeros_vlc_tables_size = 8;
66
67 static VLC run_vlc[6];
68 static VLC_TYPE run_vlc_tables[6][8][2];
69 static const int run_vlc_tables_size = 8;
70
71 static VLC run7_vlc;
72 static VLC_TYPE run7_vlc_table[96][2];
73 static const int run7_vlc_table_size = 96;
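/*
 * These VLC tables are shared by all decoder instances and built once at init
 * time. For luma residuals the coeff_token table is selected from the
 * predicted number of non-zero coefficients nC (see pred_non_zero_count()):
 * nC 0-1, 2-3 and 4-7 pick coeff_token_vlc[0..2] and nC >= 8 uses the
 * fixed-length table; chroma DC blocks have their own, smaller tables.
 */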
74
75 static void svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp);
76 static void svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp, int dc);
77 static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize);
78 static void filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize);
79 static Picture * remove_long(H264Context *h, int i, int ref_mask);
80
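/*
 * Packs two 16-bit motion vector components into one 32-bit word with the
 * same byte order as an int16_t mv[2] pair in memory, so a whole MV can be
 * stored or compared with a single 32-bit access, e.g.
 *     *(uint32_t*)h->mv_cache[list][i] = pack16to32(mx, my);
 */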
81 static av_always_inline uint32_t pack16to32(int a, int b){
82 #ifdef WORDS_BIGENDIAN
83    return (b&0xFFFF) + (a<<16);
84 #else
85    return (a&0xFFFF) + (b<<16);
86 #endif
87 }
88
89 const uint8_t ff_rem6[52]={
90 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
91 };
92
93 const uint8_t ff_div6[52]={
94 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8,
95 };
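/*
 * ff_rem6[qp] == qp % 6 and ff_div6[qp] == qp / 6 for the legal QP range
 * 0..51; the dequantisation scale repeats every 6 QP steps and is shifted
 * left by qp/6, so these tables avoid a division and modulo per lookup.
 */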
96
97 static const int left_block_options[4][8]={
98     {0,1,2,3,7,10,8,11},
99     {2,2,3,3,8,11,8,11},
100     {0,0,1,1,7,10,7,10},
101     {0,2,0,2,7,10,7,10}
102 };
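/*
 * Each row of left_block_options[] lists which of the left neighbour's 4x4
 * blocks (first four entries, luma) and nnz positions (last four, chroma)
 * feed the left edge of the caches; rows 1-3 cover the MBAFF frame/field
 * mismatch cases selected in fill_caches() below, row 0 is the default.
 */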
103
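/*
 * fill_caches() gathers everything the current macroblock needs from its
 * left/top/topleft/topright neighbours into small per-MB caches
 * (intra4x4_pred_mode_cache, non_zero_count_cache, mv_cache, ref_cache, ...)
 * laid out on the 8-wide scan8[] grid: row 0 holds the top neighbour,
 * column 0 the left neighbour, and the 4x4 interior the current MB.
 */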
104 static void fill_caches(H264Context *h, int mb_type, int for_deblock){
105     MpegEncContext * const s = &h->s;
106     const int mb_xy= h->mb_xy;
107     int topleft_xy, top_xy, topright_xy, left_xy[2];
108     int topleft_type, top_type, topright_type, left_type[2];
109     int * left_block;
110     int topleft_partition= -1;
111     int i;
112
113     top_xy     = mb_xy  - (s->mb_stride << FIELD_PICTURE);
114
115     //FIXME deblocking could skip the intra and nnz parts.
116     if(for_deblock && (h->slice_num == 1 || h->slice_table[mb_xy] == h->slice_table[top_xy]) && !FRAME_MBAFF)
117         return;
118
119     /* Wow, what a mess; why didn't they simplify the interlacing & intra
120      * stuff? I can't imagine that these complex rules are worth it. */
121
122     topleft_xy = top_xy - 1;
123     topright_xy= top_xy + 1;
124     left_xy[1] = left_xy[0] = mb_xy-1;
125     left_block = left_block_options[0];
126     if(FRAME_MBAFF){
127         const int pair_xy          = s->mb_x     + (s->mb_y & ~1)*s->mb_stride;
128         const int top_pair_xy      = pair_xy     - s->mb_stride;
129         const int topleft_pair_xy  = top_pair_xy - 1;
130         const int topright_pair_xy = top_pair_xy + 1;
131         const int topleft_mb_frame_flag  = !IS_INTERLACED(s->current_picture.mb_type[topleft_pair_xy]);
132         const int top_mb_frame_flag      = !IS_INTERLACED(s->current_picture.mb_type[top_pair_xy]);
133         const int topright_mb_frame_flag = !IS_INTERLACED(s->current_picture.mb_type[topright_pair_xy]);
134         const int left_mb_frame_flag = !IS_INTERLACED(s->current_picture.mb_type[pair_xy-1]);
135         const int curr_mb_frame_flag = !IS_INTERLACED(mb_type);
136         const int bottom = (s->mb_y & 1);
137         tprintf(s->avctx, "fill_caches: curr_mb_frame_flag:%d, left_mb_frame_flag:%d, topleft_mb_frame_flag:%d, top_mb_frame_flag:%d, topright_mb_frame_flag:%d\n", curr_mb_frame_flag, left_mb_frame_flag, topleft_mb_frame_flag, top_mb_frame_flag, topright_mb_frame_flag);
138         if (bottom
139                 ? !curr_mb_frame_flag // bottom macroblock
140                 : (!curr_mb_frame_flag && !top_mb_frame_flag) // top macroblock
141                 ) {
142             top_xy -= s->mb_stride;
143         }
144         if (bottom
145                 ? !curr_mb_frame_flag // bottom macroblock
146                 : (!curr_mb_frame_flag && !topleft_mb_frame_flag) // top macroblock
147                 ) {
148             topleft_xy -= s->mb_stride;
149         } else if(bottom && curr_mb_frame_flag && !left_mb_frame_flag) {
150             topleft_xy += s->mb_stride;
151             // take top left mv from the middle of the mb, as opposed to all other modes which use the bottom right partition
152             topleft_partition = 0;
153         }
154         if (bottom
155                 ? !curr_mb_frame_flag // bottom macroblock
156                 : (!curr_mb_frame_flag && !topright_mb_frame_flag) // top macroblock
157                 ) {
158             topright_xy -= s->mb_stride;
159         }
160         if (left_mb_frame_flag != curr_mb_frame_flag) {
161             left_xy[1] = left_xy[0] = pair_xy - 1;
162             if (curr_mb_frame_flag) {
163                 if (bottom) {
164                     left_block = left_block_options[1];
165                 } else {
166                     left_block= left_block_options[2];
167                 }
168             } else {
169                 left_xy[1] += s->mb_stride;
170                 left_block = left_block_options[3];
171             }
172         }
173     }
174
175     h->top_mb_xy = top_xy;
176     h->left_mb_xy[0] = left_xy[0];
177     h->left_mb_xy[1] = left_xy[1];
178     if(for_deblock){
179         topleft_type = 0;
180         topright_type = 0;
181         top_type     = h->slice_table[top_xy     ] < 255 ? s->current_picture.mb_type[top_xy]     : 0;
182         left_type[0] = h->slice_table[left_xy[0] ] < 255 ? s->current_picture.mb_type[left_xy[0]] : 0;
183         left_type[1] = h->slice_table[left_xy[1] ] < 255 ? s->current_picture.mb_type[left_xy[1]] : 0;
184
185         if(FRAME_MBAFF && !IS_INTRA(mb_type)){
186             int list;
187             int v = *(uint16_t*)&h->non_zero_count[mb_xy][14];
188             for(i=0; i<16; i++)
189                 h->non_zero_count_cache[scan8[i]] = (v>>i)&1;
190             for(list=0; list<h->list_count; list++){
191                 if(USES_LIST(mb_type,list)){
192                     uint32_t *src = (uint32_t*)s->current_picture.motion_val[list][h->mb2b_xy[mb_xy]];
193                     uint32_t *dst = (uint32_t*)h->mv_cache[list][scan8[0]];
194                     int8_t *ref = &s->current_picture.ref_index[list][h->mb2b8_xy[mb_xy]];
195                     for(i=0; i<4; i++, dst+=8, src+=h->b_stride){
196                         dst[0] = src[0];
197                         dst[1] = src[1];
198                         dst[2] = src[2];
199                         dst[3] = src[3];
200                     }
201                     *(uint32_t*)&h->ref_cache[list][scan8[ 0]] =
202                     *(uint32_t*)&h->ref_cache[list][scan8[ 2]] = pack16to32(ref[0],ref[1])*0x0101;
203                     ref += h->b8_stride;
204                     *(uint32_t*)&h->ref_cache[list][scan8[ 8]] =
205                     *(uint32_t*)&h->ref_cache[list][scan8[10]] = pack16to32(ref[0],ref[1])*0x0101;
206                 }else{
207                     fill_rectangle(&h-> mv_cache[list][scan8[ 0]], 4, 4, 8, 0, 4);
208                     fill_rectangle(&h->ref_cache[list][scan8[ 0]], 4, 4, 8, (uint8_t)LIST_NOT_USED, 1);
209                 }
210             }
211         }
212     }else{
213         topleft_type = h->slice_table[topleft_xy ] == h->slice_num ? s->current_picture.mb_type[topleft_xy] : 0;
214         top_type     = h->slice_table[top_xy     ] == h->slice_num ? s->current_picture.mb_type[top_xy]     : 0;
215         topright_type= h->slice_table[topright_xy] == h->slice_num ? s->current_picture.mb_type[topright_xy]: 0;
216         left_type[0] = h->slice_table[left_xy[0] ] == h->slice_num ? s->current_picture.mb_type[left_xy[0]] : 0;
217         left_type[1] = h->slice_table[left_xy[1] ] == h->slice_num ? s->current_picture.mb_type[left_xy[1]] : 0;
218     }
219
220     if(IS_INTRA(mb_type)){
221         h->topleft_samples_available=
222         h->top_samples_available=
223         h->left_samples_available= 0xFFFF;
224         h->topright_samples_available= 0xEEEA;
225
226         if(!IS_INTRA(top_type) && (top_type==0 || h->pps.constrained_intra_pred)){
227             h->topleft_samples_available= 0xB3FF;
228             h->top_samples_available= 0x33FF;
229             h->topright_samples_available= 0x26EA;
230         }
231         for(i=0; i<2; i++){
232             if(!IS_INTRA(left_type[i]) && (left_type[i]==0 || h->pps.constrained_intra_pred)){
233                 h->topleft_samples_available&= 0xDF5F;
234                 h->left_samples_available&= 0x5F5F;
235             }
236         }
237
238         if(!IS_INTRA(topleft_type) && (topleft_type==0 || h->pps.constrained_intra_pred))
239             h->topleft_samples_available&= 0x7FFF;
240
241         if(!IS_INTRA(topright_type) && (topright_type==0 || h->pps.constrained_intra_pred))
242             h->topright_samples_available&= 0xFBFF;
243
244         if(IS_INTRA4x4(mb_type)){
245             if(IS_INTRA4x4(top_type)){
246                 h->intra4x4_pred_mode_cache[4+8*0]= h->intra4x4_pred_mode[top_xy][4];
247                 h->intra4x4_pred_mode_cache[5+8*0]= h->intra4x4_pred_mode[top_xy][5];
248                 h->intra4x4_pred_mode_cache[6+8*0]= h->intra4x4_pred_mode[top_xy][6];
249                 h->intra4x4_pred_mode_cache[7+8*0]= h->intra4x4_pred_mode[top_xy][3];
250             }else{
251                 int pred;
252                 if(!top_type || (IS_INTER(top_type) && h->pps.constrained_intra_pred))
253                     pred= -1;
254                 else{
255                     pred= 2;
256                 }
257                 h->intra4x4_pred_mode_cache[4+8*0]=
258                 h->intra4x4_pred_mode_cache[5+8*0]=
259                 h->intra4x4_pred_mode_cache[6+8*0]=
260                 h->intra4x4_pred_mode_cache[7+8*0]= pred;
261             }
262             for(i=0; i<2; i++){
263                 if(IS_INTRA4x4(left_type[i])){
264                     h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[0+2*i]];
265                     h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[1+2*i]];
266                 }else{
267                     int pred;
268                     if(!left_type[i] || (IS_INTER(left_type[i]) && h->pps.constrained_intra_pred))
269                         pred= -1;
270                     else{
271                         pred= 2;
272                     }
273                     h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]=
274                     h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= pred;
275                 }
276             }
277         }
278     }
279
280
281 /*
282 0 . T T. T T T T
283 1 L . .L . . . .
284 2 L . .L . . . .
285 3 . T TL . . . .
286 4 L . .L . . . .
287 5 L . .. . . . .
288 */
289 //FIXME constrained_intra_pred & partitioning & nnz (let us hope this is just a typo in the spec)
290     if(top_type){
291         h->non_zero_count_cache[4+8*0]= h->non_zero_count[top_xy][4];
292         h->non_zero_count_cache[5+8*0]= h->non_zero_count[top_xy][5];
293         h->non_zero_count_cache[6+8*0]= h->non_zero_count[top_xy][6];
294         h->non_zero_count_cache[7+8*0]= h->non_zero_count[top_xy][3];
295
296         h->non_zero_count_cache[1+8*0]= h->non_zero_count[top_xy][9];
297         h->non_zero_count_cache[2+8*0]= h->non_zero_count[top_xy][8];
298
299         h->non_zero_count_cache[1+8*3]= h->non_zero_count[top_xy][12];
300         h->non_zero_count_cache[2+8*3]= h->non_zero_count[top_xy][11];
301
302     }else{
303         h->non_zero_count_cache[4+8*0]=
304         h->non_zero_count_cache[5+8*0]=
305         h->non_zero_count_cache[6+8*0]=
306         h->non_zero_count_cache[7+8*0]=
307
308         h->non_zero_count_cache[1+8*0]=
309         h->non_zero_count_cache[2+8*0]=
310
311         h->non_zero_count_cache[1+8*3]=
312         h->non_zero_count_cache[2+8*3]= h->pps.cabac && !IS_INTRA(mb_type) ? 0 : 64;
313
314     }
315
316     for (i=0; i<2; i++) {
317         if(left_type[i]){
318             h->non_zero_count_cache[3+8*1 + 2*8*i]= h->non_zero_count[left_xy[i]][left_block[0+2*i]];
319             h->non_zero_count_cache[3+8*2 + 2*8*i]= h->non_zero_count[left_xy[i]][left_block[1+2*i]];
320             h->non_zero_count_cache[0+8*1 +   8*i]= h->non_zero_count[left_xy[i]][left_block[4+2*i]];
321             h->non_zero_count_cache[0+8*4 +   8*i]= h->non_zero_count[left_xy[i]][left_block[5+2*i]];
322         }else{
323             h->non_zero_count_cache[3+8*1 + 2*8*i]=
324             h->non_zero_count_cache[3+8*2 + 2*8*i]=
325             h->non_zero_count_cache[0+8*1 +   8*i]=
326             h->non_zero_count_cache[0+8*4 +   8*i]= h->pps.cabac && !IS_INTRA(mb_type) ? 0 : 64;
327         }
328     }
329
330     if( h->pps.cabac ) {
331         // top_cbp
332         if(top_type) {
333             h->top_cbp = h->cbp_table[top_xy];
334         } else if(IS_INTRA(mb_type)) {
335             h->top_cbp = 0x1C0;
336         } else {
337             h->top_cbp = 0;
338         }
339         // left_cbp
340         if (left_type[0]) {
341             h->left_cbp = h->cbp_table[left_xy[0]] & 0x1f0;
342         } else if(IS_INTRA(mb_type)) {
343             h->left_cbp = 0x1C0;
344         } else {
345             h->left_cbp = 0;
346         }
347         if (left_type[0]) {
348             h->left_cbp |= ((h->cbp_table[left_xy[0]]>>((left_block[0]&(~1))+1))&0x1) << 1;
349         }
350         if (left_type[1]) {
351             h->left_cbp |= ((h->cbp_table[left_xy[1]]>>((left_block[2]&(~1))+1))&0x1) << 3;
352         }
353     }
354
355 #if 1
356     if(IS_INTER(mb_type) || IS_DIRECT(mb_type)){
357         int list;
358         for(list=0; list<h->list_count; list++){
359             if(!USES_LIST(mb_type, list) && !IS_DIRECT(mb_type) && !h->deblocking_filter){
360                 /*if(!h->mv_cache_clean[list]){
361                     memset(h->mv_cache [list],  0, 8*5*2*sizeof(int16_t)); //FIXME clean only input? clean at all?
362                     memset(h->ref_cache[list], PART_NOT_AVAILABLE, 8*5*sizeof(int8_t));
363                     h->mv_cache_clean[list]= 1;
364                 }*/
365                 continue;
366             }
367             h->mv_cache_clean[list]= 0;
368
369             if(USES_LIST(top_type, list)){
370                 const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
371                 const int b8_xy= h->mb2b8_xy[top_xy] + h->b8_stride;
372                 *(uint32_t*)h->mv_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 0];
373                 *(uint32_t*)h->mv_cache[list][scan8[0] + 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 1];
374                 *(uint32_t*)h->mv_cache[list][scan8[0] + 2 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 2];
375                 *(uint32_t*)h->mv_cache[list][scan8[0] + 3 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 3];
376                 h->ref_cache[list][scan8[0] + 0 - 1*8]=
377                 h->ref_cache[list][scan8[0] + 1 - 1*8]= s->current_picture.ref_index[list][b8_xy + 0];
378                 h->ref_cache[list][scan8[0] + 2 - 1*8]=
379                 h->ref_cache[list][scan8[0] + 3 - 1*8]= s->current_picture.ref_index[list][b8_xy + 1];
380             }else{
381                 *(uint32_t*)h->mv_cache [list][scan8[0] + 0 - 1*8]=
382                 *(uint32_t*)h->mv_cache [list][scan8[0] + 1 - 1*8]=
383                 *(uint32_t*)h->mv_cache [list][scan8[0] + 2 - 1*8]=
384                 *(uint32_t*)h->mv_cache [list][scan8[0] + 3 - 1*8]= 0;
385                 *(uint32_t*)&h->ref_cache[list][scan8[0] + 0 - 1*8]= ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101;
386             }
387
388             for(i=0; i<2; i++){
389                 int cache_idx = scan8[0] - 1 + i*2*8;
390                 if(USES_LIST(left_type[i], list)){
391                     const int b_xy= h->mb2b_xy[left_xy[i]] + 3;
392                     const int b8_xy= h->mb2b8_xy[left_xy[i]] + 1;
393                     *(uint32_t*)h->mv_cache[list][cache_idx  ]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0+i*2]];
394                     *(uint32_t*)h->mv_cache[list][cache_idx+8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[1+i*2]];
395                     h->ref_cache[list][cache_idx  ]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[0+i*2]>>1)];
396                     h->ref_cache[list][cache_idx+8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[1+i*2]>>1)];
397                 }else{
398                     *(uint32_t*)h->mv_cache [list][cache_idx  ]=
399                     *(uint32_t*)h->mv_cache [list][cache_idx+8]= 0;
400                     h->ref_cache[list][cache_idx  ]=
401                     h->ref_cache[list][cache_idx+8]= left_type[i] ? LIST_NOT_USED : PART_NOT_AVAILABLE;
402                 }
403             }
404
405             if((for_deblock || (IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred)) && !FRAME_MBAFF)
406                 continue;
407
408             if(USES_LIST(topleft_type, list)){
409                 const int b_xy = h->mb2b_xy[topleft_xy] + 3 + h->b_stride + (topleft_partition & 2*h->b_stride);
410                 const int b8_xy= h->mb2b8_xy[topleft_xy] + 1 + (topleft_partition & h->b8_stride);
411                 *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
412                 h->ref_cache[list][scan8[0] - 1 - 1*8]= s->current_picture.ref_index[list][b8_xy];
413             }else{
414                 *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= 0;
415                 h->ref_cache[list][scan8[0] - 1 - 1*8]= topleft_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
416             }
417
418             if(USES_LIST(topright_type, list)){
419                 const int b_xy= h->mb2b_xy[topright_xy] + 3*h->b_stride;
420                 const int b8_xy= h->mb2b8_xy[topright_xy] + h->b8_stride;
421                 *(uint32_t*)h->mv_cache[list][scan8[0] + 4 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
422                 h->ref_cache[list][scan8[0] + 4 - 1*8]= s->current_picture.ref_index[list][b8_xy];
423             }else{
424                 *(uint32_t*)h->mv_cache [list][scan8[0] + 4 - 1*8]= 0;
425                 h->ref_cache[list][scan8[0] + 4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
426             }
427
428             if((IS_SKIP(mb_type) || IS_DIRECT(mb_type)) && !FRAME_MBAFF)
429                 continue;
430
431             h->ref_cache[list][scan8[5 ]+1] =
432             h->ref_cache[list][scan8[7 ]+1] =
433             h->ref_cache[list][scan8[13]+1] =  //FIXME remove past 3 (init somewhere else)
434             h->ref_cache[list][scan8[4 ]] =
435             h->ref_cache[list][scan8[12]] = PART_NOT_AVAILABLE;
436             *(uint32_t*)h->mv_cache [list][scan8[5 ]+1]=
437             *(uint32_t*)h->mv_cache [list][scan8[7 ]+1]=
438             *(uint32_t*)h->mv_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewhere else)
439             *(uint32_t*)h->mv_cache [list][scan8[4 ]]=
440             *(uint32_t*)h->mv_cache [list][scan8[12]]= 0;
441
442             if( h->pps.cabac ) {
443                 /* XXX yuck, load mvd */
444                 if(USES_LIST(top_type, list)){
445                     const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride;
446                     *(uint32_t*)h->mvd_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 0];
447                     *(uint32_t*)h->mvd_cache[list][scan8[0] + 1 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 1];
448                     *(uint32_t*)h->mvd_cache[list][scan8[0] + 2 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 2];
449                     *(uint32_t*)h->mvd_cache[list][scan8[0] + 3 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 3];
450                 }else{
451                     *(uint32_t*)h->mvd_cache [list][scan8[0] + 0 - 1*8]=
452                     *(uint32_t*)h->mvd_cache [list][scan8[0] + 1 - 1*8]=
453                     *(uint32_t*)h->mvd_cache [list][scan8[0] + 2 - 1*8]=
454                     *(uint32_t*)h->mvd_cache [list][scan8[0] + 3 - 1*8]= 0;
455                 }
456                 if(USES_LIST(left_type[0], list)){
457                     const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
458                     *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[0]];
459                     *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[1]];
460                 }else{
461                     *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 0*8]=
462                     *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 1*8]= 0;
463                 }
464                 if(USES_LIST(left_type[1], list)){
465                     const int b_xy= h->mb2b_xy[left_xy[1]] + 3;
466                     *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[2]];
467                     *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[3]];
468                 }else{
469                     *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 2*8]=
470                     *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 3*8]= 0;
471                 }
472                 *(uint32_t*)h->mvd_cache [list][scan8[5 ]+1]=
473                 *(uint32_t*)h->mvd_cache [list][scan8[7 ]+1]=
474                 *(uint32_t*)h->mvd_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewhere else)
475                 *(uint32_t*)h->mvd_cache [list][scan8[4 ]]=
476                 *(uint32_t*)h->mvd_cache [list][scan8[12]]= 0;
477
478                 if(h->slice_type_nos == FF_B_TYPE){
479                     fill_rectangle(&h->direct_cache[scan8[0]], 4, 4, 8, 0, 1);
480
481                     if(IS_DIRECT(top_type)){
482                         *(uint32_t*)&h->direct_cache[scan8[0] - 1*8]= 0x01010101;
483                     }else if(IS_8X8(top_type)){
484                         int b8_xy = h->mb2b8_xy[top_xy] + h->b8_stride;
485                         h->direct_cache[scan8[0] + 0 - 1*8]= h->direct_table[b8_xy];
486                         h->direct_cache[scan8[0] + 2 - 1*8]= h->direct_table[b8_xy + 1];
487                     }else{
488                         *(uint32_t*)&h->direct_cache[scan8[0] - 1*8]= 0;
489                     }
490
491                     if(IS_DIRECT(left_type[0]))
492                         h->direct_cache[scan8[0] - 1 + 0*8]= 1;
493                     else if(IS_8X8(left_type[0]))
494                         h->direct_cache[scan8[0] - 1 + 0*8]= h->direct_table[h->mb2b8_xy[left_xy[0]] + 1 + h->b8_stride*(left_block[0]>>1)];
495                     else
496                         h->direct_cache[scan8[0] - 1 + 0*8]= 0;
497
498                     if(IS_DIRECT(left_type[1]))
499                         h->direct_cache[scan8[0] - 1 + 2*8]= 1;
500                     else if(IS_8X8(left_type[1]))
501                         h->direct_cache[scan8[0] - 1 + 2*8]= h->direct_table[h->mb2b8_xy[left_xy[1]] + 1 + h->b8_stride*(left_block[2]>>1)];
502                     else
503                         h->direct_cache[scan8[0] - 1 + 2*8]= 0;
504                 }
505             }
506
507             if(FRAME_MBAFF){
508 #define MAP_MVS\
509                     MAP_F2F(scan8[0] - 1 - 1*8, topleft_type)\
510                     MAP_F2F(scan8[0] + 0 - 1*8, top_type)\
511                     MAP_F2F(scan8[0] + 1 - 1*8, top_type)\
512                     MAP_F2F(scan8[0] + 2 - 1*8, top_type)\
513                     MAP_F2F(scan8[0] + 3 - 1*8, top_type)\
514                     MAP_F2F(scan8[0] + 4 - 1*8, topright_type)\
515                     MAP_F2F(scan8[0] - 1 + 0*8, left_type[0])\
516                     MAP_F2F(scan8[0] - 1 + 1*8, left_type[0])\
517                     MAP_F2F(scan8[0] - 1 + 2*8, left_type[1])\
518                     MAP_F2F(scan8[0] - 1 + 3*8, left_type[1])
519                 if(MB_FIELD){
520 #define MAP_F2F(idx, mb_type)\
521                     if(!IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\
522                         h->ref_cache[list][idx] <<= 1;\
523                         h->mv_cache[list][idx][1] /= 2;\
524                         h->mvd_cache[list][idx][1] /= 2;\
525                     }
526                     MAP_MVS
527 #undef MAP_F2F
528                 }else{
529 #define MAP_F2F(idx, mb_type)\
530                     if(IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\
531                         h->ref_cache[list][idx] >>= 1;\
532                         h->mv_cache[list][idx][1] <<= 1;\
533                         h->mvd_cache[list][idx][1] <<= 1;\
534                     }
535                     MAP_MVS
536 #undef MAP_F2F
537                 }
538             }
539         }
540     }
541 #endif
542
543     h->neighbor_transform_size= !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[0]);
544 }
545
546 static inline void write_back_intra_pred_mode(H264Context *h){
547     const int mb_xy= h->mb_xy;
548
549     h->intra4x4_pred_mode[mb_xy][0]= h->intra4x4_pred_mode_cache[7+8*1];
550     h->intra4x4_pred_mode[mb_xy][1]= h->intra4x4_pred_mode_cache[7+8*2];
551     h->intra4x4_pred_mode[mb_xy][2]= h->intra4x4_pred_mode_cache[7+8*3];
552     h->intra4x4_pred_mode[mb_xy][3]= h->intra4x4_pred_mode_cache[7+8*4];
553     h->intra4x4_pred_mode[mb_xy][4]= h->intra4x4_pred_mode_cache[4+8*4];
554     h->intra4x4_pred_mode[mb_xy][5]= h->intra4x4_pred_mode_cache[5+8*4];
555     h->intra4x4_pred_mode[mb_xy][6]= h->intra4x4_pred_mode_cache[6+8*4];
556 }
557
558 /**
559  * checks whether the top & left blocks are available where needed and changes the dc mode so that it only uses the available blocks.
560  */
561 static inline int check_intra4x4_pred_mode(H264Context *h){
562     MpegEncContext * const s = &h->s;
563     static const int8_t top [12]= {-1, 0,LEFT_DC_PRED,-1,-1,-1,-1,-1, 0};
564     static const int8_t left[12]= { 0,-1, TOP_DC_PRED, 0,-1,-1,-1, 0,-1,DC_128_PRED};
565     int i;
566
567     if(!(h->top_samples_available&0x8000)){
568         for(i=0; i<4; i++){
569             int status= top[ h->intra4x4_pred_mode_cache[scan8[0] + i] ];
570             if(status<0){
571                 av_log(h->s.avctx, AV_LOG_ERROR, "top block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y);
572                 return -1;
573             } else if(status){
574                 h->intra4x4_pred_mode_cache[scan8[0] + i]= status;
575             }
576         }
577     }
578
579     if(!(h->left_samples_available&0x8000)){
580         for(i=0; i<4; i++){
581             int status= left[ h->intra4x4_pred_mode_cache[scan8[0] + 8*i] ];
582             if(status<0){
583                 av_log(h->s.avctx, AV_LOG_ERROR, "left block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y);
584                 return -1;
585             } else if(status){
586                 h->intra4x4_pred_mode_cache[scan8[0] + 8*i]= status;
587             }
588         }
589     }
590
591     return 0;
592 } //FIXME cleanup like next
593
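/*
 * The top[]/left[] tables below remap the requested prediction mode when the
 * corresponding neighbour is missing: DC falls back to the LEFT_DC / TOP_DC /
 * DC_128 variants, while modes that need the missing samples map to -1 and
 * are reported as an error.
 */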
594 /**
595  * checks whether the top & left blocks are available where needed and changes the dc mode so that it only uses the available blocks.
596  */
597 static inline int check_intra_pred_mode(H264Context *h, int mode){
598     MpegEncContext * const s = &h->s;
599     static const int8_t top [7]= {LEFT_DC_PRED8x8, 1,-1,-1};
600     static const int8_t left[7]= { TOP_DC_PRED8x8,-1, 2,-1,DC_128_PRED8x8};
601
602     if(mode > 6U) {
603         av_log(h->s.avctx, AV_LOG_ERROR, "out of range intra chroma pred mode at %d %d\n", s->mb_x, s->mb_y);
604         return -1;
605     }
606
607     if(!(h->top_samples_available&0x8000)){
608         mode= top[ mode ];
609         if(mode<0){
610             av_log(h->s.avctx, AV_LOG_ERROR, "top block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y);
611             return -1;
612         }
613     }
614
615     if(!(h->left_samples_available&0x8000)){
616         mode= left[ mode ];
617         if(mode<0){
618             av_log(h->s.avctx, AV_LOG_ERROR, "left block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y);
619             return -1;
620         }
621     }
622
623     return mode;
624 }
625
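/*
 * Intra 4x4 mode prediction per the spec: the predicted mode is the minimum
 * of the left and top neighbours' modes, and an unavailable neighbour
 * (cached as -1) forces DC_PRED. Example: left = VERT_PRED (0),
 * top = HOR_DOWN_PRED (6) predicts VERT_PRED; a missing top predicts DC_PRED.
 */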
626 /**
627  * gets the predicted intra4x4 prediction mode.
628  */
629 static inline int pred_intra_mode(H264Context *h, int n){
630     const int index8= scan8[n];
631     const int left= h->intra4x4_pred_mode_cache[index8 - 1];
632     const int top = h->intra4x4_pred_mode_cache[index8 - 8];
633     const int min= FFMIN(left, top);
634
635     tprintf(h->s.avctx, "mode:%d %d min:%d\n", left ,top, min);
636
637     if(min<0) return DC_PRED;
638     else      return min;
639 }
640
641 static inline void write_back_non_zero_count(H264Context *h){
642     const int mb_xy= h->mb_xy;
643
644     h->non_zero_count[mb_xy][0]= h->non_zero_count_cache[7+8*1];
645     h->non_zero_count[mb_xy][1]= h->non_zero_count_cache[7+8*2];
646     h->non_zero_count[mb_xy][2]= h->non_zero_count_cache[7+8*3];
647     h->non_zero_count[mb_xy][3]= h->non_zero_count_cache[7+8*4];
648     h->non_zero_count[mb_xy][4]= h->non_zero_count_cache[4+8*4];
649     h->non_zero_count[mb_xy][5]= h->non_zero_count_cache[5+8*4];
650     h->non_zero_count[mb_xy][6]= h->non_zero_count_cache[6+8*4];
651
652     h->non_zero_count[mb_xy][9]= h->non_zero_count_cache[1+8*2];
653     h->non_zero_count[mb_xy][8]= h->non_zero_count_cache[2+8*2];
654     h->non_zero_count[mb_xy][7]= h->non_zero_count_cache[2+8*1];
655
656     h->non_zero_count[mb_xy][12]=h->non_zero_count_cache[1+8*5];
657     h->non_zero_count[mb_xy][11]=h->non_zero_count_cache[2+8*5];
658     h->non_zero_count[mb_xy][10]=h->non_zero_count_cache[2+8*4];
659
660     if(FRAME_MBAFF){
661         // store all luma nnzs, for deblocking
662         int v = 0, i;
663         for(i=0; i<16; i++)
664             v += (!!h->non_zero_count_cache[scan8[i]]) << i;
665         *(uint16_t*)&h->non_zero_count[mb_xy][14] = v;
666     }
667 }
668
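/*
 * nC prediction for CAVLC: with both neighbours available the spec uses
 * (nA + nB + 1) >> 1. Unavailable neighbours are stored in the cache as 64,
 * so their sum is >= 64, the rounding step is skipped and the "& 31" below
 * strips the marker, leaving the remaining neighbour's count (or 0).
 */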
669 /**
670  * gets the predicted number of non-zero coefficients.
671  * @param n block index
672  */
673 static inline int pred_non_zero_count(H264Context *h, int n){
674     const int index8= scan8[n];
675     const int left= h->non_zero_count_cache[index8 - 1];
676     const int top = h->non_zero_count_cache[index8 - 8];
677     int i= left + top;
678
679     if(i<64) i= (i+1)>>1;
680
681     tprintf(h->s.avctx, "pred_nnz L%X T%X n%d s%d P%X\n", left, top, n, scan8[n], i&31);
682
683     return i&31;
684 }
685
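/*
 * Returns the reference index of the "C" candidate for MV prediction and
 * points *C at its MV: normally the top-right neighbour of the partition,
 * falling back to the top-left one (i - 8 - 1) when top-right is not
 * available, as the spec requires. The FRAME_MBAFF special cases rescale the
 * vertical component and reference index when the neighbour's frame/field
 * coding differs from the current macroblock.
 */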
686 static inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, int list, int part_width){
687     const int topright_ref= h->ref_cache[list][ i - 8 + part_width ];
688     MpegEncContext *s = &h->s;
689
690     /* there is no consistent mapping of mvs to neighboring locations that will
691      * make mbaff happy, so we can't move all this logic to fill_caches */
692     if(FRAME_MBAFF){
693         const uint32_t *mb_types = s->current_picture_ptr->mb_type;
694         const int16_t *mv;
695         *(uint32_t*)h->mv_cache[list][scan8[0]-2] = 0;
696         *C = h->mv_cache[list][scan8[0]-2];
697
698         if(!MB_FIELD
699            && (s->mb_y&1) && i < scan8[0]+8 && topright_ref != PART_NOT_AVAILABLE){
700             int topright_xy = s->mb_x + (s->mb_y-1)*s->mb_stride + (i == scan8[0]+3);
701             if(IS_INTERLACED(mb_types[topright_xy])){
702 #define SET_DIAG_MV(MV_OP, REF_OP, X4, Y4)\
703                 const int x4 = X4, y4 = Y4;\
704                 const int mb_type = mb_types[(x4>>2)+(y4>>2)*s->mb_stride];\
705                 if(!USES_LIST(mb_type,list))\
706                     return LIST_NOT_USED;\
707                 mv = s->current_picture_ptr->motion_val[list][x4 + y4*h->b_stride];\
708                 h->mv_cache[list][scan8[0]-2][0] = mv[0];\
709                 h->mv_cache[list][scan8[0]-2][1] = mv[1] MV_OP;\
710                 return s->current_picture_ptr->ref_index[list][(x4>>1) + (y4>>1)*h->b8_stride] REF_OP;
711
712                 SET_DIAG_MV(*2, >>1, s->mb_x*4+(i&7)-4+part_width, s->mb_y*4-1);
713             }
714         }
715         if(topright_ref == PART_NOT_AVAILABLE
716            && ((s->mb_y&1) || i >= scan8[0]+8) && (i&7)==4
717            && h->ref_cache[list][scan8[0]-1] != PART_NOT_AVAILABLE){
718             if(!MB_FIELD
719                && IS_INTERLACED(mb_types[h->left_mb_xy[0]])){
720                 SET_DIAG_MV(*2, >>1, s->mb_x*4-1, (s->mb_y|1)*4+(s->mb_y&1)*2+(i>>4)-1);
721             }
722             if(MB_FIELD
723                && !IS_INTERLACED(mb_types[h->left_mb_xy[0]])
724                && i >= scan8[0]+8){
725                 // left shift will turn LIST_NOT_USED into PART_NOT_AVAILABLE, but that's OK.
726                 SET_DIAG_MV(/2, <<1, s->mb_x*4-1, (s->mb_y&~1)*4 - 1 + ((i-scan8[0])>>3)*2);
727             }
728         }
729 #undef SET_DIAG_MV
730     }
731
732     if(topright_ref != PART_NOT_AVAILABLE){
733         *C= h->mv_cache[list][ i - 8 + part_width ];
734         return topright_ref;
735     }else{
736         tprintf(s->avctx, "topright MV not available\n");
737
738         *C= h->mv_cache[list][ i - 8 - 1 ];
739         return h->ref_cache[list][ i - 8 - 1 ];
740     }
741 }
742
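/*
 * Standard luma MV prediction: if exactly one of the A/B/C neighbours uses
 * the same reference index, its MV is taken directly; if only A is available
 * (B and C missing), A is used; otherwise the component-wise median of A, B
 * and C is taken. E.g. A=(4,0), B=(0,8), C=(2,2) with no matching reference
 * gives the median (2,2).
 */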
743 /**
744  * gets the predicted MV.
745  * @param n the block index
746  * @param part_width the width of the partition (4, 8, 16) -> (1, 2, 4)
747  * @param mx the x component of the predicted motion vector
748  * @param my the y component of the predicted motion vector
749  */
750 static inline void pred_motion(H264Context * const h, int n, int part_width, int list, int ref, int * const mx, int * const my){
751     const int index8= scan8[n];
752     const int top_ref=      h->ref_cache[list][ index8 - 8 ];
753     const int left_ref=     h->ref_cache[list][ index8 - 1 ];
754     const int16_t * const A= h->mv_cache[list][ index8 - 1 ];
755     const int16_t * const B= h->mv_cache[list][ index8 - 8 ];
756     const int16_t * C;
757     int diagonal_ref, match_count;
758
759     assert(part_width==1 || part_width==2 || part_width==4);
760
761 /* mv_cache
762   B . . A T T T T
763   U . . L . . , .
764   U . . L . . . .
765   U . . L . . , .
766   . . . L . . . .
767 */
768
769     diagonal_ref= fetch_diagonal_mv(h, &C, index8, list, part_width);
770     match_count= (diagonal_ref==ref) + (top_ref==ref) + (left_ref==ref);
771     tprintf(h->s.avctx, "pred_motion match_count=%d\n", match_count);
772     if(match_count > 1){ //most common
773         *mx= mid_pred(A[0], B[0], C[0]);
774         *my= mid_pred(A[1], B[1], C[1]);
775     }else if(match_count==1){
776         if(left_ref==ref){
777             *mx= A[0];
778             *my= A[1];
779         }else if(top_ref==ref){
780             *mx= B[0];
781             *my= B[1];
782         }else{
783             *mx= C[0];
784             *my= C[1];
785         }
786     }else{
787         if(top_ref == PART_NOT_AVAILABLE && diagonal_ref == PART_NOT_AVAILABLE && left_ref != PART_NOT_AVAILABLE){
788             *mx= A[0];
789             *my= A[1];
790         }else{
791             *mx= mid_pred(A[0], B[0], C[0]);
792             *my= mid_pred(A[1], B[1], C[1]);
793         }
794     }
795
796     tprintf(h->s.avctx, "pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1],                    diagonal_ref, C[0], C[1], left_ref, A[0], A[1], ref, *mx, *my, h->s.mb_x, h->s.mb_y, n, list);
797 }
798
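/*
 * pred_16x8_motion() and pred_8x16_motion() below implement the directional
 * special cases for the two rectangular partition shapes: the upper 16x8
 * partition prefers B, the lower one A, the left 8x16 partition prefers A and
 * the right one C, but only when that neighbour has the same reference index;
 * otherwise they fall back to the median prediction in pred_motion().
 */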
799 /**
800  * gets the directionally predicted 16x8 MV.
801  * @param n the block index
802  * @param mx the x component of the predicted motion vector
803  * @param my the y component of the predicted motion vector
804  */
805 static inline void pred_16x8_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
806     if(n==0){
807         const int top_ref=      h->ref_cache[list][ scan8[0] - 8 ];
808         const int16_t * const B= h->mv_cache[list][ scan8[0] - 8 ];
809
810         tprintf(h->s.avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], h->s.mb_x, h->s.mb_y, n, list);
811
812         if(top_ref == ref){
813             *mx= B[0];
814             *my= B[1];
815             return;
816         }
817     }else{
818         const int left_ref=     h->ref_cache[list][ scan8[8] - 1 ];
819         const int16_t * const A= h->mv_cache[list][ scan8[8] - 1 ];
820
821         tprintf(h->s.avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);
822
823         if(left_ref == ref){
824             *mx= A[0];
825             *my= A[1];
826             return;
827         }
828     }
829
830     //RARE
831     pred_motion(h, n, 4, list, ref, mx, my);
832 }
833
834 /**
835  * gets the directionally predicted 8x16 MV.
836  * @param n the block index
837  * @param mx the x component of the predicted motion vector
838  * @param my the y component of the predicted motion vector
839  */
840 static inline void pred_8x16_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
841     if(n==0){
842         const int left_ref=      h->ref_cache[list][ scan8[0] - 1 ];
843         const int16_t * const A=  h->mv_cache[list][ scan8[0] - 1 ];
844
845         tprintf(h->s.avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);
846
847         if(left_ref == ref){
848             *mx= A[0];
849             *my= A[1];
850             return;
851         }
852     }else{
853         const int16_t * C;
854         int diagonal_ref;
855
856         diagonal_ref= fetch_diagonal_mv(h, &C, scan8[4], list, 2);
857
858         tprintf(h->s.avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", diagonal_ref, C[0], C[1], h->s.mb_x, h->s.mb_y, n, list);
859
860         if(diagonal_ref == ref){
861             *mx= C[0];
862             *my= C[1];
863             return;
864         }
865     }
866
867     //RARE
868     pred_motion(h, n, 2, list, ref, mx, my);
869 }
870
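/*
 * P_Skip MV derivation: the skipped MB gets a zero MV if the left or top
 * neighbour is unavailable, or if either of them has reference index 0 with
 * a zero MV; otherwise the normal 16x16 median prediction with refIdx 0 is
 * used.
 */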
871 static inline void pred_pskip_motion(H264Context * const h, int * const mx, int * const my){
872     const int top_ref = h->ref_cache[0][ scan8[0] - 8 ];
873     const int left_ref= h->ref_cache[0][ scan8[0] - 1 ];
874
875     tprintf(h->s.avctx, "pred_pskip: (%d) (%d) at %2d %2d\n", top_ref, left_ref, h->s.mb_x, h->s.mb_y);
876
877     if(top_ref == PART_NOT_AVAILABLE || left_ref == PART_NOT_AVAILABLE
878        || (top_ref == 0  && *(uint32_t*)h->mv_cache[0][ scan8[0] - 8 ] == 0)
879        || (left_ref == 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 1 ] == 0)){
880
881         *mx = *my = 0;
882         return;
883     }
884
885     pred_motion(h, 0, 4, 0, 0, mx, my);
886
887     return;
888 }
889
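/*
 * Temporal direct mode scale factors, following the spec:
 *     tb = clip(-128, 127, poc_cur  - poc_ref0)
 *     td = clip(-128, 127, poc_ref1 - poc_ref0)
 *     tx = (16384 + |td|/2) / td
 *     DistScaleFactor = clip(-1024, 1023, (tb * tx + 32) >> 6)
 * with 256 (i.e. an unscaled copy of the colocated MV) used when td == 0 or
 * the list-0 reference is a long-term picture.
 */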
890 static inline void direct_dist_scale_factor(H264Context * const h){
891     MpegEncContext * const s = &h->s;
892     const int poc = h->s.current_picture_ptr->field_poc[ s->picture_structure == PICT_BOTTOM_FIELD ];
893     const int poc1 = h->ref_list[1][0].poc;
894     int i;
895     for(i=0; i<h->ref_count[0]; i++){
896         int poc0 = h->ref_list[0][i].poc;
897         int td = av_clip(poc1 - poc0, -128, 127);
898         if(td == 0 || h->ref_list[0][i].long_ref){
899             h->dist_scale_factor[i] = 256;
900         }else{
901             int tb = av_clip(poc - poc0, -128, 127);
902             int tx = (16384 + (FFABS(td) >> 1)) / td;
903             h->dist_scale_factor[i] = av_clip((tb*tx + 32) >> 6, -1024, 1023);
904         }
905     }
906     if(FRAME_MBAFF){
907         for(i=0; i<h->ref_count[0]; i++){
908             h->dist_scale_factor_field[2*i] =
909             h->dist_scale_factor_field[2*i+1] = h->dist_scale_factor[i];
910         }
911     }
912 }
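/*
 * direct_ref_list_init() records the current picture's reference counts and
 * POCs (for later use as a colocated picture) and builds map_col_to_list0[],
 * which maps each reference of the colocated picture's lists to the current
 * list-0 entry with the same POC; temporal direct prediction uses this to
 * translate the colocated block's reference index.
 */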
913 static inline void direct_ref_list_init(H264Context * const h){
914     MpegEncContext * const s = &h->s;
915     Picture * const ref1 = &h->ref_list[1][0];
916     Picture * const cur = s->current_picture_ptr;
917     int list, i, j;
918     int sidx= s->picture_structure&1;
919     int ref1sidx= ref1->reference&1;
920     for(list=0; list<2; list++){
921         cur->ref_count[sidx][list] = h->ref_count[list];
922         for(j=0; j<h->ref_count[list]; j++)
923             cur->ref_poc[sidx][list][j] = 4*h->ref_list[list][j].frame_num + (h->ref_list[list][j].reference&3);
924     }
925     if(s->picture_structure == PICT_FRAME){
926         memcpy(cur->ref_count[0], cur->ref_count[1], sizeof(cur->ref_count[0]));
927         memcpy(cur->ref_poc  [0], cur->ref_poc  [1], sizeof(cur->ref_poc  [0]));
928     }
929     if(cur->pict_type != FF_B_TYPE || h->direct_spatial_mv_pred)
930         return;
931     for(list=0; list<2; list++){
932         for(i=0; i<ref1->ref_count[ref1sidx][list]; i++){
933             int poc = ref1->ref_poc[ref1sidx][list][i];
934             if(((poc&3) == 3) != (s->picture_structure == PICT_FRAME))
935                 poc= (poc&~3) + s->picture_structure;
936             h->map_col_to_list0[list][i] = 0; /* bogus; fills in for missing frames */
937             for(j=0; j<h->ref_count[list]; j++)
938                 if(4*h->ref_list[list][j].frame_num + (h->ref_list[list][j].reference&3) == poc){
939                     h->map_col_to_list0[list][i] = j;
940                     break;
941                 }
942         }
943     }
944     if(FRAME_MBAFF){
945         for(list=0; list<2; list++){
946             for(i=0; i<ref1->ref_count[ref1sidx][list]; i++){
947                 j = h->map_col_to_list0[list][i];
948                 h->map_col_to_list0_field[list][2*i] = 2*j;
949                 h->map_col_to_list0_field[list][2*i+1] = 2*j+1;
950             }
951         }
952     }
953 }
954
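/*
 * Derives the MVs and reference indices of a B_Direct (or B_Skip) macroblock
 * or 8x8 partition from the colocated block of the first list-1 reference.
 * The first part of the function only picks the right colocated macroblock
 * and strides for the various frame/field combinations; the actual spatial
 * (neighbour-based) and temporal (scaled colocated MV) derivations follow.
 */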
955 static inline void pred_direct_motion(H264Context * const h, int *mb_type){
956     MpegEncContext * const s = &h->s;
957     int b8_stride = h->b8_stride;
958     int b4_stride = h->b_stride;
959     int mb_xy = h->mb_xy;
960     int mb_type_col[2];
961     const int16_t (*l1mv0)[2], (*l1mv1)[2];
962     const int8_t *l1ref0, *l1ref1;
963     const int is_b8x8 = IS_8X8(*mb_type);
964     unsigned int sub_mb_type;
965     int i8, i4;
966
967 #define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16|MB_TYPE_INTRA4x4|MB_TYPE_INTRA16x16|MB_TYPE_INTRA_PCM)
968
969     if(IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])){ // AFL/AFR/FR/FL -> AFL/FL
970         if(h->ref_list[1][0].reference == PICT_FRAME){   // AFL/AFR/FR/FL -> AFL
971             if(!IS_INTERLACED(*mb_type)){                //     AFR/FR    -> AFL
972                 int cur_poc = s->current_picture_ptr->poc;
973                 int *col_poc = h->ref_list[1]->field_poc;
974                 int col_parity = FFABS(col_poc[0] - cur_poc) >= FFABS(col_poc[1] - cur_poc);
975                 mb_xy= s->mb_x + ((s->mb_y&~1) + col_parity)*s->mb_stride;
976                 l1mv0  = &h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
977                 l1mv1  = &h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
978                 l1ref0 = &h->ref_list[1][0].ref_index [0][h->mb2b8_xy[mb_xy]];
979                 l1ref1 = &h->ref_list[1][0].ref_index [1][h->mb2b8_xy[mb_xy]];
980                 if(s->mb_y&1){
981                     l1ref0 +=   b8_stride;
982                     l1ref1 +=   b8_stride;
983                     l1mv0  += 2*b4_stride;
984                     l1mv1  += 2*b4_stride;
985                 }
986                 b8_stride = 0;
987             }
988         }else if(!(s->picture_structure & h->ref_list[1][0].reference)){// FL -> FL & differ parity
989             int fieldoff= 2*(h->ref_list[1][0].reference)-3;
990             mb_xy += s->mb_stride*fieldoff;
991         }
992         goto single_col;
993     }else{                                               // AFL/AFR/FR/FL -> AFR/FR
994         if(IS_INTERLACED(*mb_type)){                     // AFL       /FL -> AFR/FR
995             mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride;
996             mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
997             mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + s->mb_stride];
998             b8_stride *= 3;
999             b4_stride *= 6;
1000             //FIXME IS_8X8(mb_type_col[0]) && !h->sps.direct_8x8_inference_flag
1001             if(    (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)
1002                 && (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA)
1003                 && !is_b8x8){
1004                 sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
1005                 *mb_type   |= MB_TYPE_16x8 |MB_TYPE_L0L1|MB_TYPE_DIRECT2; /* B_16x8 */
1006             }else{
1007                 sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
1008                 *mb_type   |= MB_TYPE_8x8|MB_TYPE_L0L1;
1009             }
1010         }else{                                           //     AFR/FR    -> AFR/FR
1011 single_col:
1012             mb_type_col[0] =
1013             mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];
1014             if(IS_8X8(mb_type_col[0]) && !h->sps.direct_8x8_inference_flag){
1015                 /* FIXME save sub mb types from previous frames (or derive from MVs)
1016                 * so we know exactly what block size to use */
1017                 sub_mb_type = MB_TYPE_8x8|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_4x4 */
1018                 *mb_type   |= MB_TYPE_8x8|MB_TYPE_L0L1;
1019             }else if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
1020                 sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
1021                 *mb_type   |= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_16x16 */
1022             }else{
1023                 sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
1024                 *mb_type   |= MB_TYPE_8x8|MB_TYPE_L0L1;
1025             }
1026         }
1027     }
1028
1029     if(b8_stride){
1030         l1mv0  = &h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
1031         l1mv1  = &h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
1032         l1ref0 = &h->ref_list[1][0].ref_index [0][h->mb2b8_xy[mb_xy]];
1033         l1ref1 = &h->ref_list[1][0].ref_index [1][h->mb2b8_xy[mb_xy]];
1034     }
1035
1036     if(h->direct_spatial_mv_pred){
1037         int ref[2];
1038         int mv[2][2];
1039         int list;
1040
1041         /* FIXME interlacing + spatial direct uses wrong colocated block positions */
1042
1043         /* ref = min(neighbors) */
1044         for(list=0; list<2; list++){
1045             int refa = h->ref_cache[list][scan8[0] - 1];
1046             int refb = h->ref_cache[list][scan8[0] - 8];
1047             int refc = h->ref_cache[list][scan8[0] - 8 + 4];
1048             if(refc == PART_NOT_AVAILABLE)
1049                 refc = h->ref_cache[list][scan8[0] - 8 - 1];
1050             ref[list] = FFMIN3((unsigned)refa, (unsigned)refb, (unsigned)refc);
1051             if(ref[list] < 0)
1052                 ref[list] = -1;
1053         }
1054
1055         if(ref[0] < 0 && ref[1] < 0){
1056             ref[0] = ref[1] = 0;
1057             mv[0][0] = mv[0][1] =
1058             mv[1][0] = mv[1][1] = 0;
1059         }else{
1060             for(list=0; list<2; list++){
1061                 if(ref[list] >= 0)
1062                     pred_motion(h, 0, 4, list, ref[list], &mv[list][0], &mv[list][1]);
1063                 else
1064                     mv[list][0] = mv[list][1] = 0;
1065             }
1066         }
1067
1068         if(ref[1] < 0){
1069             if(!is_b8x8)
1070                 *mb_type &= ~MB_TYPE_L1;
1071             sub_mb_type &= ~MB_TYPE_L1;
1072         }else if(ref[0] < 0){
1073             if(!is_b8x8)
1074                 *mb_type &= ~MB_TYPE_L0;
1075             sub_mb_type &= ~MB_TYPE_L0;
1076         }
1077
1078         if(IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])){
1079             for(i8=0; i8<4; i8++){
1080                 int x8 = i8&1;
1081                 int y8 = i8>>1;
1082                 int xy8 = x8+y8*b8_stride;
1083                 int xy4 = 3*x8+y8*b4_stride;
1084                 int a=0, b=0;
1085
1086                 if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
1087                     continue;
1088                 h->sub_mb_type[i8] = sub_mb_type;
1089
1090                 fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[0], 1);
1091                 fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[1], 1);
1092                 if(!IS_INTRA(mb_type_col[y8])
1093                    && (   (l1ref0[xy8] == 0 && FFABS(l1mv0[xy4][0]) <= 1 && FFABS(l1mv0[xy4][1]) <= 1)
1094                        || (l1ref0[xy8]  < 0 && l1ref1[xy8] == 0 && FFABS(l1mv1[xy4][0]) <= 1 && FFABS(l1mv1[xy4][1]) <= 1))){
1095                     if(ref[0] > 0)
1096                         a= pack16to32(mv[0][0],mv[0][1]);
1097                     if(ref[1] > 0)
1098                         b= pack16to32(mv[1][0],mv[1][1]);
1099                 }else{
1100                     a= pack16to32(mv[0][0],mv[0][1]);
1101                     b= pack16to32(mv[1][0],mv[1][1]);
1102                 }
1103                 fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, a, 4);
1104                 fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, b, 4);
1105             }
1106         }else if(IS_16X16(*mb_type)){
1107             int a=0, b=0;
1108
1109             fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
1110             fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
1111             if(!IS_INTRA(mb_type_col[0])
1112                && (   (l1ref0[0] == 0 && FFABS(l1mv0[0][0]) <= 1 && FFABS(l1mv0[0][1]) <= 1)
1113                    || (l1ref0[0]  < 0 && l1ref1[0] == 0 && FFABS(l1mv1[0][0]) <= 1 && FFABS(l1mv1[0][1]) <= 1
1114                        && (h->x264_build>33 || !h->x264_build)))){
1115                 if(ref[0] > 0)
1116                     a= pack16to32(mv[0][0],mv[0][1]);
1117                 if(ref[1] > 0)
1118                     b= pack16to32(mv[1][0],mv[1][1]);
1119             }else{
1120                 a= pack16to32(mv[0][0],mv[0][1]);
1121                 b= pack16to32(mv[1][0],mv[1][1]);
1122             }
1123             fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, a, 4);
1124             fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, b, 4);
1125         }else{
1126             for(i8=0; i8<4; i8++){
1127                 const int x8 = i8&1;
1128                 const int y8 = i8>>1;
1129
1130                 if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
1131                     continue;
1132                 h->sub_mb_type[i8] = sub_mb_type;
1133
1134                 fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mv[0][0],mv[0][1]), 4);
1135                 fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mv[1][0],mv[1][1]), 4);
1136                 fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[0], 1);
1137                 fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[1], 1);
1138
1139                 /* col_zero_flag */
1140                 if(!IS_INTRA(mb_type_col[0]) && (   l1ref0[x8 + y8*h->b8_stride] == 0
1141                                               || (l1ref0[x8 + y8*h->b8_stride] < 0 && l1ref1[x8 + y8*h->b8_stride] == 0
1142                                                   && (h->x264_build>33 || !h->x264_build)))){
1143                     const int16_t (*l1mv)[2]= l1ref0[x8 + y8*h->b8_stride] == 0 ? l1mv0 : l1mv1;
1144                     if(IS_SUB_8X8(sub_mb_type)){
1145                         const int16_t *mv_col = l1mv[x8*3 + y8*3*h->b_stride];
1146                         if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
1147                             if(ref[0] == 0)
1148                                 fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
1149                             if(ref[1] == 0)
1150                                 fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
1151                         }
1152                     }else
1153                     for(i4=0; i4<4; i4++){
1154                         const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*h->b_stride];
1155                         if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
1156                             if(ref[0] == 0)
1157                                 *(uint32_t*)h->mv_cache[0][scan8[i8*4+i4]] = 0;
1158                             if(ref[1] == 0)
1159                                 *(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] = 0;
1160                         }
1161                     }
1162                 }
1163             }
1164         }
1165     }else{ /* direct temporal mv pred */
1166         const int *map_col_to_list0[2] = {h->map_col_to_list0[0], h->map_col_to_list0[1]};
1167         const int *dist_scale_factor = h->dist_scale_factor;
1168
1169         if(FRAME_MBAFF && IS_INTERLACED(*mb_type)){
1170             map_col_to_list0[0] = h->map_col_to_list0_field[0];
1171             map_col_to_list0[1] = h->map_col_to_list0_field[1];
1172             dist_scale_factor = h->dist_scale_factor_field;
1173         }
1174         if(IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])){
1175             /* FIXME assumes direct_8x8_inference == 1 */
1176             int y_shift  = 2*!IS_INTERLACED(*mb_type);
1177             int ref_shift= FRAME_MBAFF ? y_shift : 1;
1178
1179             for(i8=0; i8<4; i8++){
1180                 const int x8 = i8&1;
1181                 const int y8 = i8>>1;
1182                 int ref0, scale;
1183                 const int16_t (*l1mv)[2]= l1mv0;
1184
1185                 if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
1186                     continue;
1187                 h->sub_mb_type[i8] = sub_mb_type;
1188
1189                 fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
1190                 if(IS_INTRA(mb_type_col[y8])){
1191                     fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
1192                     fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
1193                     fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
1194                     continue;
1195                 }
1196
1197                 ref0 = l1ref0[x8 + y8*b8_stride];
1198                 if(ref0 >= 0)
1199                     ref0 = map_col_to_list0[0][ref0*2>>ref_shift];
1200                 else{
1201                     ref0 = map_col_to_list0[1][l1ref1[x8 + y8*b8_stride]*2>>ref_shift];
1202                     l1mv= l1mv1;
1203                 }
1204                 scale = dist_scale_factor[ref0];
1205                 fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);
1206
1207                 {
1208                     const int16_t *mv_col = l1mv[x8*3 + y8*b4_stride];
1209                     int my_col = (mv_col[1]<<y_shift)/2;
1210                     int mx = (scale * mv_col[0] + 128) >> 8;
1211                     int my = (scale * my_col + 128) >> 8;
1212                     fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4);
1213                     fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-my_col), 4);
1214                 }
1215             }
1216             return;
1217         }
1218
1219         /* one-to-one mv scaling */
1220
1221         if(IS_16X16(*mb_type)){
1222             int ref, mv0, mv1;
1223
1224             fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
1225             if(IS_INTRA(mb_type_col[0])){
1226                 ref=mv0=mv1=0;
1227             }else{
1228                 const int ref0 = l1ref0[0] >= 0 ? map_col_to_list0[0][l1ref0[0]]
1229                                                 : map_col_to_list0[1][l1ref1[0]];
1230                 const int scale = dist_scale_factor[ref0];
1231                 const int16_t *mv_col = l1ref0[0] >= 0 ? l1mv0[0] : l1mv1[0];
1232                 int mv_l0[2];
1233                 mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
1234                 mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
1235                 ref= ref0;
1236                 mv0= pack16to32(mv_l0[0],mv_l0[1]);
1237                 mv1= pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]);
1238             }
1239             fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
1240             fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, mv0, 4);
1241             fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, mv1, 4);
1242         }else{
1243             for(i8=0; i8<4; i8++){
1244                 const int x8 = i8&1;
1245                 const int y8 = i8>>1;
1246                 int ref0, scale;
1247                 const int16_t (*l1mv)[2]= l1mv0;
1248
1249                 if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
1250                     continue;
1251                 h->sub_mb_type[i8] = sub_mb_type;
1252                 fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
1253                 if(IS_INTRA(mb_type_col[0])){
1254                     fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
1255                     fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
1256                     fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
1257                     continue;
1258                 }
1259
1260                 ref0 = l1ref0[x8 + y8*h->b8_stride];
1261                 if(ref0 >= 0)
1262                     ref0 = map_col_to_list0[0][ref0];
1263                 else{
1264                     ref0 = map_col_to_list0[1][l1ref1[x8 + y8*h->b8_stride]];
1265                     l1mv= l1mv1;
1266                 }
1267                 scale = dist_scale_factor[ref0];
1268
1269                 fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);
1270                 if(IS_SUB_8X8(sub_mb_type)){
1271                     const int16_t *mv_col = l1mv[x8*3 + y8*3*h->b_stride];
1272                     int mx = (scale * mv_col[0] + 128) >> 8;
1273                     int my = (scale * mv_col[1] + 128) >> 8;
1274                     fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4);
1275                     fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-mv_col[1]), 4);
1276                 }else
1277                 for(i4=0; i4<4; i4++){
1278                     const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*h->b_stride];
1279                     int16_t *mv_l0 = h->mv_cache[0][scan8[i8*4+i4]];
1280                     mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
1281                     mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
1282                     *(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] =
1283                         pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]);
1284                 }
1285             }
1286         }
1287     }
1288 }
1289
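/**
 * Writes the motion vectors, reference indices and, when CABAC is used, the
 * motion vector differences and direct-mode flags of the current macroblock
 * from the local caches back into the picture-wide tables.
 */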
1290 static inline void write_back_motion(H264Context *h, int mb_type){
1291     MpegEncContext * const s = &h->s;
1292     const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;
1293     const int b8_xy= 2*s->mb_x + 2*s->mb_y*h->b8_stride;
1294     int list;
1295
1296     if(!USES_LIST(mb_type, 0))
1297         fill_rectangle(&s->current_picture.ref_index[0][b8_xy], 2, 2, h->b8_stride, (uint8_t)LIST_NOT_USED, 1);
1298
1299     for(list=0; list<h->list_count; list++){
1300         int y;
1301         if(!USES_LIST(mb_type, list))
1302             continue;
1303
1304         for(y=0; y<4; y++){
1305             *(uint64_t*)s->current_picture.motion_val[list][b_xy + 0 + y*h->b_stride]= *(uint64_t*)h->mv_cache[list][scan8[0]+0 + 8*y];
1306             *(uint64_t*)s->current_picture.motion_val[list][b_xy + 2 + y*h->b_stride]= *(uint64_t*)h->mv_cache[list][scan8[0]+2 + 8*y];
1307         }
1308         if( h->pps.cabac ) {
1309             if(IS_SKIP(mb_type))
1310                 fill_rectangle(h->mvd_table[list][b_xy], 4, 4, h->b_stride, 0, 4);
1311             else
1312             for(y=0; y<4; y++){
1313                 *(uint64_t*)h->mvd_table[list][b_xy + 0 + y*h->b_stride]= *(uint64_t*)h->mvd_cache[list][scan8[0]+0 + 8*y];
1314                 *(uint64_t*)h->mvd_table[list][b_xy + 2 + y*h->b_stride]= *(uint64_t*)h->mvd_cache[list][scan8[0]+2 + 8*y];
1315             }
1316         }
1317
1318         {
1319             int8_t *ref_index = &s->current_picture.ref_index[list][b8_xy];
1320             ref_index[0+0*h->b8_stride]= h->ref_cache[list][scan8[0]];
1321             ref_index[1+0*h->b8_stride]= h->ref_cache[list][scan8[4]];
1322             ref_index[0+1*h->b8_stride]= h->ref_cache[list][scan8[8]];
1323             ref_index[1+1*h->b8_stride]= h->ref_cache[list][scan8[12]];
1324         }
1325     }
1326
1327     if(h->slice_type_nos == FF_B_TYPE && h->pps.cabac){
1328         if(IS_8X8(mb_type)){
1329             uint8_t *direct_table = &h->direct_table[b8_xy];
1330             direct_table[1+0*h->b8_stride] = IS_DIRECT(h->sub_mb_type[1]) ? 1 : 0;
1331             direct_table[0+1*h->b8_stride] = IS_DIRECT(h->sub_mb_type[2]) ? 1 : 0;
1332             direct_table[1+1*h->b8_stride] = IS_DIRECT(h->sub_mb_type[3]) ? 1 : 0;
1333         }
1334     }
1335 }
1336
1337 /**
1338  * Decodes a network abstraction layer unit.
1339  * @param consumed is set to the number of input bytes consumed
1340  * @param length is the length of the input array
1341  * @param dst_length is set to the number of decoded bytes (FIXME: strip the RBSP trailing bits here or in decode_rbsp_trailing()?)
1342  * @returns a pointer to the decoded bytes; may be src+1 if no escape sequences are present
1343  */
1344 static const uint8_t *decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length){
1345     int i, si, di;
1346     uint8_t *dst;
1347     int bufidx;
1348
1349 //    src[0]&0x80;                //forbidden bit
1350     h->nal_ref_idc= src[0]>>5;
1351     h->nal_unit_type= src[0]&0x1F;
1352
1353     src++; length--;
1354 #if 0
1355     for(i=0; i<length; i++)
1356         printf("%2X ", src[i]);
1357 #endif
1358     for(i=0; i+1<length; i+=2){
1359         if(src[i]) continue;
1360         if(i>0 && src[i-1]==0) i--;
1361         if(i+2<length && src[i+1]==0 && src[i+2]<=3){
1362             if(src[i+2]!=3){
1363                 /* startcode, so we must be past the end */
1364                 length=i;
1365             }
1366             break;
1367         }
1368     }
1369
1370     if(i>=length-1){ //no escaped 0
1371         *dst_length= length;
1372         *consumed= length+1; //+1 for the header
1373         return src;
1374     }
1375
1376     bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0; // use second escape buffer for inter data
1377     h->rbsp_buffer[bufidx]= av_fast_realloc(h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx], length);
1378     dst= h->rbsp_buffer[bufidx];
1379
1380     if (dst == NULL){
1381         return NULL;
1382     }
1383
1384 //printf("decoding esc\n");
1385     si=di=0;
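    /* Un-escape the RBSP: whenever two zero bytes are followed by 03, the 03
     * is an emulation prevention byte and is dropped (e.g. the escaped bytes
     * 00 00 03 01 decode to 00 00 01); two zero bytes followed by 00, 01 or
     * 02 signal the next start code and end the NAL unit. */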
1386     while(si<length){
1387         //remove escapes (very rare 1:2^22)
1388         if(si+2<length && src[si]==0 && src[si+1]==0 && src[si+2]<=3){
1389             if(src[si+2]==3){ //escape
1390                 dst[di++]= 0;
1391                 dst[di++]= 0;
1392                 si+=3;
1393                 continue;
1394             }else //next start code
1395                 break;
1396         }
1397
1398         dst[di++]= src[si++];
1399     }
1400
1401     *dst_length= di;
1402     *consumed= si + 1;//+1 for the header
1403 //FIXME store exact number of bits in the getbitcontext (it is needed for decoding)
1404     return dst;
1405 }
1406
1407 /**
1408  * Identifies the exact end of the bitstream.
1409  * @return the length of the trailing bits, or 0 if damaged
1410  */
1411 static int decode_rbsp_trailing(H264Context *h, const uint8_t *src){
1412     int v= *src;
1413     int r;
1414
1415     tprintf(h->s.avctx, "rbsp trailing %X\n", v);
1416
1417     for(r=1; r<9; r++){
1418         if(v&1) return r;
1419         v>>=1;
1420     }
1421     return 0;
1422 }
1423
1424 /**
1425  * IDCT transforms the 16 dc values and dequantizes them.
1426  * @param qp quantization parameter
1427  */
1428 static void h264_luma_dc_dequant_idct_c(DCTELEM *block, int qp, int qmul){
1429 #define stride 16
1430     int i;
1431     int temp[16]; //FIXME check if this is a good idea
1432     static const int x_offset[4]={0, 1*stride, 4* stride,  5*stride};
1433     static const int y_offset[4]={0, 2*stride, 8* stride, 10*stride};
1434
1435 //memset(block, 64, 2*256);
1436 //return;
1437     for(i=0; i<4; i++){
1438         const int offset= y_offset[i];
1439         const int z0= block[offset+stride*0] + block[offset+stride*4];
1440         const int z1= block[offset+stride*0] - block[offset+stride*4];
1441         const int z2= block[offset+stride*1] - block[offset+stride*5];
1442         const int z3= block[offset+stride*1] + block[offset+stride*5];
1443
1444         temp[4*i+0]= z0+z3;
1445         temp[4*i+1]= z1+z2;
1446         temp[4*i+2]= z1-z2;
1447         temp[4*i+3]= z0-z3;
1448     }
1449
1450     for(i=0; i<4; i++){
1451         const int offset= x_offset[i];
1452         const int z0= temp[4*0+i] + temp[4*2+i];
1453         const int z1= temp[4*0+i] - temp[4*2+i];
1454         const int z2= temp[4*1+i] - temp[4*3+i];
1455         const int z3= temp[4*1+i] + temp[4*3+i];
1456
1457         block[stride*0 +offset]= ((((z0 + z3)*qmul + 128 ) >> 8)); //FIXME think about merging this into decode_residual
1458         block[stride*2 +offset]= ((((z1 + z2)*qmul + 128 ) >> 8));
1459         block[stride*8 +offset]= ((((z1 - z2)*qmul + 128 ) >> 8));
1460         block[stride*10+offset]= ((((z0 - z3)*qmul + 128 ) >> 8));
1461     }
1462 }
1463
1464 #if 0
1465 /**
1466  * DCT transforms the 16 dc values.
1467  * @param qp quantization parameter ??? FIXME
1468  */
1469 static void h264_luma_dc_dct_c(DCTELEM *block/*, int qp*/){
1470 //    const int qmul= dequant_coeff[qp][0];
1471     int i;
1472     int temp[16]; //FIXME check if this is a good idea
1473     static const int x_offset[4]={0, 1*stride, 4* stride,  5*stride};
1474     static const int y_offset[4]={0, 2*stride, 8* stride, 10*stride};
1475
1476     for(i=0; i<4; i++){
1477         const int offset= y_offset[i];
1478         const int z0= block[offset+stride*0] + block[offset+stride*4];
1479         const int z1= block[offset+stride*0] - block[offset+stride*4];
1480         const int z2= block[offset+stride*1] - block[offset+stride*5];
1481         const int z3= block[offset+stride*1] + block[offset+stride*5];
1482
1483         temp[4*i+0]= z0+z3;
1484         temp[4*i+1]= z1+z2;
1485         temp[4*i+2]= z1-z2;
1486         temp[4*i+3]= z0-z3;
1487     }
1488
1489     for(i=0; i<4; i++){
1490         const int offset= x_offset[i];
1491         const int z0= temp[4*0+i] + temp[4*2+i];
1492         const int z1= temp[4*0+i] - temp[4*2+i];
1493         const int z2= temp[4*1+i] - temp[4*3+i];
1494         const int z3= temp[4*1+i] + temp[4*3+i];
1495
1496         block[stride*0 +offset]= (z0 + z3)>>1;
1497         block[stride*2 +offset]= (z1 + z2)>>1;
1498         block[stride*8 +offset]= (z1 - z2)>>1;
1499         block[stride*10+offset]= (z0 - z3)>>1;
1500     }
1501 }
1502 #endif
1503
1504 #undef xStride
1505 #undef stride
1506
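/**
 * Inverse transforms the 2x2 chroma DC coefficients and dequantizes them.
 */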
1507 static void chroma_dc_dequant_idct_c(DCTELEM *block, int qp, int qmul){
1508     const int stride= 16*2;
1509     const int xStride= 16;
1510     int a,b,c,d,e;
1511
1512     a= block[stride*0 + xStride*0];
1513     b= block[stride*0 + xStride*1];
1514     c= block[stride*1 + xStride*0];
1515     d= block[stride*1 + xStride*1];
1516
1517     e= a-b;
1518     a= a+b;
1519     b= c-d;
1520     c= c+d;
1521
1522     block[stride*0 + xStride*0]= ((a+c)*qmul) >> 7;
1523     block[stride*0 + xStride*1]= ((e+b)*qmul) >> 7;
1524     block[stride*1 + xStride*0]= ((a-c)*qmul) >> 7;
1525     block[stride*1 + xStride*1]= ((e-b)*qmul) >> 7;
1526 }
1527
1528 #if 0
1529 static void chroma_dc_dct_c(DCTELEM *block){
1530     const int stride= 16*2;
1531     const int xStride= 16;
1532     int a,b,c,d,e;
1533
1534     a= block[stride*0 + xStride*0];
1535     b= block[stride*0 + xStride*1];
1536     c= block[stride*1 + xStride*0];
1537     d= block[stride*1 + xStride*1];
1538
1539     e= a-b;
1540     a= a+b;
1541     b= c-d;
1542     c= c+d;
1543
1544     block[stride*0 + xStride*0]= (a+c);
1545     block[stride*0 + xStride*1]= (e+b);
1546     block[stride*1 + xStride*0]= (a-c);
1547     block[stride*1 + xStride*1]= (e-b);
1548 }
1549 #endif
1550
1551 /**
1552  * gets the chroma qp.
1553  */
1554 static inline int get_chroma_qp(H264Context *h, int t, int qscale){
1555     return h->pps.chroma_qp_table[t][qscale];
1556 }
1557
1558 //FIXME need to check that this does not overflow signed 32 bit for low qp, I am not sure, it's very close
1559 //FIXME check that gcc inlines this (and optimizes intra & separate_dc stuff away)
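/**
 * Quantizes a block of 16 coefficients in scan order with a dead-zone
 * quantizer (the bias is larger for intra blocks); the DC coefficient may be
 * quantized separately with an adjusted shift to avoid overflows.
 * @return the index of the last non-zero coefficient
 */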
1560 static inline int quantize_c(DCTELEM *block, uint8_t *scantable, int qscale, int intra, int separate_dc){
1561     int i;
1562     const int * const quant_table= quant_coeff[qscale];
1563     const int bias= intra ? (1<<QUANT_SHIFT)/3 : (1<<QUANT_SHIFT)/6;
1564     const unsigned int threshold1= (1<<QUANT_SHIFT) - bias - 1;
1565     const unsigned int threshold2= (threshold1<<1);
1566     int last_non_zero;
1567
1568     if(separate_dc){
1569         if(qscale<=18){
1570             //avoid overflows
1571             const int dc_bias= intra ? (1<<(QUANT_SHIFT-2))/3 : (1<<(QUANT_SHIFT-2))/6;
1572             const unsigned int dc_threshold1= (1<<(QUANT_SHIFT-2)) - dc_bias - 1;
1573             const unsigned int dc_threshold2= (dc_threshold1<<1);
1574
1575             int level= block[0]*quant_coeff[qscale+18][0];
1576             if(((unsigned)(level+dc_threshold1))>dc_threshold2){
1577                 if(level>0){
1578                     level= (dc_bias + level)>>(QUANT_SHIFT-2);
1579                     block[0]= level;
1580                 }else{
1581                     level= (dc_bias - level)>>(QUANT_SHIFT-2);
1582                     block[0]= -level;
1583                 }
1584 //                last_non_zero = i;
1585             }else{
1586                 block[0]=0;
1587             }
1588         }else{
1589             const int dc_bias= intra ? (1<<(QUANT_SHIFT+1))/3 : (1<<(QUANT_SHIFT+1))/6;
1590             const unsigned int dc_threshold1= (1<<(QUANT_SHIFT+1)) - dc_bias - 1;
1591             const unsigned int dc_threshold2= (dc_threshold1<<1);
1592
1593             int level= block[0]*quant_table[0];
1594             if(((unsigned)(level+dc_threshold1))>dc_threshold2){
1595                 if(level>0){
1596                     level= (dc_bias + level)>>(QUANT_SHIFT+1);
1597                     block[0]= level;
1598                 }else{
1599                     level= (dc_bias - level)>>(QUANT_SHIFT+1);
1600                     block[0]= -level;
1601                 }
1602 //                last_non_zero = i;
1603             }else{
1604                 block[0]=0;
1605             }
1606         }
1607         last_non_zero= 0;
1608         i=1;
1609     }else{
1610         last_non_zero= -1;
1611         i=0;
1612     }
1613
1614     for(; i<16; i++){
1615         const int j= scantable[i];
1616         int level= block[j]*quant_table[j];
1617
1618 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
1619 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
1620         if(((unsigned)(level+threshold1))>threshold2){
1621             if(level>0){
1622                 level= (bias + level)>>QUANT_SHIFT;
1623                 block[j]= level;
1624             }else{
1625                 level= (bias - level)>>QUANT_SHIFT;
1626                 block[j]= -level;
1627             }
1628             last_non_zero = i;
1629         }else{
1630             block[j]=0;
1631         }
1632     }
1633
1634     return last_non_zero;
1635 }
1636
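/**
 * Motion compensates one partition from a single reference picture:
 * quarter-pel luma and eighth-pel chroma interpolation, using the edge
 * emulation buffer when the motion vector points outside the picture.
 */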
1637 static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, int chroma_height, int delta, int list,
1638                            uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1639                            int src_x_offset, int src_y_offset,
1640                            qpel_mc_func *qpix_op, h264_chroma_mc_func chroma_op){
1641     MpegEncContext * const s = &h->s;
1642     const int mx= h->mv_cache[list][ scan8[n] ][0] + src_x_offset*8;
1643     int my=       h->mv_cache[list][ scan8[n] ][1] + src_y_offset*8;
1644     const int luma_xy= (mx&3) + ((my&3)<<2);
1645     uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*h->mb_linesize;
1646     uint8_t * src_cb, * src_cr;
1647     int extra_width= h->emu_edge_width;
1648     int extra_height= h->emu_edge_height;
1649     int emu=0;
1650     const int full_mx= mx>>2;
1651     const int full_my= my>>2;
1652     const int pic_width  = 16*s->mb_width;
1653     const int pic_height = 16*s->mb_height >> MB_FIELD;
1654
1655     if(!pic->data[0]) //FIXME this is unacceptable, some sensible error concealment must be done for missing reference frames
1656         return;
1657
1658     if(mx&7) extra_width -= 3;
1659     if(my&7) extra_height -= 3;
1660
1661     if(   full_mx < 0-extra_width
1662        || full_my < 0-extra_height
1663        || full_mx + 16/*FIXME*/ > pic_width + extra_width
1664        || full_my + 16/*FIXME*/ > pic_height + extra_height){
1665         ff_emulated_edge_mc(s->edge_emu_buffer, src_y - 2 - 2*h->mb_linesize, h->mb_linesize, 16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height);
1666         src_y= s->edge_emu_buffer + 2 + 2*h->mb_linesize;
1667         emu=1;
1668     }
1669
1670     qpix_op[luma_xy](dest_y, src_y, h->mb_linesize); //FIXME try variable height perhaps?
1671     if(!square){
1672         qpix_op[luma_xy](dest_y + delta, src_y + delta, h->mb_linesize);
1673     }
1674
1675     if(ENABLE_GRAY && s->flags&CODEC_FLAG_GRAY) return;
1676
1677     if(MB_FIELD){
1678         // chroma offset when predicting from a field of opposite parity
1679         my += 2 * ((s->mb_y & 1) - (pic->reference - 1));
1680         emu |= (my>>3) < 0 || (my>>3) + 8 >= (pic_height>>1);
1681     }
1682     src_cb= pic->data[1] + (mx>>3) + (my>>3)*h->mb_uvlinesize;
1683     src_cr= pic->data[2] + (mx>>3) + (my>>3)*h->mb_uvlinesize;
1684
1685     if(emu){
1686         ff_emulated_edge_mc(s->edge_emu_buffer, src_cb, h->mb_uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
1687         src_cb= s->edge_emu_buffer;
1688     }
1689     chroma_op(dest_cb, src_cb, h->mb_uvlinesize, chroma_height, mx&7, my&7);
1690
1691     if(emu){
1692         ff_emulated_edge_mc(s->edge_emu_buffer, src_cr, h->mb_uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
1693         src_cr= s->edge_emu_buffer;
1694     }
1695     chroma_op(dest_cr, src_cr, h->mb_uvlinesize, chroma_height, mx&7, my&7);
1696 }
1697
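/**
 * Unweighted inter prediction of one partition; when both lists are used,
 * the list1 prediction is averaged onto the list0 prediction.
 */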
1698 static inline void mc_part_std(H264Context *h, int n, int square, int chroma_height, int delta,
1699                            uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1700                            int x_offset, int y_offset,
1701                            qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put,
1702                            qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg,
1703                            int list0, int list1){
1704     MpegEncContext * const s = &h->s;
1705     qpel_mc_func *qpix_op=  qpix_put;
1706     h264_chroma_mc_func chroma_op= chroma_put;
1707
1708     dest_y  += 2*x_offset + 2*y_offset*h->  mb_linesize;
1709     dest_cb +=   x_offset +   y_offset*h->mb_uvlinesize;
1710     dest_cr +=   x_offset +   y_offset*h->mb_uvlinesize;
1711     x_offset += 8*s->mb_x;
1712     y_offset += 8*(s->mb_y >> MB_FIELD);
1713
1714     if(list0){
1715         Picture *ref= &h->ref_list[0][ h->ref_cache[0][ scan8[n] ] ];
1716         mc_dir_part(h, ref, n, square, chroma_height, delta, 0,
1717                            dest_y, dest_cb, dest_cr, x_offset, y_offset,
1718                            qpix_op, chroma_op);
1719
1720         qpix_op=  qpix_avg;
1721         chroma_op= chroma_avg;
1722     }
1723
1724     if(list1){
1725         Picture *ref= &h->ref_list[1][ h->ref_cache[1][ scan8[n] ] ];
1726         mc_dir_part(h, ref, n, square, chroma_height, delta, 1,
1727                            dest_y, dest_cb, dest_cr, x_offset, y_offset,
1728                            qpix_op, chroma_op);
1729     }
1730 }
1731
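/**
 * Weighted inter prediction of one partition: bi-prediction is combined with
 * implicit or explicit weights, uni-prediction is scaled by the per-reference
 * weight and offset.
 */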
1732 static inline void mc_part_weighted(H264Context *h, int n, int square, int chroma_height, int delta,
1733                            uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1734                            int x_offset, int y_offset,
1735                            qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put,
1736                            h264_weight_func luma_weight_op, h264_weight_func chroma_weight_op,
1737                            h264_biweight_func luma_weight_avg, h264_biweight_func chroma_weight_avg,
1738                            int list0, int list1){
1739     MpegEncContext * const s = &h->s;
1740
1741     dest_y  += 2*x_offset + 2*y_offset*h->  mb_linesize;
1742     dest_cb +=   x_offset +   y_offset*h->mb_uvlinesize;
1743     dest_cr +=   x_offset +   y_offset*h->mb_uvlinesize;
1744     x_offset += 8*s->mb_x;
1745     y_offset += 8*(s->mb_y >> MB_FIELD);
1746
1747     if(list0 && list1){
1748         /* don't optimize for luma-only case, since B-frames usually
1749          * use implicit weights => chroma too. */
1750         uint8_t *tmp_cb = s->obmc_scratchpad;
1751         uint8_t *tmp_cr = s->obmc_scratchpad + 8;
1752         uint8_t *tmp_y  = s->obmc_scratchpad + 8*h->mb_uvlinesize;
1753         int refn0 = h->ref_cache[0][ scan8[n] ];
1754         int refn1 = h->ref_cache[1][ scan8[n] ];
1755
1756         mc_dir_part(h, &h->ref_list[0][refn0], n, square, chroma_height, delta, 0,
1757                     dest_y, dest_cb, dest_cr,
1758                     x_offset, y_offset, qpix_put, chroma_put);
1759         mc_dir_part(h, &h->ref_list[1][refn1], n, square, chroma_height, delta, 1,
1760                     tmp_y, tmp_cb, tmp_cr,
1761                     x_offset, y_offset, qpix_put, chroma_put);
1762
1763         if(h->use_weight == 2){
1764             int weight0 = h->implicit_weight[refn0][refn1];
1765             int weight1 = 64 - weight0;
1766             luma_weight_avg(  dest_y,  tmp_y,  h->  mb_linesize, 5, weight0, weight1, 0);
1767             chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize, 5, weight0, weight1, 0);
1768             chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize, 5, weight0, weight1, 0);
1769         }else{
1770             luma_weight_avg(dest_y, tmp_y, h->mb_linesize, h->luma_log2_weight_denom,
1771                             h->luma_weight[0][refn0], h->luma_weight[1][refn1],
1772                             h->luma_offset[0][refn0] + h->luma_offset[1][refn1]);
1773             chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize, h->chroma_log2_weight_denom,
1774                             h->chroma_weight[0][refn0][0], h->chroma_weight[1][refn1][0],
1775                             h->chroma_offset[0][refn0][0] + h->chroma_offset[1][refn1][0]);
1776             chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize, h->chroma_log2_weight_denom,
1777                             h->chroma_weight[0][refn0][1], h->chroma_weight[1][refn1][1],
1778                             h->chroma_offset[0][refn0][1] + h->chroma_offset[1][refn1][1]);
1779         }
1780     }else{
1781         int list = list1 ? 1 : 0;
1782         int refn = h->ref_cache[list][ scan8[n] ];
1783         Picture *ref= &h->ref_list[list][refn];
1784         mc_dir_part(h, ref, n, square, chroma_height, delta, list,
1785                     dest_y, dest_cb, dest_cr, x_offset, y_offset,
1786                     qpix_put, chroma_put);
1787
1788         luma_weight_op(dest_y, h->mb_linesize, h->luma_log2_weight_denom,
1789                        h->luma_weight[list][refn], h->luma_offset[list][refn]);
1790         if(h->use_weight_chroma){
1791             chroma_weight_op(dest_cb, h->mb_uvlinesize, h->chroma_log2_weight_denom,
1792                              h->chroma_weight[list][refn][0], h->chroma_offset[list][refn][0]);
1793             chroma_weight_op(dest_cr, h->mb_uvlinesize, h->chroma_log2_weight_denom,
1794                              h->chroma_weight[list][refn][1], h->chroma_offset[list][refn][1]);
1795         }
1796     }
1797 }
1798
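/**
 * Chooses between weighted and unweighted motion compensation for one
 * partition; implicit bi-prediction falls back to a plain average when both
 * weights equal 32.
 */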
1799 static inline void mc_part(H264Context *h, int n, int square, int chroma_height, int delta,
1800                            uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1801                            int x_offset, int y_offset,
1802                            qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put,
1803                            qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg,
1804                            h264_weight_func *weight_op, h264_biweight_func *weight_avg,
1805                            int list0, int list1){
1806     if((h->use_weight==2 && list0 && list1
1807         && (h->implicit_weight[ h->ref_cache[0][scan8[n]] ][ h->ref_cache[1][scan8[n]] ] != 32))
1808        || h->use_weight==1)
1809         mc_part_weighted(h, n, square, chroma_height, delta, dest_y, dest_cb, dest_cr,
1810                          x_offset, y_offset, qpix_put, chroma_put,
1811                          weight_op[0], weight_op[3], weight_avg[0], weight_avg[3], list0, list1);
1812     else
1813         mc_part_std(h, n, square, chroma_height, delta, dest_y, dest_cb, dest_cr,
1814                     x_offset, y_offset, qpix_put, chroma_put, qpix_avg, chroma_avg, list0, list1);
1815 }
1816
1817 static inline void prefetch_motion(H264Context *h, int list){
1818     /* fetch pixels for the estimated mv 4 macroblocks ahead,
1819      * optimized for 64-byte cache lines */
1820     MpegEncContext * const s = &h->s;
1821     const int refn = h->ref_cache[list][scan8[0]];
1822     if(refn >= 0){
1823         const int mx= (h->mv_cache[list][scan8[0]][0]>>2) + 16*s->mb_x + 8;
1824         const int my= (h->mv_cache[list][scan8[0]][1]>>2) + 16*s->mb_y;
1825         uint8_t **src= h->ref_list[list][refn].data;
1826         int off= mx + (my + (s->mb_x&3)*4)*h->mb_linesize + 64;
1827         s->dsp.prefetch(src[0]+off, s->linesize, 4);
1828         off= (mx>>1) + ((my>>1) + (s->mb_x&7))*s->uvlinesize + 64;
1829         s->dsp.prefetch(src[1]+off, src[2]-src[1], 2);
1830     }
1831 }
1832
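/**
 * Motion compensates a whole inter macroblock, splitting it according to its
 * partition and sub-partition types, and prefetches reference pixels for the
 * following macroblocks.
 */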
1833 static void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1834                       qpel_mc_func (*qpix_put)[16], h264_chroma_mc_func (*chroma_put),
1835                       qpel_mc_func (*qpix_avg)[16], h264_chroma_mc_func (*chroma_avg),
1836                       h264_weight_func *weight_op, h264_biweight_func *weight_avg){
1837     MpegEncContext * const s = &h->s;
1838     const int mb_xy= h->mb_xy;
1839     const int mb_type= s->current_picture.mb_type[mb_xy];
1840
1841     assert(IS_INTER(mb_type));
1842
1843     prefetch_motion(h, 0);
1844
1845     if(IS_16X16(mb_type)){
1846         mc_part(h, 0, 1, 8, 0, dest_y, dest_cb, dest_cr, 0, 0,
1847                 qpix_put[0], chroma_put[0], qpix_avg[0], chroma_avg[0],
1848                 &weight_op[0], &weight_avg[0],
1849                 IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
1850     }else if(IS_16X8(mb_type)){
1851         mc_part(h, 0, 0, 4, 8, dest_y, dest_cb, dest_cr, 0, 0,
1852                 qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
1853                 &weight_op[1], &weight_avg[1],
1854                 IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
1855         mc_part(h, 8, 0, 4, 8, dest_y, dest_cb, dest_cr, 0, 4,
1856                 qpix_put[1], chroma_put[0], qpix_avg[1], chroma_avg[0],
1857                 &weight_op[1], &weight_avg[1],
1858                 IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
1859     }else if(IS_8X16(mb_type)){
1860         mc_part(h, 0, 0, 8, 8*h->mb_linesize, dest_y, dest_cb, dest_cr, 0, 0,
1861                 qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
1862                 &weight_op[2], &weight_avg[2],
1863                 IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1));
1864         mc_part(h, 4, 0, 8, 8*h->mb_linesize, dest_y, dest_cb, dest_cr, 4, 0,
1865                 qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
1866                 &weight_op[2], &weight_avg[2],
1867                 IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1));
1868     }else{
1869         int i;
1870
1871         assert(IS_8X8(mb_type));
1872
1873         for(i=0; i<4; i++){
1874             const int sub_mb_type= h->sub_mb_type[i];
1875             const int n= 4*i;
1876             int x_offset= (i&1)<<2;
1877             int y_offset= (i&2)<<1;
1878
1879             if(IS_SUB_8X8(sub_mb_type)){
1880                 mc_part(h, n, 1, 4, 0, dest_y, dest_cb, dest_cr, x_offset, y_offset,
1881                     qpix_put[1], chroma_put[1], qpix_avg[1], chroma_avg[1],
1882                     &weight_op[3], &weight_avg[3],
1883                     IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
1884             }else if(IS_SUB_8X4(sub_mb_type)){
1885                 mc_part(h, n  , 0, 2, 4, dest_y, dest_cb, dest_cr, x_offset, y_offset,
1886                     qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
1887                     &weight_op[4], &weight_avg[4],
1888                     IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
1889                 mc_part(h, n+2, 0, 2, 4, dest_y, dest_cb, dest_cr, x_offset, y_offset+2,
1890                     qpix_put[2], chroma_put[1], qpix_avg[2], chroma_avg[1],
1891                     &weight_op[4], &weight_avg[4],
1892                     IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
1893             }else if(IS_SUB_4X8(sub_mb_type)){
1894                 mc_part(h, n  , 0, 4, 4*h->mb_linesize, dest_y, dest_cb, dest_cr, x_offset, y_offset,
1895                     qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
1896                     &weight_op[5], &weight_avg[5],
1897                     IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
1898                 mc_part(h, n+1, 0, 4, 4*h->mb_linesize, dest_y, dest_cb, dest_cr, x_offset+2, y_offset,
1899                     qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
1900                     &weight_op[5], &weight_avg[5],
1901                     IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
1902             }else{
1903                 int j;
1904                 assert(IS_SUB_4X4(sub_mb_type));
1905                 for(j=0; j<4; j++){
1906                     int sub_x_offset= x_offset + 2*(j&1);
1907                     int sub_y_offset= y_offset +   (j&2);
1908                     mc_part(h, n+j, 1, 2, 0, dest_y, dest_cb, dest_cr, sub_x_offset, sub_y_offset,
1909                         qpix_put[2], chroma_put[2], qpix_avg[2], chroma_avg[2],
1910                         &weight_op[6], &weight_avg[6],
1911                         IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1));
1912                 }
1913             }
1914         }
1915     }
1916
1917     prefetch_motion(h, 1);
1918 }
1919
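/**
 * Initializes the static CAVLC tables (coeff_token, total_zeros, run and
 * their chroma DC variants); only the first call does any work.
 */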
1920 static av_cold void decode_init_vlc(void){
1921     static int done = 0;
1922
1923     if (!done) {
1924         int i;
1925         int offset;
1926         done = 1;
1927
1928         chroma_dc_coeff_token_vlc.table = chroma_dc_coeff_token_vlc_table;
1929         chroma_dc_coeff_token_vlc.table_allocated = chroma_dc_coeff_token_vlc_table_size;
1930         init_vlc(&chroma_dc_coeff_token_vlc, CHROMA_DC_COEFF_TOKEN_VLC_BITS, 4*5,
1931                  &chroma_dc_coeff_token_len [0], 1, 1,
1932                  &chroma_dc_coeff_token_bits[0], 1, 1,
1933                  INIT_VLC_USE_NEW_STATIC);
1934
1935         offset = 0;
1936         for(i=0; i<4; i++){
1937             coeff_token_vlc[i].table = coeff_token_vlc_tables+offset;
1938             coeff_token_vlc[i].table_allocated = coeff_token_vlc_tables_size[i];
1939             init_vlc(&coeff_token_vlc[i], COEFF_TOKEN_VLC_BITS, 4*17,
1940                      &coeff_token_len [i][0], 1, 1,
1941                      &coeff_token_bits[i][0], 1, 1,
1942                      INIT_VLC_USE_NEW_STATIC);
1943             offset += coeff_token_vlc_tables_size[i];
1944         }
1945         /*
1946          * This is a one-time safety check to make sure that
1947          * the packed static coeff_token_vlc table sizes
1948          * were initialized correctly.
1949          */
1950         assert(offset == sizeof(coeff_token_vlc_tables)/(sizeof(VLC_TYPE)*2));
1951
1952         for(i=0; i<3; i++){
1953             chroma_dc_total_zeros_vlc[i].table = chroma_dc_total_zeros_vlc_tables[i];
1954             chroma_dc_total_zeros_vlc[i].table_allocated = chroma_dc_total_zeros_vlc_tables_size;
1955             init_vlc(&chroma_dc_total_zeros_vlc[i],
1956                      CHROMA_DC_TOTAL_ZEROS_VLC_BITS, 4,
1957                      &chroma_dc_total_zeros_len [i][0], 1, 1,
1958                      &chroma_dc_total_zeros_bits[i][0], 1, 1,
1959                      INIT_VLC_USE_NEW_STATIC);
1960         }
1961         for(i=0; i<15; i++){
1962             total_zeros_vlc[i].table = total_zeros_vlc_tables[i];
1963             total_zeros_vlc[i].table_allocated = total_zeros_vlc_tables_size;
1964             init_vlc(&total_zeros_vlc[i],
1965                      TOTAL_ZEROS_VLC_BITS, 16,
1966                      &total_zeros_len [i][0], 1, 1,
1967                      &total_zeros_bits[i][0], 1, 1,
1968                      INIT_VLC_USE_NEW_STATIC);
1969         }
1970
1971         for(i=0; i<6; i++){
1972             run_vlc[i].table = run_vlc_tables[i];
1973             run_vlc[i].table_allocated = run_vlc_tables_size;
1974             init_vlc(&run_vlc[i],
1975                      RUN_VLC_BITS, 7,
1976                      &run_len [i][0], 1, 1,
1977                      &run_bits[i][0], 1, 1,
1978                      INIT_VLC_USE_NEW_STATIC);
1979         }
1980         run7_vlc.table = run7_vlc_table;
1981         run7_vlc.table_allocated = run7_vlc_table_size;
1982         init_vlc(&run7_vlc, RUN7_VLC_BITS, 16,
1983                  &run_len [6][0], 1, 1,
1984                  &run_bits[6][0], 1, 1,
1985                  INIT_VLC_USE_NEW_STATIC);
1986     }
1987 }
1988
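/**
 * Frees the tables allocated by alloc_tables(), the stored SPS/PPS sets and
 * the per-thread-context scratch buffers.
 */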
1989 static void free_tables(H264Context *h){
1990     int i;
1991     H264Context *hx;
1992     av_freep(&h->intra4x4_pred_mode);
1993     av_freep(&h->chroma_pred_mode_table);
1994     av_freep(&h->cbp_table);
1995     av_freep(&h->mvd_table[0]);
1996     av_freep(&h->mvd_table[1]);
1997     av_freep(&h->direct_table);
1998     av_freep(&h->non_zero_count);
1999     av_freep(&h->slice_table_base);
2000     h->slice_table= NULL;
2001
2002     av_freep(&h->mb2b_xy);
2003     av_freep(&h->mb2b8_xy);
2004
2005     for(i = 0; i < MAX_SPS_COUNT; i++)
2006         av_freep(h->sps_buffers + i);
2007
2008     for(i = 0; i < MAX_PPS_COUNT; i++)
2009         av_freep(h->pps_buffers + i);
2010
2011     for(i = 0; i < h->s.avctx->thread_count; i++) {
2012         hx = h->thread_context[i];
2013         if(!hx) continue;
2014         av_freep(&hx->top_borders[1]);
2015         av_freep(&hx->top_borders[0]);
2016         av_freep(&hx->s.obmc_scratchpad);
2017     }
2018 }
2019
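/**
 * Precomputes the 8x8 dequantization tables for all 52 qp values:
 * dequant8_coeff_init[qp%6] multiplied by the PPS scaling matrix and shifted
 * left by qp/6. Both matrices share one buffer when their scaling lists match.
 */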
2020 static void init_dequant8_coeff_table(H264Context *h){
2021     int i,q,x;
2022     const int transpose = (h->s.dsp.h264_idct8_add != ff_h264_idct8_add_c); //FIXME ugly
2023     h->dequant8_coeff[0] = h->dequant8_buffer[0];
2024     h->dequant8_coeff[1] = h->dequant8_buffer[1];
2025
2026     for(i=0; i<2; i++ ){
2027         if(i && !memcmp(h->pps.scaling_matrix8[0], h->pps.scaling_matrix8[1], 64*sizeof(uint8_t))){
2028             h->dequant8_coeff[1] = h->dequant8_buffer[0];
2029             break;
2030         }
2031
2032         for(q=0; q<52; q++){
2033             int shift = ff_div6[q];
2034             int idx = ff_rem6[q];
2035             for(x=0; x<64; x++)
2036                 h->dequant8_coeff[i][q][transpose ? (x>>3)|((x&7)<<3) : x] =
2037                     ((uint32_t)dequant8_coeff_init[idx][ dequant8_coeff_init_scan[((x>>1)&12) | (x&3)] ] *
2038                     h->pps.scaling_matrix8[i][x]) << shift;
2039         }
2040     }
2041 }
2042
2043 static void init_dequant4_coeff_table(H264Context *h){
2044     int i,j,q,x;
2045     const int transpose = (h->s.dsp.h264_idct_add != ff_h264_idct_add_c); //FIXME ugly
2046     for(i=0; i<6; i++ ){
2047         h->dequant4_coeff[i] = h->dequant4_buffer[i];
2048         for(j=0; j<i; j++){
2049             if(!memcmp(h->pps.scaling_matrix4[j], h->pps.scaling_matrix4[i], 16*sizeof(uint8_t))){
2050                 h->dequant4_coeff[i] = h->dequant4_buffer[j];
2051                 break;
2052             }
2053         }
2054         if(j<i)
2055             continue;
2056
2057         for(q=0; q<52; q++){
2058             int shift = ff_div6[q] + 2;
2059             int idx = ff_rem6[q];
2060             for(x=0; x<16; x++)
2061                 h->dequant4_coeff[i][q][transpose ? (x>>2)|((x<<2)&0xF) : x] =
2062                     ((uint32_t)dequant4_coeff_init[idx][(x&1) + ((x>>2)&1)] *
2063                     h->pps.scaling_matrix4[i][x]) << shift;
2064         }
2065     }
2066 }
2067
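/**
 * Builds the 4x4 (and, when the 8x8 transform is enabled, the 8x8)
 * dequantization tables; for lossless streams (transform bypass) the qp==0
 * entries are reset to a flat scale.
 */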
2068 static void init_dequant_tables(H264Context *h){
2069     int i,x;
2070     init_dequant4_coeff_table(h);
2071     if(h->pps.transform_8x8_mode)
2072         init_dequant8_coeff_table(h);
2073     if(h->sps.transform_bypass){
2074         for(i=0; i<6; i++)
2075             for(x=0; x<16; x++)
2076                 h->dequant4_coeff[i][0][x] = 1<<6;
2077         if(h->pps.transform_8x8_mode)
2078             for(i=0; i<2; i++)
2079                 for(x=0; x<64; x++)
2080                     h->dequant8_coeff[i][0][x] = 1<<6;
2081     }
2082 }
2083
2084
2085 /**
2086  * allocates tables.
2087  * needs width/height
2088  */
2089 static int alloc_tables(H264Context *h){
2090     MpegEncContext * const s = &h->s;
2091     const int big_mb_num= s->mb_stride * (s->mb_height+1);
2092     int x,y;
2093
2094     CHECKED_ALLOCZ(h->intra4x4_pred_mode, big_mb_num * 8  * sizeof(uint8_t))
2095
2096     CHECKED_ALLOCZ(h->non_zero_count    , big_mb_num * 16 * sizeof(uint8_t))
2097     CHECKED_ALLOCZ(h->slice_table_base  , (big_mb_num+s->mb_stride) * sizeof(uint8_t))
2098     CHECKED_ALLOCZ(h->cbp_table, big_mb_num * sizeof(uint16_t))
2099
2100     CHECKED_ALLOCZ(h->chroma_pred_mode_table, big_mb_num * sizeof(uint8_t))
2101     CHECKED_ALLOCZ(h->mvd_table[0], 32*big_mb_num * sizeof(uint16_t));
2102     CHECKED_ALLOCZ(h->mvd_table[1], 32*big_mb_num * sizeof(uint16_t));
2103     CHECKED_ALLOCZ(h->direct_table, 32*big_mb_num * sizeof(uint8_t));
2104
2105     memset(h->slice_table_base, -1, (big_mb_num+s->mb_stride)  * sizeof(uint8_t));
2106     h->slice_table= h->slice_table_base + s->mb_stride*2 + 1;
2107
2108     CHECKED_ALLOCZ(h->mb2b_xy  , big_mb_num * sizeof(uint32_t));
2109     CHECKED_ALLOCZ(h->mb2b8_xy , big_mb_num * sizeof(uint32_t));
2110     for(y=0; y<s->mb_height; y++){
2111         for(x=0; x<s->mb_width; x++){
2112             const int mb_xy= x + y*s->mb_stride;
2113             const int b_xy = 4*x + 4*y*h->b_stride;
2114             const int b8_xy= 2*x + 2*y*h->b8_stride;
2115
2116             h->mb2b_xy [mb_xy]= b_xy;
2117             h->mb2b8_xy[mb_xy]= b8_xy;
2118         }
2119     }
2120
2121     s->obmc_scratchpad = NULL;
2122
2123     if(!h->dequant4_coeff[0])
2124         init_dequant_tables(h);
2125
2126     return 0;
2127 fail:
2128     free_tables(h);
2129     return -1;
2130 }
2131
2132 /**
2133  * Mimic alloc_tables(), but for every context thread.
2134  */
2135 static void clone_tables(H264Context *dst, H264Context *src){
2136     dst->intra4x4_pred_mode       = src->intra4x4_pred_mode;
2137     dst->non_zero_count           = src->non_zero_count;
2138     dst->slice_table              = src->slice_table;
2139     dst->cbp_table                = src->cbp_table;
2140     dst->mb2b_xy                  = src->mb2b_xy;
2141     dst->mb2b8_xy                 = src->mb2b8_xy;
2142     dst->chroma_pred_mode_table   = src->chroma_pred_mode_table;
2143     dst->mvd_table[0]             = src->mvd_table[0];
2144     dst->mvd_table[1]             = src->mvd_table[1];
2145     dst->direct_table             = src->direct_table;
2146
2147     dst->s.obmc_scratchpad = NULL;
2148     ff_h264_pred_init(&dst->hpc, src->s.codec_id);
2149 }
2150
2151 /**
2152  * Init context
2153  * Allocate buffers which are not shared amongst multiple threads.
2154  */
2155 static int context_init(H264Context *h){
2156     CHECKED_ALLOCZ(h->top_borders[0], h->s.mb_width * (16+8+8) * sizeof(uint8_t))
2157     CHECKED_ALLOCZ(h->top_borders[1], h->s.mb_width * (16+8+8) * sizeof(uint8_t))
2158
2159     return 0;
2160 fail:
2161     return -1; // free_tables will clean up for us
2162 }
2163
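/**
 * Initializes the context fields that do not depend on the bitstream:
 * picture dimensions, intra prediction functions and flat default scaling
 * matrices.
 */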
2164 static av_cold void common_init(H264Context *h){
2165     MpegEncContext * const s = &h->s;
2166
2167     s->width = s->avctx->width;
2168     s->height = s->avctx->height;
2169     s->codec_id= s->avctx->codec->id;
2170
2171     ff_h264_pred_init(&h->hpc, s->codec_id);
2172
2173     h->dequant_coeff_pps= -1;
2174     s->unrestricted_mv=1;
2175     s->decode=1; //FIXME
2176
2177     memset(h->pps.scaling_matrix4, 16, 6*16*sizeof(uint8_t));
2178     memset(h->pps.scaling_matrix8, 16, 2*64*sizeof(uint8_t));
2179 }
2180
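/**
 * AVCodec init callback: sets up the MpegEncContext defaults, the output
 * pixel format and the CAVLC tables, and detects AVC streams from the avcC
 * extradata header.
 */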
2181 static av_cold int decode_init(AVCodecContext *avctx){
2182     H264Context *h= avctx->priv_data;
2183     MpegEncContext * const s = &h->s;
2184
2185     MPV_decode_defaults(s);
2186
2187     s->avctx = avctx;
2188     common_init(h);
2189
2190     s->out_format = FMT_H264;
2191     s->workaround_bugs= avctx->workaround_bugs;
2192
2193     // set defaults
2194 //    s->decode_mb= ff_h263_decode_mb;
2195     s->quarter_sample = 1;
2196     s->low_delay= 1;
2197
2198     if(avctx->codec_id == CODEC_ID_SVQ3)
2199         avctx->pix_fmt= PIX_FMT_YUVJ420P;
2200     else
2201         avctx->pix_fmt= PIX_FMT_YUV420P;
2202
2203     decode_init_vlc();
2204
2205     if(avctx->extradata_size > 0 && avctx->extradata &&
2206        *(char *)avctx->extradata == 1){
2207         h->is_avc = 1;
2208         h->got_avcC = 0;
2209     } else {
2210         h->is_avc = 0;
2211     }
2212
2213     h->thread_context[0] = h;
2214     h->outputed_poc = INT_MIN;
2215     return 0;
2216 }
2217
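/**
 * Starts decoding a new frame: allocates the target picture through
 * MPV_frame_start(), precomputes the block offsets and (re)initializes the
 * per-frame state such as the scratch buffers and the slice table.
 */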
2218 static int frame_start(H264Context *h){
2219     MpegEncContext * const s = &h->s;
2220     int i;
2221
2222     if(MPV_frame_start(s, s->avctx) < 0)
2223         return -1;
2224     ff_er_frame_start(s);
2225     /*
2226      * MPV_frame_start uses pict_type to derive key_frame.
2227      * This is incorrect for H.264; IDR markings must be used.
2228      * Zero here; IDR markings per slice in frame or fields are ORed in later.
2229      * See decode_nal_units().
2230      */
2231     s->current_picture_ptr->key_frame= 0;
2232
2233     assert(s->linesize && s->uvlinesize);
2234
2235     for(i=0; i<16; i++){
2236         h->block_offset[i]= 4*((scan8[i] - scan8[0])&7) + 4*s->linesize*((scan8[i] - scan8[0])>>3);
2237         h->block_offset[24+i]= 4*((scan8[i] - scan8[0])&7) + 8*s->linesize*((scan8[i] - scan8[0])>>3);
2238     }
2239     for(i=0; i<4; i++){
2240         h->block_offset[16+i]=
2241         h->block_offset[20+i]= 4*((scan8[i] - scan8[0])&7) + 4*s->uvlinesize*((scan8[i] - scan8[0])>>3);
2242         h->block_offset[24+16+i]=
2243         h->block_offset[24+20+i]= 4*((scan8[i] - scan8[0])&7) + 8*s->uvlinesize*((scan8[i] - scan8[0])>>3);
2244     }
2245
2246     /* can't be in alloc_tables because linesize isn't known there.
2247      * FIXME: redo bipred weight to not require extra buffer? */
2248     for(i = 0; i < s->avctx->thread_count; i++)
2249         if(!h->thread_context[i]->s.obmc_scratchpad)
2250             h->thread_context[i]->s.obmc_scratchpad = av_malloc(16*2*s->linesize + 8*2*s->uvlinesize);
2251
2252     /* some macroblocks will be accessed before they're available */
2253     if(FRAME_MBAFF || s->avctx->thread_count > 1)
2254         memset(h->slice_table, -1, (s->mb_height*s->mb_stride-1) * sizeof(uint8_t));
2255
2256 //    s->decode= (s->flags&CODEC_FLAG_PSNR) || !s->encoding || s->current_picture.reference /*|| h->contains_intra*/ || 1;
2257
2258     // We mark the current picture as non-reference after allocating it, so
2259     // that if we break out due to an error it can be released automatically
2260     // in the next MPV_frame_start().
2261     // SVQ3, like most other codecs, has only last/next/current pictures, so they
2262     // get released even when marked as reference; besides, SVQ3 and the others
2263     // do not mark frames as reference later "naturally".
2264     if(s->codec_id != CODEC_ID_SVQ3)
2265         s->current_picture_ptr->reference= 0;
2266
2267     s->current_picture_ptr->field_poc[0]=
2268     s->current_picture_ptr->field_poc[1]= INT_MAX;
2269     assert(s->current_picture_ptr->long_ref==0);
2270
2271     return 0;
2272 }
2273
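/**
 * Saves the bottom row and right column of the just-reconstructed macroblock
 * before the deblocking filter modifies them, so that intra prediction of the
 * neighbouring macroblocks can use the unfiltered samples.
 */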
2274 static inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple){
2275     MpegEncContext * const s = &h->s;
2276     int i;
2277
2278     src_y  -=   linesize;
2279     src_cb -= uvlinesize;
2280     src_cr -= uvlinesize;
2281
2282     // There are two lines saved, the line above the top macroblock of a pair,
2283     // and the line above the bottom macroblock
2284     h->left_border[0]= h->top_borders[0][s->mb_x][15];
2285     for(i=1; i<17; i++){
2286         h->left_border[i]= src_y[15+i*  linesize];
2287     }
2288
2289     *(uint64_t*)(h->top_borders[0][s->mb_x]+0)= *(uint64_t*)(src_y +  16*linesize);
2290     *(uint64_t*)(h->top_borders[0][s->mb_x]+8)= *(uint64_t*)(src_y +8+16*linesize);
2291
2292     if(simple || !ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2293         h->left_border[17  ]= h->top_borders[0][s->mb_x][16+7];
2294         h->left_border[17+9]= h->top_borders[0][s->mb_x][24+7];
2295         for(i=1; i<9; i++){
2296             h->left_border[i+17  ]= src_cb[7+i*uvlinesize];
2297             h->left_border[i+17+9]= src_cr[7+i*uvlinesize];
2298         }
2299         *(uint64_t*)(h->top_borders[0][s->mb_x]+16)= *(uint64_t*)(src_cb+8*uvlinesize);
2300         *(uint64_t*)(h->top_borders[0][s->mb_x]+24)= *(uint64_t*)(src_cr+8*uvlinesize);
2301     }
2302 }
2303
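/**
 * Exchanges the saved unfiltered border samples with the (possibly already
 * deblocked) samples in the picture, so that intra prediction sees its
 * neighbours as they were before the loop filter.
 */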
2304 static inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg, int simple){
2305     MpegEncContext * const s = &h->s;
2306     int temp8, i;
2307     uint64_t temp64;
2308     int deblock_left;
2309     int deblock_top;
2310     int mb_xy;
2311
2312     if(h->deblocking_filter == 2) {
2313         mb_xy = h->mb_xy;
2314         deblock_left = h->slice_table[mb_xy] == h->slice_table[mb_xy - 1];
2315         deblock_top  = h->slice_table[mb_xy] == h->slice_table[h->top_mb_xy];
2316     } else {
2317         deblock_left = (s->mb_x > 0);
2318         deblock_top =  (s->mb_y > 0);
2319     }
2320
2321     src_y  -=   linesize + 1;
2322     src_cb -= uvlinesize + 1;
2323     src_cr -= uvlinesize + 1;
2324
2325 #define XCHG(a,b,t,xchg)\
2326 t= a;\
2327 if(xchg)\
2328     a= b;\
2329 b= t;
2330
2331     if(deblock_left){
2332         for(i = !deblock_top; i<17; i++){
2333             XCHG(h->left_border[i     ], src_y [i*  linesize], temp8, xchg);
2334         }
2335     }
2336
2337     if(deblock_top){
2338         XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+0), *(uint64_t*)(src_y +1), temp64, xchg);
2339         XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+8), *(uint64_t*)(src_y +9), temp64, 1);
2340         if(s->mb_x+1 < s->mb_width){
2341             XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x+1]), *(uint64_t*)(src_y +17), temp64, 1);
2342         }
2343     }
2344
2345     if(simple || !ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2346         if(deblock_left){
2347             for(i = !deblock_top; i<9; i++){
2348                 XCHG(h->left_border[i+17  ], src_cb[i*uvlinesize], temp8, xchg);
2349                 XCHG(h->left_border[i+17+9], src_cr[i*uvlinesize], temp8, xchg);
2350             }
2351         }
2352         if(deblock_top){
2353             XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+16), *(uint64_t*)(src_cb+1), temp64, 1);
2354             XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+24), *(uint64_t*)(src_cr+1), temp64, 1);
2355         }
2356     }
2357 }
2358
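/**
 * MBAFF variant of backup_mb_border(): saves the borders of a whole
 * macroblock pair (two luma border lines and their chroma counterparts).
 */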
2359 static inline void backup_pair_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize){
2360     MpegEncContext * const s = &h->s;
2361     int i;
2362
2363     src_y  -= 2 *   linesize;
2364     src_cb -= 2 * uvlinesize;
2365     src_cr -= 2 * uvlinesize;
2366
2367     // There are two lines saved, the line above the top macroblock of a pair,
2368     // and the line above the bottom macroblock
2369     h->left_border[0]= h->top_borders[0][s->mb_x][15];
2370     h->left_border[1]= h->top_borders[1][s->mb_x][15];
2371     for(i=2; i<34; i++){
2372         h->left_border[i]= src_y[15+i*  linesize];
2373     }
2374
2375     *(uint64_t*)(h->top_borders[0][s->mb_x]+0)= *(uint64_t*)(src_y +  32*linesize);
2376     *(uint64_t*)(h->top_borders[0][s->mb_x]+8)= *(uint64_t*)(src_y +8+32*linesize);
2377     *(uint64_t*)(h->top_borders[1][s->mb_x]+0)= *(uint64_t*)(src_y +  33*linesize);
2378     *(uint64_t*)(h->top_borders[1][s->mb_x]+8)= *(uint64_t*)(src_y +8+33*linesize);
2379
2380     if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2381         h->left_border[34     ]= h->top_borders[0][s->mb_x][16+7];
2382         h->left_border[34+   1]= h->top_borders[1][s->mb_x][16+7];
2383         h->left_border[34+18  ]= h->top_borders[0][s->mb_x][24+7];
2384         h->left_border[34+18+1]= h->top_borders[1][s->mb_x][24+7];
2385         for(i=2; i<18; i++){
2386             h->left_border[i+34   ]= src_cb[7+i*uvlinesize];
2387             h->left_border[i+34+18]= src_cr[7+i*uvlinesize];
2388         }
2389         *(uint64_t*)(h->top_borders[0][s->mb_x]+16)= *(uint64_t*)(src_cb+16*uvlinesize);
2390         *(uint64_t*)(h->top_borders[0][s->mb_x]+24)= *(uint64_t*)(src_cr+16*uvlinesize);
2391         *(uint64_t*)(h->top_borders[1][s->mb_x]+16)= *(uint64_t*)(src_cb+17*uvlinesize);
2392         *(uint64_t*)(h->top_borders[1][s->mb_x]+24)= *(uint64_t*)(src_cr+17*uvlinesize);
2393     }
2394 }
2395
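/**
 * MBAFF variant of xchg_mb_border(): exchanges the saved borders of a whole
 * macroblock pair.
 */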
2396 static inline void xchg_pair_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg){
2397     MpegEncContext * const s = &h->s;
2398     int temp8, i;
2399     uint64_t temp64;
2400     int deblock_left = (s->mb_x > 0);
2401     int deblock_top  = (s->mb_y > 1);
2402
2403     tprintf(s->avctx, "xchg_pair_border: src_y:%p src_cb:%p src_cr:%p ls:%d uvls:%d\n", src_y, src_cb, src_cr, linesize, uvlinesize);
2404
2405     src_y  -= 2 *   linesize + 1;
2406     src_cb -= 2 * uvlinesize + 1;
2407     src_cr -= 2 * uvlinesize + 1;
2408
2409 #define XCHG(a,b,t,xchg)\
2410 t= a;\
2411 if(xchg)\
2412     a= b;\
2413 b= t;
2414
2415     if(deblock_left){
2416         for(i = (!deblock_top)<<1; i<34; i++){
2417             XCHG(h->left_border[i     ], src_y [i*  linesize], temp8, xchg);
2418         }
2419     }
2420
2421     if(deblock_top){
2422         XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+0), *(uint64_t*)(src_y +1), temp64, xchg);
2423         XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+8), *(uint64_t*)(src_y +9), temp64, 1);
2424         XCHG(*(uint64_t*)(h->top_borders[1][s->mb_x]+0), *(uint64_t*)(src_y +1 +linesize), temp64, xchg);
2425         XCHG(*(uint64_t*)(h->top_borders[1][s->mb_x]+8), *(uint64_t*)(src_y +9 +linesize), temp64, 1);
2426         if(s->mb_x+1 < s->mb_width){
2427             XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x+1]), *(uint64_t*)(src_y +17), temp64, 1);
2428             XCHG(*(uint64_t*)(h->top_borders[1][s->mb_x+1]), *(uint64_t*)(src_y +17 +linesize), temp64, 1);
2429         }
2430     }
2431
2432     if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2433         if(deblock_left){
2434             for(i = (!deblock_top) << 1; i<18; i++){
2435                 XCHG(h->left_border[i+34   ], src_cb[i*uvlinesize], temp8, xchg);
2436                 XCHG(h->left_border[i+34+18], src_cr[i*uvlinesize], temp8, xchg);
2437             }
2438         }
2439         if(deblock_top){
2440             XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+16), *(uint64_t*)(src_cb+1), temp64, 1);
2441             XCHG(*(uint64_t*)(h->top_borders[0][s->mb_x]+24), *(uint64_t*)(src_cr+1), temp64, 1);
2442             XCHG(*(uint64_t*)(h->top_borders[1][s->mb_x]+16), *(uint64_t*)(src_cb+1 +uvlinesize), temp64, 1);
2443             XCHG(*(uint64_t*)(h->top_borders[1][s->mb_x]+24), *(uint64_t*)(src_cr+1 +uvlinesize), temp64, 1);
2444         }
2445     }
2446 }
2447
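/**
 * Reconstructs one macroblock: intra prediction or motion compensation,
 * followed by the inverse transform of the residual. The 'simple' variant
 * omits the MBAFF, PCM, grayscale and SVQ3 code paths.
 */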
2448 static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple){
2449     MpegEncContext * const s = &h->s;
2450     const int mb_x= s->mb_x;
2451     const int mb_y= s->mb_y;
2452     const int mb_xy= h->mb_xy;
2453     const int mb_type= s->current_picture.mb_type[mb_xy];
2454     uint8_t  *dest_y, *dest_cb, *dest_cr;
2455     int linesize, uvlinesize /*dct_offset*/;
2456     int i;
2457     int *block_offset = &h->block_offset[0];
2458     const unsigned int bottom = mb_y & 1;
2459     const int transform_bypass = (s->qscale == 0 && h->sps.transform_bypass), is_h264 = (simple || s->codec_id == CODEC_ID_H264);
2460     void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride);
2461     void (*idct_dc_add)(uint8_t *dst, DCTELEM *block, int stride);
2462
2463     dest_y  = s->current_picture.data[0] + (mb_y * 16* s->linesize  ) + mb_x * 16;
2464     dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
2465     dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
2466
2467     s->dsp.prefetch(dest_y + (s->mb_x&3)*4*s->linesize + 64, s->linesize, 4);
2468     s->dsp.prefetch(dest_cb + (s->mb_x&7)*s->uvlinesize + 64, dest_cr - dest_cb, 2);
2469
2470     if (!simple && MB_FIELD) {
2471         linesize   = h->mb_linesize   = s->linesize * 2;
2472         uvlinesize = h->mb_uvlinesize = s->uvlinesize * 2;
2473         block_offset = &h->block_offset[24];
2474         if(mb_y&1){ //FIXME move out of this function?
2475             dest_y -= s->linesize*15;
2476             dest_cb-= s->uvlinesize*7;
2477             dest_cr-= s->uvlinesize*7;
2478         }
2479         if(FRAME_MBAFF) {
2480             int list;
2481             for(list=0; list<h->list_count; list++){
2482                 if(!USES_LIST(mb_type, list))
2483                     continue;
2484                 if(IS_16X16(mb_type)){
2485                     int8_t *ref = &h->ref_cache[list][scan8[0]];
2486                     fill_rectangle(ref, 4, 4, 8, (16+*ref)^(s->mb_y&1), 1);
2487                 }else{
2488                     for(i=0; i<16; i+=4){
2489                         //FIXME can refs be smaller than 8x8 when !direct_8x8_inference ?
2490                         int ref = h->ref_cache[list][scan8[i]];
2491                         if(ref >= 0)
2492                             fill_rectangle(&h->ref_cache[list][scan8[i]], 2, 2, 8, (16+ref)^(s->mb_y&1), 1);
2493                     }
2494                 }
2495             }
2496         }
2497     } else {
2498         linesize   = h->mb_linesize   = s->linesize;
2499         uvlinesize = h->mb_uvlinesize = s->uvlinesize;
2500 //        dct_offset = s->linesize * 16;
2501     }
2502
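         /* Select the residual add routines: with lossless transform bypass the
          * residuals are added to the prediction directly, otherwise the 8x8 or
          * 4x4 H.264 IDCT is used depending on the transform size in mb_type. */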
2503     if(transform_bypass){
2504         idct_dc_add =
2505         idct_add = IS_8x8DCT(mb_type) ? s->dsp.add_pixels8 : s->dsp.add_pixels4;
2506     }else if(IS_8x8DCT(mb_type)){
2507         idct_dc_add = s->dsp.h264_idct8_dc_add;
2508         idct_add = s->dsp.h264_idct8_add;
2509     }else{
2510         idct_dc_add = s->dsp.h264_idct_dc_add;
2511         idct_add = s->dsp.h264_idct_add;
2512     }
2513
2514     if(!simple && FRAME_MBAFF && h->deblocking_filter && IS_INTRA(mb_type)
2515        && (!bottom || !IS_INTRA(s->current_picture.mb_type[mb_xy-s->mb_stride]))){
2516         int mbt_y = mb_y&~1;
2517         uint8_t *top_y  = s->current_picture.data[0] + (mbt_y * 16* s->linesize  ) + mb_x * 16;
2518         uint8_t *top_cb = s->current_picture.data[1] + (mbt_y * 8 * s->uvlinesize) + mb_x * 8;
2519         uint8_t *top_cr = s->current_picture.data[2] + (mbt_y * 8 * s->uvlinesize) + mb_x * 8;
2520         xchg_pair_border(h, top_y, top_cb, top_cr, s->linesize, s->uvlinesize, 1);
2521     }
2522
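         /* I_PCM macroblocks carry raw sample values in h->mb (16x16 luma
          * followed by the two 8x8 chroma blocks); they are copied into the
          * picture as-is, with no prediction or transform involved. */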
2523     if (!simple && IS_INTRA_PCM(mb_type)) {
2524         for (i=0; i<16; i++) {
2525             memcpy(dest_y + i*  linesize, h->mb       + i*8, 16);
2526         }
2527         for (i=0; i<8; i++) {
2528             memcpy(dest_cb+ i*uvlinesize, h->mb + 128 + i*4,  8);
2529             memcpy(dest_cr+ i*uvlinesize, h->mb + 160 + i*4,  8);
2530         }
2531     } else {
2532         if(IS_INTRA(mb_type)){
2533             if(h->deblocking_filter && (simple || !FRAME_MBAFF))
2534                 xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 1, simple);
2535
2536             if(simple || !ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2537                 h->hpc.pred8x8[ h->chroma_pred_mode ](dest_cb, uvlinesize);
2538                 h->hpc.pred8x8[ h->chroma_pred_mode ](dest_cr, uvlinesize);
2539             }
2540
2541             if(IS_INTRA4x4(mb_type)){
2542                 if(simple || !s->encoding){
2543                     if(IS_8x8DCT(mb_type)){
2544                         for(i=0; i<16; i+=4){
2545                             uint8_t * const ptr= dest_y + block_offset[i];
2546                             const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ];
2547                             const int nnz = h->non_zero_count_cache[ scan8[i] ];
2548                             h->hpc.pred8x8l[ dir ](ptr, (h->topleft_samples_available<<i)&0x8000,
2549                                                    (h->topright_samples_available<<i)&0x4000, linesize);
2550                             if(nnz){
2551                                 if(nnz == 1 && h->mb[i*16])
2552                                     idct_dc_add(ptr, h->mb + i*16, linesize);
2553                                 else
2554                                     idct_add(ptr, h->mb + i*16, linesize);
2555                             }
2556                         }
2557                     }else
2558                     for(i=0; i<16; i++){
2559                         uint8_t * const ptr= dest_y + block_offset[i];
2560                         uint8_t *topright;
2561                         const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ];
2562                         int nnz, tr;
2563
2564                         if(dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED){
2565                             const int topright_avail= (h->topright_samples_available<<i)&0x8000;
2566                             assert(mb_y || linesize <= block_offset[i]);
2567                             if(!topright_avail){
2568                                 tr= ptr[3 - linesize]*0x01010101;
2569                                 topright= (uint8_t*) &tr;
2570                             }else
2571                                 topright= ptr + 4 - linesize;
2572                         }else
2573                             topright= NULL;
2574
2575                         h->hpc.pred4x4[ dir ](ptr, topright, linesize);
2576                         nnz = h->non_zero_count_cache[ scan8[i] ];
2577                         if(nnz){
2578                             if(is_h264){
2579                                 if(nnz == 1 && h->mb[i*16])
2580                                     idct_dc_add(ptr, h->mb + i*16, linesize);
2581                                 else
2582                                     idct_add(ptr, h->mb + i*16, linesize);
2583                             }else
2584                                 svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, 0);
2585                         }
2586                     }
2587                 }
2588             }else{
2589                 h->hpc.pred16x16[ h->intra16x16_pred_mode ](dest_y , linesize);
2590                 if(is_h264){
2591                     if(!transform_bypass)
2592                         h264_luma_dc_dequant_idct_c(h->mb, s->qscale, h->dequant4_coeff[0][s->qscale][0]);
2593                 }else
2594                     svq3_luma_dc_dequant_idct_c(h->mb, s->qscale);
2595             }
2596             if(h->deblocking_filter && (simple || !FRAME_MBAFF))
2597                 xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0, simple);
2598         }else if(is_h264){
2599             hl_motion(h, dest_y, dest_cb, dest_cr,
2600                       s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab,
2601                       s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab,
2602                       s->dsp.weight_h264_pixels_tab, s->dsp.biweight_h264_pixels_tab);
2603         }
2604
2605
2606         if(!IS_INTRA4x4(mb_type)){
2607             if(is_h264){
2608                 if(IS_INTRA16x16(mb_type)){
2609                     for(i=0; i<16; i++){
2610                         if(h->non_zero_count_cache[ scan8[i] ])
2611                             idct_add(dest_y + block_offset[i], h->mb + i*16, linesize);
2612                         else if(h->mb[i*16])
2613                             idct_dc_add(dest_y + block_offset[i], h->mb + i*16, linesize);
2614                     }
2615                 }else{
2616                     const int di = IS_8x8DCT(mb_type) ? 4 : 1;
2617                     for(i=0; i<16; i+=di){
2618                         int nnz = h->non_zero_count_cache[ scan8[i] ];
2619                         if(nnz){
2620                             if(nnz==1 && h->mb[i*16])
2621                                 idct_dc_add(dest_y + block_offset[i], h->mb + i*16, linesize);
2622                             else
2623                                 idct_add(dest_y + block_offset[i], h->mb + i*16, linesize);
2624                         }
2625                     }
2626                 }
2627             }else{
2628                 for(i=0; i<16; i++){
2629                     if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){ //FIXME benchmark weird rule, & below
2630                         uint8_t * const ptr= dest_y + block_offset[i];
2631                         svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, IS_INTRA(mb_type) ? 1 : 0);
2632                     }
2633                 }
2634             }
2635         }
2636
2637         if(simple || !ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2638             uint8_t *dest[2] = {dest_cb, dest_cr};
2639             if(transform_bypass){
2640                 idct_add = idct_dc_add = s->dsp.add_pixels4;
2641             }else{
2642                 idct_add = s->dsp.h264_idct_add;
2643                 idct_dc_add = s->dsp.h264_idct_dc_add;
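                     /* The 2x2 chroma DC coefficients get their own dequant +
                      * transform, once per chroma plane with that plane's QP. */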
2644                 chroma_dc_dequant_idct_c(h->mb + 16*16, h->chroma_qp[0], h->dequant4_coeff[IS_INTRA(mb_type) ? 1:4][h->chroma_qp[0]][0]);
2645                 chroma_dc_dequant_idct_c(h->mb + 16*16+4*16, h->chroma_qp[1], h->dequant4_coeff[IS_INTRA(mb_type) ? 2:5][h->chroma_qp[1]][0]);
2646             }
2647             if(is_h264){
2648                 for(i=16; i<16+8; i++){
2649                     if(h->non_zero_count_cache[ scan8[i] ])
2650                         idct_add(dest[(i&4)>>2] + block_offset[i], h->mb + i*16, uvlinesize);
2651                     else if(h->mb[i*16])
2652                         idct_dc_add(dest[(i&4)>>2] + block_offset[i], h->mb + i*16, uvlinesize);
2653                 }
2654             }else{
2655                 for(i=16; i<16+8; i++){
2656                     if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){
2657                         uint8_t * const ptr= dest[(i&4)>>2] + block_offset[i];
2658                         svq3_add_idct_c(ptr, h->mb + i*16, uvlinesize, chroma_qp[s->qscale + 12] - 12, 2);
2659                     }
2660                 }
2661             }
2662         }
2663     }
2664     if(h->deblocking_filter) {
2665         if (!simple && FRAME_MBAFF) {
2666             //FIXME try deblocking one mb at a time?
2667             // the reduction in load/storing mvs and such might outweigh the extra backup/xchg_border
2668             const int mb_y = s->mb_y - 1;
2669             uint8_t  *pair_dest_y, *pair_dest_cb, *pair_dest_cr;
2670             const int mb_xy= mb_x + mb_y*s->mb_stride;
2671             const int mb_type_top   = s->current_picture.mb_type[mb_xy];
2672             const int mb_type_bottom= s->current_picture.mb_type[mb_xy+s->mb_stride];
2673             if (!bottom) return;
2674             pair_dest_y  = s->current_picture.data[0] + (mb_y * 16* s->linesize  ) + mb_x * 16;
2675             pair_dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
2676             pair_dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
2677
2678             if(IS_INTRA(mb_type_top | mb_type_bottom))
2679                 xchg_pair_border(h, pair_dest_y, pair_dest_cb, pair_dest_cr, s->linesize, s->uvlinesize, 0);
2680
2681             backup_pair_border(h, pair_dest_y, pair_dest_cb, pair_dest_cr, s->linesize, s->uvlinesize);
2682             // deblock a pair
2683             // top
2684             s->mb_y--; h->mb_xy -= s->mb_stride;
2685             tprintf(h->s.avctx, "call mbaff filter_mb mb_x:%d mb_y:%d pair_dest_y = %p, dest_y = %p\n", mb_x, mb_y, pair_dest_y, dest_y);
2686             fill_caches(h, mb_type_top, 1); //FIXME don't fill stuff which isn't used by filter_mb
2687             h->chroma_qp[0] = get_chroma_qp(h, 0, s->current_picture.qscale_table[mb_xy]);
2688             h->chroma_qp[1] = get_chroma_qp(h, 1, s->current_picture.qscale_table[mb_xy]);
2689             filter_mb(h, mb_x, mb_y, pair_dest_y, pair_dest_cb, pair_dest_cr, linesize, uvlinesize);
2690             // bottom
2691             s->mb_y++; h->mb_xy += s->mb_stride;
2692             tprintf(h->s.avctx, "call mbaff filter_mb\n");
2693             fill_caches(h, mb_type_bottom, 1); //FIXME don't fill stuff which isn't used by filter_mb
2694             h->chroma_qp[0] = get_chroma_qp(h, 0, s->current_picture.qscale_table[mb_xy+s->mb_stride]);
2695             h->chroma_qp[1] = get_chroma_qp(h, 1, s->current_picture.qscale_table[mb_xy+s->mb_stride]);
2696             filter_mb(h, mb_x, mb_y+1, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
2697         } else {
2698             tprintf(h->s.avctx, "call filter_mb\n");
2699             backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, simple);
2700             fill_caches(h, mb_type, 1); //FIXME don't fill stuff which isn't used by filter_mb
2701             h->chroma_qp[0] = get_chroma_qp(h, 0, s->current_picture.qscale_table[mb_xy]);
2702             h->chroma_qp[1] = get_chroma_qp(h, 1, s->current_picture.qscale_table[mb_xy]);
2703             filter_mb_fast(h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
2704         }
2705     }
2706 }
2707
2708 /**
2709  * Process a macroblock; this variant skips the checks needed only for uncommon, expensive cases.
2710  */
2711 static void hl_decode_mb_simple(H264Context *h){
2712     hl_decode_mb_internal(h, 1);
2713 }
2714
2715 /**
2716  * Process a macroblock; this handles edge cases, such as interlacing.
2717  */
2718 static void av_noinline hl_decode_mb_complex(H264Context *h){
2719     hl_decode_mb_internal(h, 0);
2720 }
2721
2722 static void hl_decode_mb(H264Context *h){
2723     MpegEncContext * const s = &h->s;
2724     const int mb_xy= h->mb_xy;
2725     const int mb_type= s->current_picture.mb_type[mb_xy];
2726     int is_complex = FRAME_MBAFF || MB_FIELD || IS_INTRA_PCM(mb_type) || s->codec_id != CODEC_ID_H264 ||
2727                     (ENABLE_GRAY && (s->flags&CODEC_FLAG_GRAY)) || (ENABLE_H264_ENCODER && s->encoding) || ENABLE_SMALL;
2728
2729     if(ENABLE_H264_ENCODER && !s->decode)
2730         return;
2731
2732     if (is_complex)
2733         hl_decode_mb_complex(h);
2734     else hl_decode_mb_simple(h);
2735 }
2736
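     /**
      * Convert a frame Picture into a view of one of its fields: for the
      * bottom field the data pointers are advanced by one line, and the
      * linesizes are doubled so that only every second line is addressed.
      */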
2737 static void pic_as_field(Picture *pic, const int parity){
2738     int i;
2739     for (i = 0; i < 4; ++i) {
2740         if (parity == PICT_BOTTOM_FIELD)
2741             pic->data[i] += pic->linesize[i];
2742         pic->reference = parity;
2743         pic->linesize[i] *= 2;
2744     }
2745     pic->poc= pic->field_poc[parity == PICT_BOTTOM_FIELD];
2746 }
2747
2748 static int split_field_copy(Picture *dest, Picture *src,
2749                             int parity, int id_add){
2750     int match = !!(src->reference & parity);
2751
2752     if (match) {
2753         *dest = *src;
2754         if(parity != PICT_FRAME){
2755             pic_as_field(dest, parity);
2756             dest->pic_id *= 2;
2757             dest->pic_id += id_add;
2758         }
2759     }
2760
2761     return match;
2762 }
2763
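     /**
      * Build a default reference list for one field parity: pictures whose
      * reference marking matches 'sel' are interleaved with those of the
      * opposite parity, alternating and starting with the matching parity,
      * as required for field reference list initialization.
      */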
2764 static int build_def_list(Picture *def, Picture **in, int len, int is_long, int sel){
2765     int i[2]={0};
2766     int index=0;
2767
2768     while(i[0]<len || i[1]<len){
2769         while(i[0]<len && !(in[ i[0] ] && (in[ i[0] ]->reference & sel)))
2770             i[0]++;
2771         while(i[1]<len && !(in[ i[1] ] && (in[ i[1] ]->reference & (sel^3))))
2772             i[1]++;
2773         if(i[0] < len){
2774             in[ i[0] ]->pic_id= is_long ? i[0] : in[ i[0] ]->frame_num;
2775             split_field_copy(&def[index++], in[ i[0]++ ], sel  , 1);
2776         }
2777         if(i[1] < len){
2778             in[ i[1] ]->pic_id= is_long ? i[1] : in[ i[1] ]->frame_num;
2779             split_field_copy(&def[index++], in[ i[1]++ ], sel^3, 0);
2780         }
2781     }
2782
2783     return index;
2784 }
2785
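     /**
      * Append short term references sorted by POC relative to 'limit':
      * dir==1 emits the pictures with POC at or below the limit in
      * descending order, dir==0 those above it in ascending order.
      */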
2786 static int add_sorted(Picture **sorted, Picture **src, int len, int limit, int dir){
2787     int i, best_poc;
2788     int out_i= 0;
2789
2790     for(;;){
2791         best_poc= dir ? INT_MIN : INT_MAX;
2792
2793         for(i=0; i<len; i++){
2794             const int poc= src[i]->poc;
2795             if(((poc > limit) ^ dir) && ((poc < best_poc) ^ dir)){
2796                 best_poc= poc;
2797                 sorted[out_i]= src[i];
2798             }
2799         }
2800         if(best_poc == (dir ? INT_MIN : INT_MAX))
2801             break;
2802         limit= sorted[out_i++]->poc - dir;
2803     }
2804     return out_i;
2805 }
2806
2807 /**
2808  * fills the default_ref_list.
2809  */
2810 static int fill_default_ref_list(H264Context *h){
2811     MpegEncContext * const s = &h->s;
2812     int i, len;
2813
2814     if(h->slice_type_nos==FF_B_TYPE){
2815         Picture *sorted[32];
2816         int cur_poc, list;
2817         int lens[2];
2818
2819         if(FIELD_PICTURE)
2820             cur_poc= s->current_picture_ptr->field_poc[ s->picture_structure == PICT_BOTTOM_FIELD ];
2821         else
2822             cur_poc= s->current_picture_ptr->poc;
2823
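             /* For B slices, list 0 starts with the past references (POC below
              * the current picture, closest first) followed by the future ones;
              * list 1 is built the other way around.  Long term references are
              * appended after the short term ones in both lists. */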
2824         for(list= 0; list<2; list++){
2825             len= add_sorted(sorted    , h->short_ref, h->short_ref_count, cur_poc, 1^list);
2826             len+=add_sorted(sorted+len, h->short_ref, h->short_ref_count, cur_poc, 0^list);
2827             assert(len<=32);
2828             len= build_def_list(h->default_ref_list[list]    , sorted     , len, 0, s->picture_structure);
2829             len+=build_def_list(h->default_ref_list[list]+len, h->long_ref, 16 , 1, s->picture_structure);
2830             assert(len<=32);
2831
2832             if(len < h->ref_count[list])
2833                 memset(&h->default_ref_list[list][len], 0, sizeof(Picture)*(h->ref_count[list] - len));
2834             lens[list]= len;
2835         }
2836
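             /* If both lists come out identical and contain more than one entry,
              * the first two entries of list 1 are swapped, as the spec requires. */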
2837         if(lens[0] == lens[1] && lens[1] > 1){
2838             for(i=0; i<lens[0] && h->default_ref_list[0][i].data[0] == h->default_ref_list[1][i].data[0]; i++);
2839             if(i == lens[0])
2840                 FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]);
2841         }
2842     }else{
2843         len = build_def_list(h->default_ref_list[0]    , h->short_ref, h->short_ref_count, 0, s->picture_structure);
2844         len+= build_def_list(h->default_ref_list[0]+len, h-> long_ref, 16                , 1, s->picture_structure);
2845         assert(len <= 32);
2846         if(len < h->ref_count[0])
2847             memset(&h->default_ref_list[0][len], 0, sizeof(Picture)*(h->ref_count[0] - len));
2848     }
2849 #ifdef TRACE
2850     for (i=0; i<h->ref_count[0]; i++) {
2851         tprintf(h->s.avctx, "List0: %s fn:%d 0x%p\n", (h->default_ref_list[0][i].long_ref ? "LT" : "ST"), h->default_ref_list[0][i].pic_id, h->default_ref_list[0][i].data[0]);
2852     }
2853     if(h->slice_type_nos==FF_B_TYPE){
2854         for (i=0; i<h->ref_count[1]; i++) {
2855             tprintf(h->s.avctx, "List1: %s fn:%d 0x%p\n", (h->default_ref_list[1][i].long_ref ? "LT" : "ST"), h->default_ref_list[1][i].pic_id, h->default_ref_list[1][i].data[0]);
2856         }
2857     }
2858 #endif
2859     return 0;
2860 }
2861
2862 static void print_short_term(H264Context *h);
2863 static void print_long_term(H264Context *h);
2864
2865 /**
2866  * Extract structure information about the picture described by pic_num in
2867  * the current decoding context (frame or field). Note that pic_num is the
2868  * picture number without wrapping (so 0 <= pic_num < max_pic_num).
2869  * @param pic_num picture number for which to extract structure information
2870  * @param structure one of PICT_XXX describing structure of picture
2871  *                      with pic_num
2872  * @return frame number (short term) or long term index of picture
2873  *         described by pic_num
2874  */
2875 static int pic_num_extract(H264Context *h, int pic_num, int *structure){
2876     MpegEncContext * const s = &h->s;
2877
2878     *structure = s->picture_structure;
2879     if(FIELD_PICTURE){
2880         if (!(pic_num & 1))
2881             /* opposite field */
2882             *structure ^= PICT_FRAME;
2883         pic_num >>= 1;
2884     }
2885
2886     return pic_num;
2887 }
2888
2889 static int decode_ref_pic_list_reordering(H264Context *h){
2890     MpegEncContext * const s = &h->s;
2891     int list, index, pic_structure;
2892
2893     print_short_term(h);
2894     print_long_term(h);
2895
2896     for(list=0; list<h->list_count; list++){
2897         memcpy(h->ref_list[list], h->default_ref_list[list], sizeof(Picture)*h->ref_count[list]);
2898
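             /* ref_pic_list_reordering_flag: when set, a list of reordering
              * commands follows, terminated by reordering_of_pic_nums_idc == 3. */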
2899         if(get_bits1(&s->gb)){
2900             int pred= h->curr_pic_num;
2901
2902             for(index=0; ; index++){
2903                 unsigned int reordering_of_pic_nums_idc= get_ue_golomb(&s->gb);
2904                 unsigned int pic_id;
2905                 int i;
2906                 Picture *ref = NULL;
2907
2908                 if(reordering_of_pic_nums_idc==3)
2909                     break;
2910
2911                 if(index >= h->ref_count[list]){
2912                     av_log(h->s.avctx, AV_LOG_ERROR, "reference count overflow\n");
2913                     return -1;
2914                 }
2915
2916                 if(reordering_of_pic_nums_idc<3){
2917                     if(reordering_of_pic_nums_idc<2){
2918                         const unsigned int abs_diff_pic_num= get_ue_golomb(&s->gb) + 1;
2919                         int frame_num;
2920
2921                         if(abs_diff_pic_num > h->max_pic_num){
2922                             av_log(h->s.avctx, AV_LOG_ERROR, "abs_diff_pic_num overflow\n");
2923                             return -1;
2924                         }
2925
2926                         if(reordering_of_pic_nums_idc == 0) pred-= abs_diff_pic_num;
2927                         else                                pred+= abs_diff_pic_num;
2928                         pred &= h->max_pic_num - 1;
2929
2930                         frame_num = pic_num_extract(h, pred, &pic_structure);
2931
2932                         for(i= h->short_ref_count-1; i>=0; i--){
2933                             ref = h->short_ref[i];
2934                             assert(ref->reference);
2935                             assert(!ref->long_ref);
2936                             if(
2937                                    ref->frame_num == frame_num &&
2938                                    (ref->reference & pic_structure)
2939                               )
2940                                 break;
2941                         }
2942                         if(i>=0)
2943                             ref->pic_id= pred;
2944                     }else{
2945                         int long_idx;
2946                         pic_id= get_ue_golomb(&s->gb); //long_term_pic_idx
2947
2948                         long_idx= pic_num_extract(h, pic_id, &pic_structure);
2949
2950                         if(long_idx>31){
2951                             av_log(h->s.avctx, AV_LOG_ERROR, "long_term_pic_idx overflow\n");
2952                             return -1;
2953                         }
2954                         ref = h->long_ref[long_idx];
2955                         assert(!(ref && !ref->reference));
2956                         if(ref && (ref->reference & pic_structure)){
2957                             ref->pic_id= pic_id;
2958                             assert(ref->long_ref);
2959                             i=0;
2960                         }else{
2961                             i=-1;
2962                         }
2963                     }
2964
2965                     if (i < 0) {
2966                         av_log(h->s.avctx, AV_LOG_ERROR, "reference picture missing during reorder\n");
2967                         memset(&h->ref_list[list][index], 0, sizeof(Picture)); //FIXME
2968                     } else {
2969                         for(i=index; i+1<h->ref_count[list]; i++){
2970                             if(ref->long_ref == h->ref_list[list][i].long_ref && ref->pic_id == h->ref_list[list][i].pic_id)
2971                                 break;
2972                         }
2973                         for(; i > index; i--){
2974                             h->ref_list[list][i]= h->ref_list[list][i-1];
2975                         }
2976                         h->ref_list[list][index]= *ref;
2977                         if (FIELD_PICTURE){
2978                             pic_as_field(&h->ref_list[list][index], pic_structure);
2979                         }
2980                     }
2981                 }else{
2982                     av_log(h->s.avctx, AV_LOG_ERROR, "illegal reordering_of_pic_nums_idc\n");
2983                     return -1;
2984                 }
2985             }
2986         }
2987     }
2988     for(list=0; list<h->list_count; list++){
2989         for(index= 0; index < h->ref_count[list]; index++){
2990             if(!h->ref_list[list][index].data[0]){
2991                 av_log(h->s.avctx, AV_LOG_ERROR, "Missing reference picture\n");
2992                 h->ref_list[list][index]= s->current_picture; //FIXME this is not a sensible solution
2993             }
2994         }
2995     }
2996
2997     if(h->slice_type_nos==FF_B_TYPE && !h->direct_spatial_mv_pred)
2998         direct_dist_scale_factor(h);
2999     direct_ref_list_init(h);
3000     return 0;
3001 }
3002
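     /**
      * For MBAFF frames, build per-field views of every frame reference:
      * ref_list entry 16+2*i is the top field and 16+2*i+1 the bottom field
      * of entry i, with the prediction weights duplicated to match.
      */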
3003 static void fill_mbaff_ref_list(H264Context *h){
3004     int list, i, j;
3005     for(list=0; list<2; list++){ //FIXME try list_count
3006         for(i=0; i<h->ref_count[list]; i++){
3007             Picture *frame = &h->ref_list[list][i];
3008             Picture *field = &h->ref_list[list][16+2*i];
3009             field[0] = *frame;
3010             for(j=0; j<3; j++)
3011                 field[0].linesize[j] <<= 1;
3012             field[0].reference = PICT_TOP_FIELD;
3013             field[1] = field[0];
3014             for(j=0; j<3; j++)
3015                 field[1].data[j] += frame->linesize[j];
3016             field[1].reference = PICT_BOTTOM_FIELD;
3017
3018             h->luma_weight[list][16+2*i] = h->luma_weight[list][16+2*i+1] = h->luma_weight[list][i];
3019             h->luma_offset[list][16+2*i] = h->luma_offset[list][16+2*i+1] = h->luma_offset[list][i];
3020             for(j=0; j<2; j++){
3021                 h->chroma_weight[list][16+2*i][j] = h->chroma_weight[list][16+2*i+1][j] = h->chroma_weight[list][i][j];
3022                 h->chroma_offset[list][16+2*i][j] = h->chroma_offset[list][16+2*i+1][j] = h->chroma_offset[list][i][j];
3023             }
3024         }
3025     }
3026     for(j=0; j<h->ref_count[1]; j++){
3027         for(i=0; i<h->ref_count[0]; i++)
3028             h->implicit_weight[j][16+2*i] = h->implicit_weight[j][16+2*i+1] = h->implicit_weight[j][i];
3029         memcpy(h->implicit_weight[16+2*j],   h->implicit_weight[j], sizeof(*h->implicit_weight));
3030         memcpy(h->implicit_weight[16+2*j+1], h->implicit_weight[j], sizeof(*h->implicit_weight));
3031     }
3032 }
3033
3034 static int pred_weight_table(H264Context *h){
3035     MpegEncContext * const s = &h->s;
3036     int list, i;
3037     int luma_def, chroma_def;
3038
3039     h->use_weight= 0;
3040     h->use_weight_chroma= 0;
3041     h->luma_log2_weight_denom= get_ue_golomb(&s->gb);
3042     h->chroma_log2_weight_denom= get_ue_golomb(&s->gb);
3043     luma_def = 1<<h->luma_log2_weight_denom;
3044     chroma_def = 1<<h->chroma_log2_weight_denom;
3045
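         /* Explicit weighted prediction: for every active reference a flag
          * selects between the default weight (1<<denom, offset 0) and an
          * explicitly coded weight/offset pair; use_weight is only set when
          * something differs from the defaults. */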
3046     for(list=0; list<2; list++){
3047         for(i=0; i<h->ref_count[list]; i++){
3048             int luma_weight_flag, chroma_weight_flag;
3049
3050             luma_weight_flag= get_bits1(&s->gb);
3051             if(luma_weight_flag){
3052                 h->luma_weight[list][i]= get_se_golomb(&s->gb);
3053                 h->luma_offset[list][i]= get_se_golomb(&s->gb);
3054                 if(   h->luma_weight[list][i] != luma_def
3055                    || h->luma_offset[list][i] != 0)
3056                     h->use_weight= 1;
3057             }else{
3058                 h->luma_weight[list][i]= luma_def;
3059                 h->luma_offset[list][i]= 0;
3060             }
3061
3062             if(CHROMA){
3063                 chroma_weight_flag= get_bits1(&s->gb);
3064                 if(chroma_weight_flag){
3065                     int j;
3066                     for(j=0; j<2; j++){
3067                         h->chroma_weight[list][i][j]= get_se_golomb(&s->gb);
3068                         h->chroma_offset[list][i][j]= get_se_golomb(&s->gb);
3069                         if(   h->chroma_weight[list][i][j] != chroma_def
3070                         || h->chroma_offset[list][i][j] != 0)
3071                             h->use_weight_chroma= 1;
3072                     }
3073                 }else{
3074                     int j;
3075                     for(j=0; j<2; j++){
3076                         h->chroma_weight[list][i][j]= chroma_def;
3077                         h->chroma_offset[list][i][j]= 0;
3078                     }
3079                 }
3080             }
3081         }
3082         if(h->slice_type_nos != FF_B_TYPE) break;
3083     }
3084     h->use_weight= h->use_weight || h->use_weight_chroma;
3085     return 0;
3086 }
3087
3088 static void implicit_weight_table(H264Context *h){
3089     MpegEncContext * const s = &h->s;
3090     int ref0, ref1;
3091     int cur_poc = s->current_picture_ptr->poc;
3092
3093     if(   h->ref_count[0] == 1 && h->ref_count[1] == 1
3094        && h->ref_list[0][0].poc + h->ref_list[1][0].poc == 2*cur_poc){
3095         h->use_weight= 0;
3096         h->use_weight_chroma= 0;
3097         return;
3098     }
3099
3100     h->use_weight= 2;
3101     h->use_weight_chroma= 2;
3102     h->luma_log2_weight_denom= 5;
3103     h->chroma_log2_weight_denom= 5;
3104
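         /* Implicit weights are derived from the temporal distances of the two
          * references to the current picture (tb/td as in temporal direct mode);
          * when td is zero or the scaled distance leaves [-64,128], equal
          * weighting (32/32 out of 64) is used instead. */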
3105     for(ref0=0; ref0 < h->ref_count[0]; ref0++){
3106         int poc0 = h->ref_list[0][ref0].poc;
3107         for(ref1=0; ref1 < h->ref_count[1]; ref1++){
3108             int poc1 = h->ref_list[1][ref1].poc;
3109             int td = av_clip(poc1 - poc0, -128, 127);
3110             if(td){
3111                 int tb = av_clip(cur_poc - poc0, -128, 127);
3112                 int tx = (16384 + (FFABS(td) >> 1)) / td;
3113                 int dist_scale_factor = av_clip((tb*tx + 32) >> 6, -1024, 1023) >> 2;
3114                 if(dist_scale_factor < -64 || dist_scale_factor > 128)
3115                     h->implicit_weight[ref0][ref1] = 32;
3116                 else
3117                     h->implicit_weight[ref0][ref1] = 64 - dist_scale_factor;
3118             }else
3119                 h->implicit_weight[ref0][ref1] = 32;
3120         }
3121     }
3122 }
3123
3124 /**
3125  * Mark a picture as no longer needed for reference. The refmask
3126  * argument allows unreferencing of individual fields or the whole frame.
3127  * If the picture becomes entirely unreferenced, but is being held for
3128  * display purposes, it is marked as such.
3129  * @param refmask mask of fields to unreference; the mask is bitwise
3130  *                ANDed with the reference marking of pic
3131  * @return non-zero if pic becomes entirely unreferenced (except possibly
3132  *         for display purposes); zero if one of the fields remains a
3133  *         reference
3134  */
3135 static inline int unreference_pic(H264Context *h, Picture *pic, int refmask){
3136     int i;
3137     if (pic->reference &= refmask) {
3138         return 0;
3139     } else {
3140         for(i = 0; h->delayed_pic[i]; i++)
3141             if(pic == h->delayed_pic[i]){
3142                 pic->reference=DELAYED_PIC_REF;
3143                 break;
3144             }
3145         return 1;
3146     }
3147 }
3148
3149 /**
3150  * instantaneous decoder refresh.
3151  */
3152 static void idr(H264Context *h){
3153     int i;
3154
3155     for(i=0; i<16; i++){
3156         remove_long(h, i, 0);
3157     }
3158     assert(h->long_ref_count==0);
3159
3160     for(i=0; i<h->short_ref_count; i++){
3161         unreference_pic(h, h->short_ref[i], 0);
3162         h->short_ref[i]= NULL;
3163     }
3164     h->short_ref_count=0;
3165     h->prev_frame_num= 0;
3166     h->prev_frame_num_offset= 0;
3167     h->prev_poc_msb=
3168     h->prev_poc_lsb= 0;
3169 }
3170
3171 /* forget old pics after a seek */
3172 static void flush_dpb(AVCodecContext *avctx){
3173     H264Context *h= avctx->priv_data;
3174     int i;
3175     for(i=0; i<MAX_DELAYED_PIC_COUNT; i++) {
3176         if(h->delayed_pic[i])
3177             h->delayed_pic[i]->reference= 0;
3178         h->delayed_pic[i]= NULL;
3179     }
3180     h->outputed_poc= INT_MIN;
3181     idr(h);
3182     if(h->s.current_picture_ptr)
3183         h->s.current_picture_ptr->reference= 0;
3184     h->s.first_field= 0;
3185     ff_mpeg_flush(avctx);
3186 }
3187
3188 /**
3189  * Find a Picture in the short term reference list by frame number.
3190  * @param frame_num frame number to search for
3191  * @param idx the index into h->short_ref where the returned picture is found;
3192  *            undefined if no picture is found.
3193  * @return pointer to the found picture, or NULL if no pic with the provided
3194  *                 frame number is found
3195  */
3196 static Picture * find_short(H264Context *h, int frame_num, int *idx){
3197     MpegEncContext * const s = &h->s;
3198     int i;
3199
3200     for(i=0; i<h->short_ref_count; i++){
3201         Picture *pic= h->short_ref[i];
3202         if(s->avctx->debug&FF_DEBUG_MMCO)
3203             av_log(h->s.avctx, AV_LOG_DEBUG, "%d %d %p\n", i, pic->frame_num, pic);
3204         if(pic->frame_num == frame_num) {
3205             *idx = i;
3206             return pic;
3207         }
3208     }
3209     return NULL;
3210 }
3211
3212 /**
3213  * Remove a picture from the short term reference list by its index in
3214  * that list.  This does no checking on the provided index; it is assumed
3215  * to be valid. Other list entries are shifted down.
3216  * @param i index into h->short_ref of picture to remove.
3217  */
3218 static void remove_short_at_index(H264Context *h, int i){
3219     assert(i >= 0 && i < h->short_ref_count);
3220     h->short_ref[i]= NULL;
3221     if (--h->short_ref_count)
3222         memmove(&h->short_ref[i], &h->short_ref[i+1], (h->short_ref_count - i)*sizeof(Picture*));
3223 }
3224
3225 /**
3226  * Remove a picture from the short term reference list by frame number.
3227  * @return the removed picture or NULL if an error occurs
3228  */
3229 static Picture * remove_short(H264Context *h, int frame_num, int ref_mask){
3230     MpegEncContext * const s = &h->s;
3231     Picture *pic;
3232     int i;
3233
3234     if(s->avctx->debug&FF_DEBUG_MMCO)
3235         av_log(h->s.avctx, AV_LOG_DEBUG, "remove short %d count %d\n", frame_num, h->short_ref_count);
3236
3237     pic = find_short(h, frame_num, &i);
3238     if (pic){
3239         if(unreference_pic(h, pic, ref_mask))
3240             remove_short_at_index(h, i);
3241     }
3242
3243     return pic;
3244 }
3245
3246 /**
3247  * Remove a picture from the long term reference list by its index in
3248  * that list.
3249  * @return the removed picture or NULL if an error occurs
3250  */
3251 static Picture * remove_long(H264Context *h, int i, int ref_mask){
3252     Picture *pic;
3253
3254     pic= h->long_ref[i];
3255     if (pic){
3256         if(unreference_pic(h, pic, ref_mask)){
3257             assert(h->long_ref[i]->long_ref == 1);
3258             h->long_ref[i]->long_ref= 0;
3259             h->long_ref[i]= NULL;
3260             h->long_ref_count--;
3261         }
3262     }
3263
3264     return pic;
3265 }
3266
3267 /**
3268  * print short term list
3269  */
3270 static void print_short_term(H264Context *h) {
3271     uint32_t i;
3272     if(h->s.avctx->debug&FF_DEBUG_MMCO) {
3273         av_log(h->s.avctx, AV_LOG_DEBUG, "short term list:\n");
3274         for(i=0; i<h->short_ref_count; i++){
3275             Picture *pic= h->short_ref[i];
3276             av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]);
3277         }
3278     }
3279 }
3280
3281 /**
3282  * print long term list
3283  */
3284 static void print_long_term(H264Context *h) {
3285     uint32_t i;
3286     if(h->s.avctx->debug&FF_DEBUG_MMCO) {
3287         av_log(h->s.avctx, AV_LOG_DEBUG, "long term list:\n");
3288         for(i = 0; i < 16; i++){
3289             Picture *pic= h->long_ref[i];
3290             if (pic) {
3291                 av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]);
3292             }
3293         }
3294     }
3295 }
3296
3297 /**
3298  * Executes the reference picture marking (memory management control operations).
3299  */
3300 static int execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
3301     MpegEncContext * const s = &h->s;
3302     int i, j;
3303     int current_ref_assigned=0;
3304     Picture *pic;
3305
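         /* Each MMCO opcode maps to one marking action: SHORT2UNUSED and
          * LONG2UNUSED drop a reference, SHORT2LONG moves a short term picture
          * to a long term slot, LONG marks the current picture as long term,
          * SET_MAX_LONG trims the long term list and RESET clears all
          * reference state. */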
3306     if((s->avctx->debug&FF_DEBUG_MMCO) && mmco_count==0)
3307         av_log(h->s.avctx, AV_LOG_DEBUG, "no mmco here\n");
3308
3309     for(i=0; i<mmco_count; i++){
3310         int structure, frame_num;
3311         if(s->avctx->debug&FF_DEBUG_MMCO)
3312             av_log(h->s.avctx, AV_LOG_DEBUG, "mmco:%d %d %d\n", h->mmco[i].opcode, h->mmco[i].short_pic_num, h->mmco[i].long_arg);
3313
3314         if(   mmco[i].opcode == MMCO_SHORT2UNUSED
3315            || mmco[i].opcode == MMCO_SHORT2LONG){
3316             frame_num = pic_num_extract(h, mmco[i].short_pic_num, &structure);
3317             pic = find_short(h, frame_num, &j);
3318             if(!pic){
3319                 if(mmco[i].opcode != MMCO_SHORT2LONG || !h->long_ref[mmco[i].long_arg]
3320                    || h->long_ref[mmco[i].long_arg]->frame_num != frame_num)
3321                     av_log(h->s.avctx, AV_LOG_ERROR, "mmco: unref short failure\n");
3322                 continue;
3323             }
3324         }
3325
3326         switch(mmco[i].opcode){
3327         case MMCO_SHORT2UNUSED:
3328             if(s->avctx->debug&FF_DEBUG_MMCO)
3329                 av_log(h->s.avctx, AV_LOG_DEBUG, "mmco: unref short %d count %d\n", h->mmco[i].short_pic_num, h->short_ref_count);
3330             remove_short(h, frame_num, structure ^ PICT_FRAME);
3331             break;
3332         case MMCO_SHORT2LONG:
3333                 if (h->long_ref[mmco[i].long_arg] != pic)
3334                     remove_long(h, mmco[i].long_arg, 0);
3335
3336                 remove_short_at_index(h, j);
3337                 h->long_ref[ mmco[i].long_arg ]= pic;
3338                 if (h->long_ref[ mmco[i].long_arg ]){
3339                     h->long_ref[ mmco[i].long_arg ]->long_ref=1;
3340                     h->long_ref_count++;
3341                 }
3342             break;
3343         case MMCO_LONG2UNUSED:
3344             j = pic_num_extract(h, mmco[i].long_arg, &structure);
3345             pic = h->long_ref[j];
3346             if (pic) {
3347                 remove_long(h, j, structure ^ PICT_FRAME);
3348             } else if(s->avctx->debug&FF_DEBUG_MMCO)
3349                 av_log(h->s.avctx, AV_LOG_DEBUG, "mmco: unref long failure\n");
3350             break;
3351         case MMCO_LONG:
3352             // Comment below left from previous code as it is an interesting note.
3353             /* First field in pair is in short term list or
3354              * at a different long term index.
3355              * This is not allowed; see 7.4.3.3, notes 2 and 3.
3356              * Report the problem and keep the pair where it is,
3357              * and mark this field valid.
3358              */
3359
3360             if (h->long_ref[mmco[i].long_arg] != s->current_picture_ptr) {
3361                 remove_long(h, mmco[i].long_arg, 0);
3362
3363                 h->long_ref[ mmco[i].long_arg ]= s->current_picture_ptr;
3364                 h->long_ref[ mmco[i].long_arg ]->long_ref=1;
3365                 h->long_ref_count++;
3366             }
3367
3368             s->current_picture_ptr->reference |= s->picture_structure;
3369             current_ref_assigned=1;
3370             break;
3371         case MMCO_SET_MAX_LONG:
3372             assert(mmco[i].long_arg <= 16);
3373             // just remove the long term references whose index is greater than the new max
3374             for(j = mmco[i].long_arg; j<16; j++){
3375                 remove_long(h, j, 0);
3376             }
3377             break;
3378         case MMCO_RESET:
3379             while(h->short_ref_count){
3380                 remove_short(h, h->short_ref[0]->frame_num, 0);
3381             }
3382             for(j = 0; j < 16; j++) {
3383                 remove_long(h, j, 0);
3384             }
3385             s->current_picture_ptr->poc=
3386             s->current_picture_ptr->field_poc[0]=
3387             s->current_picture_ptr->field_poc[1]=
3388             h->poc_lsb=
3389             h->poc_msb=
3390             h->frame_num=
3391             s->current_picture_ptr->frame_num= 0;
3392             break;
3393         default: assert(0);
3394         }
3395     }
3396
3397     if (!current_ref_assigned) {
3398         /* Second field of complementary field pair; the first field of
3399          * which is already referenced. If short referenced, it
3400          * should be first entry in short_ref. If not, it must exist
3401          * in long_ref; trying to put it on the short list here is an
3402          * error in the encoded bit stream (ref: 7.4.3.3, NOTE 2 and 3).
3403          */
3404         if (h->short_ref_count && h->short_ref[0] == s->current_picture_ptr) {
3405             /* Just mark the second field valid */
3406             s->current_picture_ptr->reference = PICT_FRAME;
3407         } else if (s->current_picture_ptr->long_ref) {
3408             av_log(h->s.avctx, AV_LOG_ERROR, "illegal short term reference "
3409                                              "assignment for second field "
3410                                              "in complementary field pair "
3411                                              "(first field is long term)\n");
3412         } else {
3413             pic= remove_short(h, s->current_picture_ptr->frame_num, 0);
3414             if(pic){
3415                 av_log(h->s.avctx, AV_LOG_ERROR, "illegal short term buffer state detected\n");
3416             }
3417
3418             if(h->short_ref_count)
3419                 memmove(&h->short_ref[1], &h->short_ref[0], h->short_ref_count*sizeof(Picture*));
3420
3421             h->short_ref[0]= s->current_picture_ptr;
3422             h->short_ref_count++;
3423             s->current_picture_ptr->reference |= s->picture_structure;
3424         }
3425     }
3426
3427     if (h->long_ref_count + h->short_ref_count > h->sps.ref_frame_count){
3428
3429         /* We have too many reference frames, probably due to corrupted
3430          * stream. Need to discard one frame. Prevents overrun of the
3431          * short_ref and long_ref buffers.
3432          */
3433         av_log(h->s.avctx, AV_LOG_ERROR,
3434                "number of reference frames exceeds max (probably "
3435                "corrupt input), discarding one\n");
3436
3437         if (h->long_ref_count && !h->short_ref_count) {
3438             for (i = 0; i < 16; ++i)
3439                 if (h->long_ref[i])
3440                     break;
3441
3442             assert(i < 16);
3443             remove_long(h, i, 0);
3444         } else {
3445             pic = h->short_ref[h->short_ref_count - 1];
3446             remove_short(h, pic->frame_num, 0);
3447         }
3448     }
3449
3450     print_short_term(h);
3451     print_long_term(h);
3452     return 0;
3453 }
3454
3455 static int decode_ref_pic_marking(H264Context *h, GetBitContext *gb){
3456     MpegEncContext * const s = &h->s;
3457     int i;
3458
3459     h->mmco_index= 0;
3460     if(h->nal_unit_type == NAL_IDR_SLICE){ //FIXME fields
3461         s->broken_link= get_bits1(gb) -1;
3462         if(get_bits1(gb)){
3463             h->mmco[0].opcode= MMCO_LONG;
3464             h->mmco[0].long_arg= 0;
3465             h->mmco_index= 1;
3466         }
3467     }else{
3468         if(get_bits1(gb)){ // adaptive_ref_pic_marking_mode_flag
3469             for(i= 0; i<MAX_MMCO_COUNT; i++) {
3470                 MMCOOpcode opcode= get_ue_golomb(gb);
3471
3472                 h->mmco[i].opcode= opcode;
3473                 if(opcode==MMCO_SHORT2UNUSED || opcode==MMCO_SHORT2LONG){
3474                     h->mmco[i].short_pic_num= (h->curr_pic_num - get_ue_golomb(gb) - 1) & (h->max_pic_num - 1);
3475 /*                    if(h->mmco[i].short_pic_num >= h->short_ref_count || h->short_ref[ h->mmco[i].short_pic_num ] == NULL){
3476                         av_log(s->avctx, AV_LOG_ERROR, "illegal short ref in memory management control operation %d\n", mmco);
3477                         return -1;
3478                     }*/
3479                 }
3480                 if(opcode==MMCO_SHORT2LONG || opcode==MMCO_LONG2UNUSED || opcode==MMCO_LONG || opcode==MMCO_SET_MAX_LONG){
3481                     unsigned int long_arg= get_ue_golomb(gb);
3482                     if(long_arg >= 32 || (long_arg >= 16 && !(opcode == MMCO_LONG2UNUSED && FIELD_PICTURE))){
3483                         av_log(h->s.avctx, AV_LOG_ERROR, "illegal long ref in memory management control operation %d\n", opcode);
3484                         return -1;
3485                     }
3486                     h->mmco[i].long_arg= long_arg;
3487                 }
3488
3489                 if(opcode > (unsigned)MMCO_LONG){
3490                     av_log(h->s.avctx, AV_LOG_ERROR, "illegal memory management control operation %d\n", opcode);
3491                     return -1;
3492                 }
3493                 if(opcode == MMCO_END)
3494                     break;
3495             }
3496             h->mmco_index= i;
3497         }else{
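                 /* Sliding window marking: once the DPB already holds
                  * sps.ref_frame_count references, synthesize an MMCO that
                  * unreferences the oldest short term picture (both of its
                  * fields when decoding fields). */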
3498             assert(h->long_ref_count + h->short_ref_count <= h->sps.ref_frame_count);
3499
3500             if(h->short_ref_count && h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count &&
3501                     !(FIELD_PICTURE && !s->first_field && s->current_picture_ptr->reference)) {
3502                 h->mmco[0].opcode= MMCO_SHORT2UNUSED;
3503                 h->mmco[0].short_pic_num= h->short_ref[ h->short_ref_count - 1 ]->frame_num;
3504                 h->mmco_index= 1;
3505                 if (FIELD_PICTURE) {
3506                     h->mmco[0].short_pic_num *= 2;
3507                     h->mmco[1].opcode= MMCO_SHORT2UNUSED;
3508                     h->mmco[1].short_pic_num= h->mmco[0].short_pic_num + 1;
3509                     h->mmco_index= 2;
3510                 }
3511             }
3512         }
3513     }
3514
3515     return 0;
3516 }
3517
3518 static int init_poc(H264Context *h){
3519     MpegEncContext * const s = &h->s;
3520     const int max_frame_num= 1<<h->sps.log2_max_frame_num;
3521     int field_poc[2];
3522     Picture *cur = s->current_picture_ptr;
3523
3524     h->frame_num_offset= h->prev_frame_num_offset;
3525     if(h->frame_num < h->prev_frame_num)
3526         h->frame_num_offset += max_frame_num;
3527
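         /* poc_type 0: POC is sent as LSBs and the MSB part is inferred from
          * wraparound; poc_type 1: POC is derived from frame_num through the
          * offset cycle signalled in the SPS; poc_type 2: POC is simply
          * 2*frame_num, minus one for non-reference pictures. */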
3528     if(h->sps.poc_type==0){
3529         const int max_poc_lsb= 1<<h->sps.log2_max_poc_lsb;
3530
3531         if     (h->poc_lsb < h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb/2)
3532             h->poc_msb = h->prev_poc_msb + max_poc_lsb;
3533         else if(h->poc_lsb > h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb/2)
3534             h->poc_msb = h->prev_poc_msb - max_poc_lsb;
3535         else
3536             h->poc_msb = h->prev_poc_msb;
3537 //printf("poc: %d %d\n", h->poc_msb, h->poc_lsb);
3538         field_poc[0] =
3539         field_poc[1] = h->poc_msb + h->poc_lsb;
3540         if(s->picture_structure == PICT_FRAME)
3541             field_poc[1] += h->delta_poc_bottom;
3542     }else if(h->sps.poc_type==1){
3543         int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
3544         int i;
3545
3546         if(h->sps.poc_cycle_length != 0)
3547             abs_frame_num = h->frame_num_offset + h->frame_num;
3548         else
3549             abs_frame_num = 0;
3550
3551         if(h->nal_ref_idc==0 && abs_frame_num > 0)
3552             abs_frame_num--;
3553
3554         expected_delta_per_poc_cycle = 0;
3555         for(i=0; i < h->sps.poc_cycle_length; i++)
3556             expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[ i ]; //FIXME integrate during sps parse
3557
3558         if(abs_frame_num > 0){
3559             int poc_cycle_cnt          = (abs_frame_num - 1) / h->sps.poc_cycle_length;
3560             int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;
3561
3562             expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
3563             for(i = 0; i <= frame_num_in_poc_cycle; i++)
3564                 expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[ i ];
3565         } else
3566             expectedpoc = 0;
3567
3568         if(h->nal_ref_idc == 0)
3569             expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;
3570
3571         field_poc[0] = expectedpoc + h->delta_poc[0];
3572         field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;
3573
3574         if(s->picture_structure == PICT_FRAME)
3575             field_poc[1] += h->delta_poc[1];
3576     }else{
3577         int poc= 2*(h->frame_num_offset + h->frame_num);
3578
3579         if(!h->nal_ref_idc)
3580             poc--;
3581
3582         field_poc[0]= poc;
3583         field_poc[1]= poc;
3584     }
3585
3586     if(s->picture_structure != PICT_BOTTOM_FIELD)
3587         s->current_picture_ptr->field_poc[0]= field_poc[0];
3588     if(s->picture_structure != PICT_TOP_FIELD)
3589         s->current_picture_ptr->field_poc[1]= field_poc[1];
3590     cur->poc= FFMIN(cur->field_poc[0], cur->field_poc[1]);
3591
3592     return 0;
3593 }
3594
3595
3596 /**
3597  * initialize scan tables
3598  */
3599 static void init_scan_tables(H264Context *h){
3600     MpegEncContext * const s = &h->s;
3601     int i;
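         /* The scan tables must match the coefficient order expected by the IDCT
          * in use: the plain C IDCT takes natural order, while the alternative
          * implementations selected here expect the block transposed, which is
          * what the T() permutation produces. */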
3602     if(s->dsp.h264_idct_add == ff_h264_idct_add_c){ //FIXME little ugly
3603         memcpy(h->zigzag_scan, zigzag_scan, 16*sizeof(uint8_t));
3604         memcpy(h-> field_scan,  field_scan, 16*sizeof(uint8_t));
3605     }else{
3606         for(i=0; i<16; i++){
3607 #define T(x) (x>>2) | ((x<<2) & 0xF)
3608             h->zigzag_scan[i] = T(zigzag_scan[i]);
3609             h-> field_scan[i] = T( field_scan[i]);
3610 #undef T
3611         }
3612     }
3613     if(s->dsp.h264_idct8_add == ff_h264_idct8_add_c){
3614         memcpy(h->zigzag_scan8x8,       zigzag_scan8x8,       64*sizeof(uint8_t));
3615         memcpy(h->zigzag_scan8x8_cavlc, zigzag_scan8x8_cavlc, 64*sizeof(uint8_t));
3616         memcpy(h->field_scan8x8,        field_scan8x8,        64*sizeof(uint8_t));
3617         memcpy(h->field_scan8x8_cavlc,  field_scan8x8_cavlc,  64*sizeof(uint8_t));
3618     }else{
3619         for(i=0; i<64; i++){
3620 #define T(x) (x>>3) | ((x&7)<<3)
3621             h->zigzag_scan8x8[i]       = T(zigzag_scan8x8[i]);
3622             h->zigzag_scan8x8_cavlc[i] = T(zigzag_scan8x8_cavlc[i]);
3623             h->field_scan8x8[i]        = T(field_scan8x8[i]);
3624             h->field_scan8x8_cavlc[i]  = T(field_scan8x8_cavlc[i]);
3625 #undef T
3626         }
3627     }
3628     if(h->sps.transform_bypass){ //FIXME same ugly
3629         h->zigzag_scan_q0          = zigzag_scan;
3630         h->zigzag_scan8x8_q0       = zigzag_scan8x8;
3631         h->zigzag_scan8x8_cavlc_q0 = zigzag_scan8x8_cavlc;
3632         h->field_scan_q0           = field_scan;
3633         h->field_scan8x8_q0        = field_scan8x8;
3634         h->field_scan8x8_cavlc_q0  = field_scan8x8_cavlc;
3635     }else{
3636         h->zigzag_scan_q0          = h->zigzag_scan;
3637         h->zigzag_scan8x8_q0       = h->zigzag_scan8x8;
3638         h->zigzag_scan8x8_cavlc_q0 = h->zigzag_scan8x8_cavlc;
3639         h->field_scan_q0           = h->field_scan;
3640         h->field_scan8x8_q0        = h->field_scan8x8;
3641         h->field_scan8x8_cavlc_q0  = h->field_scan8x8_cavlc;
3642     }
3643 }
3644
3645 /**
3646  * Replicates H264 "master" context to thread contexts.
3647  */
3648 static void clone_slice(H264Context *dst, H264Context *src)
3649 {
3650     memcpy(dst->block_offset,     src->block_offset, sizeof(dst->block_offset));
3651     dst->s.current_picture_ptr  = src->s.current_picture_ptr;
3652     dst->s.current_picture      = src->s.current_picture;
3653     dst->s.linesize             = src->s.linesize;
3654     dst->s.uvlinesize           = src->s.uvlinesize;
3655     dst->s.first_field          = src->s.first_field;
3656
3657     dst->prev_poc_msb           = src->prev_poc_msb;
3658     dst->prev_poc_lsb           = src->prev_poc_lsb;
3659     dst->prev_frame_num_offset  = src->prev_frame_num_offset;
3660     dst->prev_frame_num         = src->prev_frame_num;
3661     dst->short_ref_count        = src->short_ref_count;
3662
3663     memcpy(dst->short_ref,        src->short_ref,        sizeof(dst->short_ref));
3664     memcpy(dst->long_ref,         src->long_ref,         sizeof(dst->long_ref));
3665     memcpy(dst->default_ref_list, src->default_ref_list, sizeof(dst->default_ref_list));
3666     memcpy(dst->ref_list,         src->ref_list,         sizeof(dst->ref_list));
3667
3668     memcpy(dst->dequant4_coeff,   src->dequant4_coeff,   sizeof(src->dequant4_coeff));
3669     memcpy(dst->dequant8_coeff,   src->dequant8_coeff,   sizeof(src->dequant8_coeff));
3670 }
3671
3672 /**
3673  * decodes a slice header.
3674  * This will also call MPV_common_init() and frame_start() as needed.
3675  *
3676  * @param h the H264 context
3677  * @param h0 h264 master context (differs from 'h' when doing sliced based parallel decoding)
3678  *
3679  * @return 0 if okay, <0 if an error occurred, 1 if decoding must not be multithreaded
3680  */
3681 static int decode_slice_header(H264Context *h, H264Context *h0){
3682     MpegEncContext * const s = &h->s;
3683     MpegEncContext * const s0 = &h0->s;
3684     unsigned int first_mb_in_slice;
3685     unsigned int pps_id;
3686     int num_ref_idx_active_override_flag;
3687     static const uint8_t slice_type_map[5]= {FF_P_TYPE, FF_B_TYPE, FF_I_TYPE, FF_SP_TYPE, FF_SI_TYPE};
3688     unsigned int slice_type, tmp, i, j;
3689     int default_ref_list_done = 0;
3690     int last_pic_structure;
3691
3692     s->dropable= h->nal_ref_idc == 0;
3693
3694     if((s->avctx->flags2 & CODEC_FLAG2_FAST) && !h->nal_ref_idc){
3695         s->me.qpel_put= s->dsp.put_2tap_qpel_pixels_tab;
3696         s->me.qpel_avg= s->dsp.avg_2tap_qpel_pixels_tab;
3697     }else{
3698         s->me.qpel_put= s->dsp.put_h264_qpel_pixels_tab;
3699         s->me.qpel_avg= s->dsp.avg_h264_qpel_pixels_tab;
3700     }
3701
3702     first_mb_in_slice= get_ue_golomb(&s->gb);
3703
3704     if((s->flags2 & CODEC_FLAG2_CHUNKS) && first_mb_in_slice == 0){
3705         h0->current_slice = 0;
3706         if (!s0->first_field)
3707             s->current_picture_ptr= NULL;
3708     }
3709
3710     slice_type= get_ue_golomb(&s->gb);
3711     if(slice_type > 9){
3712         av_log(h->s.avctx, AV_LOG_ERROR, "slice type too large (%d) at %d %d\n", slice_type, s->mb_x, s->mb_y);
3713         return -1;
3714     }
3715     if(slice_type > 4){
3716         slice_type -= 5;
3717         h->slice_type_fixed=1;
3718     }else
3719         h->slice_type_fixed=0;
3720
3721     slice_type= slice_type_map[ slice_type ];
3722     if (slice_type == FF_I_TYPE
3723         || (h0->current_slice != 0 && slice_type == h0->last_slice_type) ) {
3724         default_ref_list_done = 1;
3725     }
3726     h->slice_type= slice_type;
3727     h->slice_type_nos= slice_type & 3;
3728
3729     s->pict_type= h->slice_type; // to make a few old functions happy, it's wrong though
3730     if (s->pict_type == FF_B_TYPE && s0->last_picture_ptr == NULL) {
3731         av_log(h->s.avctx, AV_LOG_ERROR,
3732                "B picture before any references, skipping\n");
3733         return -1;
3734     }
3735
3736     pps_id= get_ue_golomb(&s->gb);
3737     if(pps_id>=MAX_PPS_COUNT){
3738         av_log(h->s.avctx, AV_LOG_ERROR, "pps_id out of range\n");
3739         return -1;
3740     }
3741     if(!h0->pps_buffers[pps_id]) {
3742         av_log(h->s.avctx, AV_LOG_ERROR, "non-existing PPS referenced\n");
3743         return -1;
3744     }
3745     h->pps= *h0->pps_buffers[pps_id];
3746
3747     if(!h0->sps_buffers[h->pps.sps_id]) {
3748         av_log(h->s.avctx, AV_LOG_ERROR, "non-existing SPS referenced\n");
3749         return -1;
3750     }
3751     h->sps = *h0->sps_buffers[h->pps.sps_id];
3752
3753     if(h == h0 && h->dequant_coeff_pps != pps_id){
3754         h->dequant_coeff_pps = pps_id;
3755         init_dequant_tables(h);
3756     }
3757
3758     s->mb_width= h->sps.mb_width;
3759     s->mb_height= h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
3760
3761     h->b_stride=  s->mb_width*4;
3762     h->b8_stride= s->mb_width*2;
3763
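    /* Apply the SPS frame cropping: horizontal cropping is signalled in units
     * of 2 luma samples, vertical cropping in units of 2*(2 - frame_mbs_only_flag).
     * The FFMIN keeps the crop smaller than one macroblock. */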
3764     s->width = 16*s->mb_width - 2*FFMIN(h->sps.crop_right, 7);
3765     if(h->sps.frame_mbs_only_flag)
3766         s->height= 16*s->mb_height - 2*FFMIN(h->sps.crop_bottom, 7);
3767     else
3768         s->height= 16*s->mb_height - 4*FFMIN(h->sps.crop_bottom, 3);
3769
3770     if (s->context_initialized
3771         && (   s->width != s->avctx->width || s->height != s->avctx->height)) {
3772         if(h != h0)
3773             return -1;   // width / height changed during parallel decoding
3774         free_tables(h);
3775         MPV_common_end(s);
3776     }
3777     if (!s->context_initialized) {
3778         if(h != h0)
3779             return -1;  // we can't (re-)initialize the context during parallel decoding
3780         if (MPV_common_init(s) < 0)
3781             return -1;
3782         s->first_field = 0;
3783
3784         init_scan_tables(h);
3785         alloc_tables(h);
3786
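        /* Set up one H264Context per slice thread: copy the already initialized
         * MpegEncContext part, clear the H.264-specific part and share the big
         * per-picture tables via clone_tables(). */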
3787         for(i = 1; i < s->avctx->thread_count; i++) {
3788             H264Context *c;
3789             c = h->thread_context[i] = av_malloc(sizeof(H264Context));
3790             memcpy(c, h->s.thread_context[i], sizeof(MpegEncContext));
3791             memset(&c->s + 1, 0, sizeof(H264Context) - sizeof(MpegEncContext));
3792             c->sps = h->sps;
3793             c->pps = h->pps;
3794             init_scan_tables(c);
3795             clone_tables(c, h);
3796         }
3797
3798         for(i = 0; i < s->avctx->thread_count; i++)
3799             if(context_init(h->thread_context[i]) < 0)
3800                 return -1;
3801
3802         s->avctx->width = s->width;
3803         s->avctx->height = s->height;
3804         s->avctx->sample_aspect_ratio= h->sps.sar;
3805         if(!s->avctx->sample_aspect_ratio.den)
3806             s->avctx->sample_aspect_ratio.den = 1;
3807
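        /* num_units_in_tick/time_scale is the duration of one field for typical
         * streams; doubling num_units_in_tick yields a frame-based time base.
         * Old x264 builds (< 44) apparently wrote a time_scale that is off by a
         * factor of two, which is compensated for below. */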
3808         if(h->sps.timing_info_present_flag){
3809             s->avctx->time_base= (AVRational){h->sps.num_units_in_tick * 2, h->sps.time_scale};
3810             if(h->x264_build > 0 && h->x264_build < 44)
3811                 s->avctx->time_base.den *= 2;
3812             av_reduce(&s->avctx->time_base.num, &s->avctx->time_base.den,
3813                       s->avctx->time_base.num, s->avctx->time_base.den, 1<<30);
3814         }
3815     }
3816
3817     h->frame_num= get_bits(&s->gb, h->sps.log2_max_frame_num);
3818
3819     h->mb_mbaff = 0;
3820     h->mb_aff_frame = 0;
3821     last_pic_structure = s0->picture_structure;
3822     if(h->sps.frame_mbs_only_flag){
3823         s->picture_structure= PICT_FRAME;
3824     }else{
3825         if(get_bits1(&s->gb)) { //field_pic_flag
3826             s->picture_structure= PICT_TOP_FIELD + get_bits1(&s->gb); //bottom_field_flag
3827         } else {
3828             s->picture_structure= PICT_FRAME;
3829             h->mb_aff_frame = h->sps.mb_aff;
3830         }
3831     }
3832     h->mb_field_decoding_flag= s->picture_structure != PICT_FRAME;
3833
3834     if(h0->current_slice == 0){
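        /* Handle gaps in frame_num: synthesize the missing frames so that the
         * reference bookkeeping (frame_num wrap, sliding window marking) stays
         * consistent. */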
3835         while(h->frame_num !=  h->prev_frame_num &&
3836               h->frame_num != (h->prev_frame_num+1)%(1<<h->sps.log2_max_frame_num)){
3837             av_log(h->s.avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n", h->frame_num, h->prev_frame_num);
3838             frame_start(h);
3839             h->prev_frame_num++;
3840             h->prev_frame_num %= 1<<h->sps.log2_max_frame_num;
3841             s->current_picture_ptr->frame_num= h->prev_frame_num;
3842             execute_ref_pic_marking(h, NULL, 0);
3843         }
3844
3845         /* See if we have a decoded first field looking for a pair... */
3846         if (s0->first_field) {
3847             assert(s0->current_picture_ptr);
3848             assert(s0->current_picture_ptr->data[0]);
3849             assert(s0->current_picture_ptr->reference != DELAYED_PIC_REF);
3850
3851             /* figure out if we have a complementary field pair */
3852             if (!FIELD_PICTURE || s->picture_structure == last_pic_structure) {
3853                 /*
3854                  * Previous field is unmatched. Don't display it, but let it
3855                  * remain for reference if marked as such.
3856                  */
3857                 s0->current_picture_ptr = NULL;
3858                 s0->first_field = FIELD_PICTURE;
3859
3860             } else {
3861                 if (h->nal_ref_idc &&
3862                         s0->current_picture_ptr->reference &&
3863                         s0->current_picture_ptr->frame_num != h->frame_num) {
3864                     /*
3865                      * This and previous field were reference, but had
3866                      * different frame_nums. Consider this field first in
3867                      * pair. Throw away previous field except for reference
3868                      * purposes.
3869                      */
3870                     s0->first_field = 1;
3871                     s0->current_picture_ptr = NULL;
3872
3873                 } else {
3874                     /* Second field in complementary pair */
3875                     s0->first_field = 0;
3876                 }
3877             }
3878
3879         } else {
3880             /* Frame or first field in a potentially complementary pair */
3881             assert(!s0->current_picture_ptr);
3882             s0->first_field = FIELD_PICTURE;
3883         }
3884
3885         if((!FIELD_PICTURE || s0->first_field) && frame_start(h) < 0) {
3886             s0->first_field = 0;
3887             return -1;
3888         }
3889     }
3890     if(h != h0)
3891         clone_slice(h, h0);
3892
3893     s->current_picture_ptr->frame_num= h->frame_num; //FIXME frame_num cleanup
3894
3895     assert(s->mb_num == s->mb_width * s->mb_height);
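    /* In field or MBAFF pictures first_mb_in_slice addresses macroblock pairs /
     * per-field macroblocks, so each unit covers two frame MB rows, hence the
     * shift; the unshifted comparison additionally guards against the shift
     * overflowing. */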
3896     if(first_mb_in_slice << FIELD_OR_MBAFF_PICTURE >= s->mb_num ||
3897        first_mb_in_slice                    >= s->mb_num){
3898         av_log(h->s.avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
3899         return -1;
3900     }
3901     s->resync_mb_x = s->mb_x = first_mb_in_slice % s->mb_width;
3902     s->resync_mb_y = s->mb_y = (first_mb_in_slice / s->mb_width) << FIELD_OR_MBAFF_PICTURE;
3903     if (s->picture_structure == PICT_BOTTOM_FIELD)
3904         s->resync_mb_y = s->mb_y = s->mb_y + 1;
3905     assert(s->mb_y < s->mb_height);
3906
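    /* CurrPicNum/MaxPicNum: fields are numbered individually, so field pictures
     * get twice as many picture numbers (and one extra bit). */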
3907     if(s->picture_structure==PICT_FRAME){
3908         h->curr_pic_num=   h->frame_num;
3909         h->max_pic_num= 1<< h->sps.log2_max_frame_num;
3910     }else{
3911         h->curr_pic_num= 2*h->frame_num + 1;
3912         h->max_pic_num= 1<<(h->sps.log2_max_frame_num + 1);
3913     }
3914
3915     if(h->nal_unit_type == NAL_IDR_SLICE){
3916         get_ue_golomb(&s->gb); /* idr_pic_id */
3917     }
3918
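    /* Picture order count: poc_type 0 transmits the POC LSBs explicitly,
     * poc_type 1 transmits deltas relative to the expected POC; init_poc()
     * derives the final values. */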
3919     if(h->sps.poc_type==0){
3920         h->poc_lsb= get_bits(&s->gb, h->sps.log2_max_poc_lsb);
3921
3922         if(h->pps.pic_order_present==1 && s->picture_structure==PICT_FRAME){
3923             h->delta_poc_bottom= get_se_golomb(&s->gb);
3924         }
3925     }
3926
3927     if(h->sps.poc_type==1 && !h->sps.delta_pic_order_always_zero_flag){
3928         h->delta_poc[0]= get_se_golomb(&s->gb);
3929
3930         if(h->pps.pic_order_present==1 && s->picture_structure==PICT_FRAME)
3931             h->delta_poc[1]= get_se_golomb(&s->gb);
3932     }
3933
3934     init_poc(h);
3935
3936     if(h->pps.redundant_pic_cnt_present){
3937         h->redundant_pic_count= get_ue_golomb(&s->gb);
3938     }
3939
3940     // set defaults, might be overridden a few lines later
3941     h->ref_count[0]= h->pps.ref_count[0];
3942     h->ref_count[1]= h->pps.ref_count[1];
3943
3944     if(h->slice_type_nos != FF_I_TYPE){
3945         if(h->slice_type_nos == FF_B_TYPE){
3946             h->direct_spatial_mv_pred= get_bits1(&s->gb);
3947         }
3948         num_ref_idx_active_override_flag= get_bits1(&s->gb);
3949
3950         if(num_ref_idx_active_override_flag){
3951             h->ref_count[0]= get_ue_golomb(&s->gb) + 1;
3952             if(h->slice_type_nos==FF_B_TYPE)
3953                 h->ref_count[1]= get_ue_golomb(&s->gb) + 1;
3954
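            /* more than 32 references (16 frames, i.e. 32 fields) per list can never be valid */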
3955             if(h->ref_count[0]-1 > 32-1 || h->ref_count[1]-1 > 32-1){
3956                 av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n");
3957                 h->ref_count[0]= h->ref_count[1]= 1;
3958                 return -1;
3959             }
3960         }
3961         if(h->slice_type_nos == FF_B_TYPE)
3962             h->list_count= 2;
3963         else
3964             h->list_count= 1;
3965     }else
3966         h->list_count= 0;
3967
3968     if(!default_ref_list_done){
3969         fill_default_ref_list(h);
3970     }
3971
3972     if(h->slice_type_nos!=FF_I_TYPE && decode_ref_pic_list_reordering(h) < 0)
3973         return -1;
3974
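    /* Explicit weighted prediction (P slices, or B slices with
     * weighted_bipred_idc == 1) transmits the weight tables in the slice header;
     * weighted_bipred_idc == 2 means the weights are derived implicitly from the
     * POC distances. */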
3975     if(   (h->pps.weighted_pred          && h->slice_type_nos == FF_P_TYPE )
3976        ||  (h->pps.weighted_bipred_idc==1 && h->slice_type_nos== FF_B_TYPE ) )
3977         pred_weight_table(h);
3978     else if(h->pps.weighted_bipred_idc==2 && h->slice_type_nos== FF_B_TYPE)
3979         implicit_weight_table(h);
3980     else
3981         h->use_weight = 0;
3982
3983     if(h->nal_ref_idc)
3984         decode_ref_pic_marking(h0, &s->gb);
3985
3986     if(FRAME_MBAFF)
3987         fill_mbaff_ref_list(h);
3988
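    /* cabac_init_idc selects one of three sets of initial CABAC context states
     * (only coded for CABAC streams and non-I slices). */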
3989     if( h->slice_type_nos != FF_I_TYPE && h->pps.cabac ){
3990         tmp = get_ue_golomb(&s->gb);
3991         if(tmp > 2){
3992             av_log(s->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n");
3993             return -1;
3994         }
3995         h->cabac_init_idc= tmp;
3996     }
3997
3998     h->last_qscale_diff = 0;
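    /* slice QP = pic_init_qp + slice_qp_delta; the chroma QPs are then derived
     * per component via the PPS chroma QP offsets and the spec's mapping table. */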
3999     tmp = h->pps.init_qp + get_se_golomb(&s->gb);
4000     if(tmp>51){
4001         av_log(s->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
4002         return -1;
4003     }
4004     s->qscale= tmp;
4005     h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale);
4006     h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale);
4007     //FIXME qscale / qp ... stuff
4008     if(h->slice_type == FF_SP_TYPE){
4009         get_bits1(&s->gb); /* sp_for_switch_flag */
4010     }
4011     if(h->slice_type==FF_SP_TYPE || h->slice_type == FF_SI_TYPE){
4012         get_se_golomb(&s->gb); /* slice_qs_delta */
4013     }
4014
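    /* disable_deblocking_filter_idc: 0 = filter, 1 = no filter,
     * 2 = filter, but not across slice boundaries. Internally 0 and 1 are
     * swapped so that a nonzero value means "deblocking enabled". */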
4015     h->deblocking_filter = 1;
4016     h->slice_alpha_c0_offset = 0;
4017     h->slice_beta_offset = 0;
4018     if( h->pps.deblocking_filter_parameters_present ) {
4019         tmp= get_ue_golomb(&s->gb);
4020         if(tmp > 2){
4021             av_log(s->avctx, AV_LOG_ERROR, "deblocking_filter_idc %u out of range\n", tmp);
4022             return -1;
4023         }
4024         h->deblocking_filter= tmp;
4025         if(h->deblocking_filter < 2)
4026             h->deblocking_filter^= 1; // 1<->0
4027
4028         if( h->deblocking_filter ) {
4029             h->slice_alpha_c0_offset = get_se_golomb(&s->gb) << 1;
4030             h->slice_beta_offset = get_se_golomb(&s->gb) << 1;
4031         }
4032     }
4033
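    // honour the skip_loop_filter option: disable deblocking for the requested picture classes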
4034     if(   s->avctx->skip_loop_filter >= AVDISCARD_ALL
4035        ||(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY && h->slice_type_nos != FF_I_TYPE)
4036        ||(s->avctx->skip_loop_filter >= AVDISCARD_BIDIR  && h->slice_type_nos == FF_B_TYPE)
4037        ||(s->avctx->skip_loop_filter >= AVDISCARD_NONREF && h->nal_ref_idc == 0))
4038         h->deblocking_filter= 0;
4039
4040     if(h->deblocking_filter == 1 && h0->max_contexts > 1) {
4041         if(s->avctx->flags2 & CODEC_FLAG2_FAST) {
4042             /* Cheat slightly for speed:
4043                Do not bother to deblock across slices. */
4044             h->deblocking_filter