int i_mb_4x4 = 16 * h->mb.i_mb_stride * h->mb.i_mb_y + 4 * h->mb.i_mb_x;
int i_mb_8x8 = 4 * h->mb.i_mb_stride * h->mb.i_mb_y + 2 * h->mb.i_mb_x;
int i8, i4;
- int b8x8;
const int type_col = h->fref1[0]->mb_type[ h->mb.i_mb_xy ];
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, 0 );
x264_macroblock_cache_mv( h, 0, 0, 4, 4, 1, 0 );
return 1;
}
- b8x8 = h->sps->b_direct8x8_inference ||
- (type_col != P_8x8 && type_col != B_SKIP && type_col != B_DIRECT && type_col != B_8x8);
for( i8 = 0; i8 < 4; i8++ )
{
if( i_ref >= 0 )
{
const int dist_scale_factor = h->mb.dist_scale_factor[i_ref][0];
-
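+ /* Temporal direct, 8x8 inference: take the colocated MV from the corner 4x4 of this 8x8
+  * and scale it by dist_scale_factor; the L1 MV is the remainder (mv_col - scaled MV). */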
+ const int16_t *mv_col = h->fref1[0]->mv[0][ i_mb_4x4 + 3*x8 + 3*y8 * h->mb.i_b4_stride];
+ const int l0x = ( dist_scale_factor * mv_col[0] + 128 ) >> 8;
+ const int l0y = ( dist_scale_factor * mv_col[1] + 128 ) >> 8;
x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, i_ref );
-
- if( b8x8 )
- {
- const int16_t *mv_col = h->fref1[0]->mv[0][ i_mb_4x4 + 3*x8 + 3*y8 * h->mb.i_b4_stride];
- const int l0x = ( dist_scale_factor * mv_col[0] + 128 ) >> 8;
- const int l0y = ( dist_scale_factor * mv_col[1] + 128 ) >> 8;
- x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 0, pack16to32_mask(l0x, l0y) );
- x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 1, pack16to32_mask(l0x-mv_col[0], l0y-mv_col[1]) );
- }
- else
- {
- for( i4 = 0; i4 < 4; i4++ )
- {
- const int x4 = i4%2 + 2*x8;
- const int y4 = i4/2 + 2*y8;
- const int16_t *mv_col = h->fref1[0]->mv[0][ i_mb_4x4 + x4 + y4 * h->mb.i_b4_stride ];
- const int l0x = ( dist_scale_factor * mv_col[0] + 128 ) >> 8;
- const int l0y = ( dist_scale_factor * mv_col[1] + 128 ) >> 8;
- x264_macroblock_cache_mv( h, x4, y4, 1, 1, 0, pack16to32_mask(l0x, l0y) );
- x264_macroblock_cache_mv( h, x4, y4, 1, 1, 1, pack16to32_mask(l0x-mv_col[0], l0y-mv_col[1]) );
- }
- }
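+ /* One L0/L1 MV pair now covers the whole 8x8 block. */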
+ x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 0, pack16to32_mask(l0x, l0y) );
+ x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 1, pack16to32_mask(l0x-mv_col[0], l0y-mv_col[1]) );
}
else
{
if( h->param.i_threads > 1 )
{
- int di = b8x8 ? 4 : 1;
- for( i4=0; i4<16; i4+=di )
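+ /* Direct MVs are constant within each 8x8, so checking one 4x4 per 8x8 block is enough. */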
+ for( i4=0; i4<16; i4+=4 )
{
if( h->mb.cache.mv[0][x264_scan8[i4]][1] > h->mb.mv_max_spel[1]
|| h->mb.cache.mv[1][x264_scan8[i4]][1] > h->mb.mv_max_spel[1] )
int ref[2];
DECLARE_ALIGNED_8( int16_t mv[2][2] );
int i_list;
- int i8, i4;
- int b8x8;
+ int i8;
const int8_t *l1ref0 = &h->fref1[0]->ref[0][ h->mb.i_b8_xy ];
const int8_t *l1ref1 = &h->fref1[0]->ref[1][ h->mb.i_b8_xy ];
const int16_t (*l1mv0)[2] = (const int16_t (*)[2]) &h->fref1[0]->mv[0][ h->mb.i_b4_xy ];
if( IS_INTRA( type_col ) || (ref[0]&&ref[1]) )
return 1;
- b8x8 = h->sps->b_direct8x8_inference ||
- (type_col != P_8x8 && type_col != B_SKIP && type_col != B_DIRECT && type_col != B_8x8);
-
/* col_zero_flag */
for( i8=0; i8<4; i8++ )
{
if( l1ref0[o8] == 0 || ( l1ref0[o8] < 0 && l1ref1[o8] == 0 ) )
{
const int16_t (*l1mv)[2] = (l1ref0[o8] == 0) ? l1mv0 : l1mv1;
- if( b8x8 )
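+ /* 8x8 inference: sample the colocated MV at the outer-corner 4x4 of this 8x8. */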
+ const int16_t *mvcol = l1mv[3*x8 + 3*y8 * h->mb.i_b4_stride];
+ if( abs( mvcol[0] ) <= 1 && abs( mvcol[1] ) <= 1 )
{
- const int16_t *mvcol = l1mv[3*x8 + 3*y8 * h->mb.i_b4_stride];
- if( abs( mvcol[0] ) <= 1 && abs( mvcol[1] ) <= 1 )
- {
- if( ref[0] == 0 )
- x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 0, 0 );
- if( ref[1] == 0 )
- x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 1, 0 );
- }
- }
- else
- {
- for( i4=0; i4<4; i4++ )
- {
- const int x4 = i4%2 + 2*x8;
- const int y4 = i4/2 + 2*y8;
- const int16_t *mvcol = l1mv[x4 + y4 * h->mb.i_b4_stride];
- if( abs( mvcol[0] ) <= 1 && abs( mvcol[1] ) <= 1 )
- {
- if( ref[0] == 0 )
- x264_macroblock_cache_mv( h, x4, y4, 1, 1, 0, 0 );
- if( ref[1] == 0 )
- x264_macroblock_cache_mv( h, x4, y4, 1, 1, 1, 0 );
- }
- }
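+ /* Colocated block is (nearly) static: force a zero MV for each list whose spatial ref is 0. */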
+ if( ref[0] == 0 )
+ x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 0, 0 );
+ if( ref[1] == 0 )
+ x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 1, 0 );
}
}
}
{
const int i8 = x264_scan8[0] + x + 8*y;
- /* FIXME: optimize based on current block size, not global settings? */
- if( h->sps->b_direct8x8_inference )
- {
- if( h->mb.cache.ref[0][i8] >= 0 )
- if( h->mb.cache.ref[1][i8] >= 0 )
- x264_mb_mc_01xywh( h, x, y, 2, 2 );
- else
- x264_mb_mc_0xywh( h, x, y, 2, 2 );
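+ /* Direct 8x8: a single MC call per 8x8, bi-predicted when both lists have a valid ref. */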
+ if( h->mb.cache.ref[0][i8] >= 0 )
+ if( h->mb.cache.ref[1][i8] >= 0 )
+ x264_mb_mc_01xywh( h, x, y, 2, 2 );
else
- x264_mb_mc_1xywh( h, x, y, 2, 2 );
- }
+ x264_mb_mc_0xywh( h, x, y, 2, 2 );
else
- {
- if( h->mb.cache.ref[0][i8] >= 0 )
- {
- if( h->mb.cache.ref[1][i8] >= 0 )
- {
- x264_mb_mc_01xywh( h, x+0, y+0, 1, 1 );
- x264_mb_mc_01xywh( h, x+1, y+0, 1, 1 );
- x264_mb_mc_01xywh( h, x+0, y+1, 1, 1 );
- x264_mb_mc_01xywh( h, x+1, y+1, 1, 1 );
- }
- else
- {
- x264_mb_mc_0xywh( h, x+0, y+0, 1, 1 );
- x264_mb_mc_0xywh( h, x+1, y+0, 1, 1 );
- x264_mb_mc_0xywh( h, x+0, y+1, 1, 1 );
- x264_mb_mc_0xywh( h, x+1, y+1, 1, 1 );
- }
- }
- else
- {
- x264_mb_mc_1xywh( h, x+0, y+0, 1, 1 );
- x264_mb_mc_1xywh( h, x+1, y+0, 1, 1 );
- x264_mb_mc_1xywh( h, x+0, y+1, 1, 1 );
- x264_mb_mc_1xywh( h, x+1, y+1, 1, 1 );
- }
- }
+ x264_mb_mc_1xywh( h, x, y, 2, 2 );
}
void x264_mb_mc_8x8( x264_t *h, int i8 )
case D_L1_8x8:
x264_mb_mc_1xywh( h, x, y, 2, 2 );
break;
- case D_L1_8x4:
- x264_mb_mc_1xywh( h, x, y+0, 2, 1 );
- x264_mb_mc_1xywh( h, x, y+1, 2, 1 );
- break;
- case D_L1_4x8:
- x264_mb_mc_1xywh( h, x+0, y, 1, 2 );
- x264_mb_mc_1xywh( h, x+1, y, 1, 2 );
- break;
- case D_L1_4x4:
- x264_mb_mc_1xywh( h, x+0, y+0, 1, 1 );
- x264_mb_mc_1xywh( h, x+1, y+0, 1, 1 );
- x264_mb_mc_1xywh( h, x+0, y+1, 1, 1 );
- x264_mb_mc_1xywh( h, x+1, y+1, 1, 1 );
- break;
case D_BI_8x8:
x264_mb_mc_01xywh( h, x, y, 2, 2 );
break;
- case D_BI_8x4:
- x264_mb_mc_01xywh( h, x, y+0, 2, 1 );
- x264_mb_mc_01xywh( h, x, y+1, 2, 1 );
- break;
- case D_BI_4x8:
- x264_mb_mc_01xywh( h, x+0, y, 1, 2 );
- x264_mb_mc_01xywh( h, x+1, y, 1, 2 );
- break;
- case D_BI_4x4:
- x264_mb_mc_01xywh( h, x+0, y+0, 1, 1 );
- x264_mb_mc_01xywh( h, x+1, y+0, 1, 1 );
- x264_mb_mc_01xywh( h, x+0, y+1, 1, 1 );
- x264_mb_mc_01xywh( h, x+1, y+1, 1, 1 );
- break;
case D_DIRECT_8x8:
x264_mb_mc_direct8x8( h, x, y );
break;