SSSE3/SSE4/AVX 9-way fully merged i8x8 analysis (sa8d_x9)
diff --git a/common/mc.c b/common/mc.c
index 96cc650..c2b77f5 100644
@@ -1,7 +1,7 @@
 /*****************************************************************************
  * mc.c: motion compensation
  *****************************************************************************
- * Copyright (C) 2003-2010 x264 project
+ * Copyright (C) 2003-2011 x264 project
  *
  * Authors: Laurent Aimar <fenrir@via.ecp.fr>
  *          Loren Merritt <lorenm@u.washington.edu>
@@ -90,9 +90,11 @@ PIXEL_AVG_C( pixel_avg_16x8,  16, 8 )
 PIXEL_AVG_C( pixel_avg_8x16,  8, 16 )
 PIXEL_AVG_C( pixel_avg_8x8,   8, 8 )
 PIXEL_AVG_C( pixel_avg_8x4,   8, 4 )
+PIXEL_AVG_C( pixel_avg_4x16,  4, 16 )
 PIXEL_AVG_C( pixel_avg_4x8,   4, 8 )
 PIXEL_AVG_C( pixel_avg_4x4,   4, 4 )
 PIXEL_AVG_C( pixel_avg_4x2,   4, 2 )
+PIXEL_AVG_C( pixel_avg_2x8,   2, 8 )
 PIXEL_AVG_C( pixel_avg_2x4,   2, 4 )
 PIXEL_AVG_C( pixel_avg_2x2,   2, 2 )
 
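The two new PIXEL_AVG_C entries (4x16 and 2x8) are generated by the same macro as the existing sizes. As a rough guide, here is a minimal standalone sketch of what such a wrapper presumably boils down to in the unweighted case: plain bi-prediction averaging with (a + b + 1) >> 1 rounding. The function name and the pixel typedef are assumptions for illustration, not taken from this diff.

    typedef unsigned char pixel;   /* assumed 8-bit pixel type for the sketch */

    /* Average two reference blocks into dst, one row at a time. */
    static void pixel_avg_wxh_sketch( pixel *dst,  int i_dst,
                                      pixel *src1, int i_src1,
                                      pixel *src2, int i_src2,
                                      int width, int height )
    {
        for( int y = 0; y < height; y++ )
        {
            for( int x = 0; x < width; x++ )
                dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;   /* round-to-nearest average */
            dst  += i_dst;
            src1 += i_src1;
            src2 += i_src2;
        }
    }
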
@@ -143,7 +145,7 @@ static weight_fn_t x264_mc_weight_wtab[6] =
     mc_weight_w16,
     mc_weight_w20,
 };
-const x264_weight_t weight_none[3] = { {{0}} };
+const x264_weight_t x264_weight_none[3] = { {{0}} };
 static void mc_copy( pixel *src, int i_src_stride, pixel *dst, int i_dst_stride, int i_width, int i_height )
 {
     for( int y = 0; y < i_height; y++ )
@@ -280,7 +282,7 @@ MC_COPY( 8 )
 MC_COPY( 4 )
 
 void x264_plane_copy_c( pixel *dst, int i_dst,
-                        uint8_t *src, int i_src, int w, int h )
+                        pixel *src, int i_src, int w, int h )
 {
     while( h-- )
     {
@@ -314,9 +316,25 @@ void x264_plane_copy_deinterleave_c( pixel *dstu, int i_dstu,
         }
 }
 
-static void store_interleave_8x8x2( pixel *dst, int i_dst, pixel *srcu, pixel *srcv )
+void x264_plane_copy_deinterleave_rgb_c( pixel *dsta, int i_dsta,
+                                         pixel *dstb, int i_dstb,
+                                         pixel *dstc, int i_dstc,
+                                         pixel *src, int i_src, int pw, int w, int h )
 {
-    for( int y=0; y<8; y++, dst+=i_dst, srcu+=FDEC_STRIDE, srcv+=FDEC_STRIDE )
+    for( int y=0; y<h; y++, dsta+=i_dsta, dstb+=i_dstb, dstc+=i_dstc, src+=i_src )
+    {
+        for( int x=0; x<w; x++ )
+        {
+            dsta[x] = src[x*pw];
+            dstb[x] = src[x*pw+1];
+            dstc[x] = src[x*pw+2];
+        }
+    }
+}
+
+static void store_interleave_chroma( pixel *dst, int i_dst, pixel *srcu, pixel *srcv, int height )
+{
+    for( int y=0; y<height; y++, dst+=i_dst, srcu+=FDEC_STRIDE, srcv+=FDEC_STRIDE )
         for( int x=0; x<8; x++ )
         {
             dst[2*x]   = srcu[x];
@@ -324,14 +342,14 @@ static void store_interleave_8x8x2( pixel *dst, int i_dst, pixel *srcu, pixel *s
         }
 }
 
-static void load_deinterleave_8x8x2_fenc( pixel *dst, pixel *src, int i_src )
+static void load_deinterleave_chroma_fenc( pixel *dst, pixel *src, int i_src, int height )
 {
-    x264_plane_copy_deinterleave_c( dst, FENC_STRIDE, dst+FENC_STRIDE/2, FENC_STRIDE, src, i_src, 8, 8 );
+    x264_plane_copy_deinterleave_c( dst, FENC_STRIDE, dst+FENC_STRIDE/2, FENC_STRIDE, src, i_src, 8, height );
 }
 
-static void load_deinterleave_8x8x2_fdec( pixel *dst, pixel *src, int i_src )
+static void load_deinterleave_chroma_fdec( pixel *dst, pixel *src, int i_src, int height )
 {
-    x264_plane_copy_deinterleave_c( dst, FDEC_STRIDE, dst+FDEC_STRIDE/2, FDEC_STRIDE, src, i_src, 8, 8 );
+    x264_plane_copy_deinterleave_c( dst, FDEC_STRIDE, dst+FDEC_STRIDE/2, FDEC_STRIDE, src, i_src, 8, height );
 }
 
 static void prefetch_fenc_null( pixel *pix_y, int stride_y,
@@ -431,30 +449,19 @@ static void frame_init_lowres_core( pixel *src0, pixel *dst0, pixel *dsth, pixel
     }
 }
 
-#if defined(__GNUC__) && (ARCH_X86 || ARCH_X86_64)
-// gcc isn't smart enough to use the "idiv" instruction
-static ALWAYS_INLINE int32_t div_64_32(int64_t x, int32_t y)
-{
-    int32_t quotient, remainder;
-    asm("idiv %4"
-        :"=a"(quotient), "=d"(remainder)
-        :"a"((uint32_t)x), "d"((int32_t)(x>>32)), "r"(y)
-    );
-    return quotient;
-}
-#else
-#define div_64_32(x,y) ((x)/(y))
-#endif
-
 /* Estimate the total amount of influence on future quality that could be had if we
  * were to improve the reference samples used to inter predict any given macroblock. */
 static void mbtree_propagate_cost( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
-                                   uint16_t *inter_costs, uint16_t *inv_qscales, int len )
+                                   uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len )
 {
+    float fps = *fps_factor / 256.f;
     for( int i = 0; i < len; i++ )
     {
-        int propagate_amount = propagate_in[i] + ((intra_costs[i] * inv_qscales[i] + 128)>>8);
-        dst[i] = div_64_32((int64_t)propagate_amount * (intra_costs[i] - (inter_costs[i] & LOWRES_COST_MASK)), intra_costs[i]);
+        float intra_cost       = intra_costs[i] * inv_qscales[i];
+        float propagate_amount = propagate_in[i] + intra_cost*fps;
+        float propagate_num    = intra_costs[i] - (inter_costs[i] & LOWRES_COST_MASK);
+        float propagate_denom  = intra_costs[i];
+        dst[i] = (int)(propagate_amount * propagate_num / propagate_denom + 0.5f);
     }
 }
 
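The rewrite above drops the 64-bit fixed-point division (and the idiv helper it required) in favor of single-precision float math; the old "+128 >> 8" normalization is now performed through the new fps_factor scale, which is itself divided by 256. A small self-contained check of that equivalence, assuming *fps_factor == 1.0f, arbitrary sample costs, and an illustrative LOWRES_COST_MASK value (all assumptions, not taken from this diff):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed width of the lowres inter-cost field; illustration only. */
    #define LOWRES_COST_MASK ((1<<14)-1)

    int main( void )
    {
        /* arbitrary sample costs for the comparison */
        uint16_t propagate_in = 1200, intra_cost = 900, inter_cost = 640, inv_qscale = 310;
        float fps = 1.0f / 256.f;   /* *fps_factor == 1.0f for this check */

        /* removed fixed-point path */
        int amount_i = propagate_in + ((intra_cost * inv_qscale + 128) >> 8);
        int dst_i    = (int)((int64_t)amount_i * (intra_cost - (inter_cost & LOWRES_COST_MASK)) / intra_cost);

        /* new float path */
        float amount_f = propagate_in + intra_cost * inv_qscale * fps;
        float num      = intra_cost - (inter_cost & LOWRES_COST_MASK);
        int dst_f      = (int)(amount_f * num / intra_cost + 0.5f);

        printf( "fixed-point: %d  float: %d\n", dst_i, dst_f );   /* differ only by rounding */
        return 0;
    }
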
@@ -462,6 +469,7 @@ void x264_mc_init( int cpu, x264_mc_functions_t *pf )
 {
     pf->mc_luma   = mc_luma;
     pf->get_ref   = get_ref;
+
     pf->mc_chroma = mc_chroma;
 
     pf->avg[PIXEL_16x16]= pixel_avg_16x16;
@@ -469,9 +477,11 @@ void x264_mc_init( int cpu, x264_mc_functions_t *pf )
     pf->avg[PIXEL_8x16] = pixel_avg_8x16;
     pf->avg[PIXEL_8x8]  = pixel_avg_8x8;
     pf->avg[PIXEL_8x4]  = pixel_avg_8x4;
+    pf->avg[PIXEL_4x16] = pixel_avg_4x16;
     pf->avg[PIXEL_4x8]  = pixel_avg_4x8;
     pf->avg[PIXEL_4x4]  = pixel_avg_4x4;
     pf->avg[PIXEL_4x2]  = pixel_avg_4x2;
+    pf->avg[PIXEL_2x8]  = pixel_avg_2x8;
     pf->avg[PIXEL_2x4]  = pixel_avg_2x4;
     pf->avg[PIXEL_2x2]  = pixel_avg_2x2;
 
@@ -485,13 +495,14 @@ void x264_mc_init( int cpu, x264_mc_functions_t *pf )
     pf->copy[PIXEL_8x8]   = mc_copy_w8;
     pf->copy[PIXEL_4x4]   = mc_copy_w4;
 
-    pf->store_interleave_8x8x2  = store_interleave_8x8x2;
-    pf->load_deinterleave_8x8x2_fenc = load_deinterleave_8x8x2_fenc;
-    pf->load_deinterleave_8x8x2_fdec = load_deinterleave_8x8x2_fdec;
+    pf->store_interleave_chroma       = store_interleave_chroma;
+    pf->load_deinterleave_chroma_fenc = load_deinterleave_chroma_fenc;
+    pf->load_deinterleave_chroma_fdec = load_deinterleave_chroma_fdec;
 
     pf->plane_copy = x264_plane_copy_c;
     pf->plane_copy_interleave = x264_plane_copy_interleave_c;
     pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_c;
+    pf->plane_copy_deinterleave_rgb = x264_plane_copy_deinterleave_rgb_c;
 
     pf->hpel_filter = hpel_filter;
 
@@ -522,25 +533,46 @@ void x264_mc_init( int cpu, x264_mc_functions_t *pf )
 
 void x264_frame_filter( x264_t *h, x264_frame_t *frame, int mb_y, int b_end )
 {
-    const int b_interlaced = h->sh.b_mbaff;
-    const int stride = frame->i_stride[0] << b_interlaced;
-    const int width = frame->i_width[0];
-    int start = (mb_y*16 >> b_interlaced) - 8; // buffer = 4 for deblock + 3 for 6tap, rounded to 8
-    int height = ((b_end ? frame->i_lines[0] : mb_y*16) >> b_interlaced) + 8;
-    int offs = start*stride - 8; // buffer = 3 for 6tap, aligned to 8 for simd
+    const int b_interlaced = PARAM_INTERLACED;
+    int start = mb_y*16 - 8; // buffer = 4 for deblock + 3 for 6tap, rounded to 8
+    int height = (b_end ? frame->i_lines[0] + 16*PARAM_INTERLACED : (mb_y+b_interlaced)*16) + 8;
 
     if( mb_y & b_interlaced )
         return;
 
-    for( int y = 0; y <= b_interlaced; y++, offs += frame->i_stride[0] )
+    for( int p = 0; p < (CHROMA444 ? 3 : 1); p++ )
     {
-        h->mc.hpel_filter(
-            frame->filtered[1] + offs,
-            frame->filtered[2] + offs,
-            frame->filtered[3] + offs,
-            frame->plane[0] + offs,
-            stride, width + 16, height - start,
-            h->scratch_buffer );
+        int stride = frame->i_stride[p];
+        const int width = frame->i_width[p];
+        int offs = start*stride - 8; // buffer = 3 for 6tap, aligned to 8 for simd
+
+        if( !b_interlaced || h->mb.b_adaptive_mbaff )
+            h->mc.hpel_filter(
+                frame->filtered[p][1] + offs,
+                frame->filtered[p][2] + offs,
+                frame->filtered[p][3] + offs,
+                frame->plane[p] + offs,
+                stride, width + 16, height - start,
+                h->scratch_buffer );
+
+        if( b_interlaced )
+        {
+            /* MC must happen between pixels in the same field. */
+            stride = frame->i_stride[p] << 1;
+            start = (mb_y*16 >> 1) - 8;
+            int height_fld = ((b_end ? frame->i_lines[p] : mb_y*16) >> 1) + 8;
+            offs = start*stride - 8;
+            for( int i = 0; i < 2; i++, offs += frame->i_stride[p] )
+            {
+                h->mc.hpel_filter(
+                    frame->filtered_fld[p][1] + offs,
+                    frame->filtered_fld[p][2] + offs,
+                    frame->filtered_fld[p][3] + offs,
+                    frame->plane_fld[p] + offs,
+                    stride, width + 16, height_fld - start,
+                    h->scratch_buffer );
+            }
+        }
     }
 
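The interlaced branch above filters each field separately, hence the doubled stride and the two-iteration loop that steps by one frame row to reach the other field. A tiny standalone illustration of that addressing pattern, with the plane layout and values invented for the example:

    #include <stdio.h>

    int main( void )
    {
        int stride = 4, rows = 6;
        int plane[6*4];
        /* row-major plane: each pixel stores its own row number */
        for( int y = 0; y < rows; y++ )
            for( int x = 0; x < stride; x++ )
                plane[y*stride + x] = y;

        int stride_fld = stride << 1;              /* step over rows of the same field */
        for( int field = 0; field < 2; field++ )   /* field 0 = even rows, field 1 = odd rows */
        {
            int *fld = plane + field*stride;       /* advance one frame row to select the other field */
            printf( "field %d rows:", field );
            for( int y = 0; y < rows/2; y++ )
                printf( " %d", fld[y*stride_fld] );
            printf( "\n" );
        }
        return 0;
    }
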
     /* generate integral image:
@@ -550,6 +582,7 @@ void x264_frame_filter( x264_t *h, x264_frame_t *frame, int mb_y, int b_end )
 
     if( frame->integral )
     {
+        int stride = frame->i_stride[0];
         if( start < 0 )
         {
             memset( frame->integral - PADV * stride - PADH, 0, stride * sizeof(uint16_t) );