dsputil: Separate h264 qpel
author Mans Rullgard <mans@mansr.com>
Thu, 24 Jan 2013 04:24:53 +0000 (20:24 -0800)
committer Luca Barbato <lu_zero@gentoo.org>
Thu, 24 Jan 2013 09:44:43 +0000 (10:44 +0100)
The sh4 optimizations are removed because the code is
100% identical to the C code and thus unlikely to
provide any real practical benefit.

Signed-off-by: Diego Biurrun <diego@biurrun.de>
Signed-off-by: Ronald S. Bultje <rsbultje@gmail.com>
Signed-off-by: Luca Barbato <lu_zero@gentoo.org>
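
For reference, the split moves the H.264 quarter-pel motion-compensation
function pointers out of DSPContext into a dedicated H264QpelContext,
filled in by a new generic init that dispatches to per-architecture
versions such as ff_h264qpel_init_arm() below.  A minimal C sketch of the
resulting interface, inferred from the diff that follows; the exact array
sizes and the mc_luma_example() helper are illustrative assumptions, not
the actual libavcodec/h264qpel.h:

    #include <stdint.h>

    /* Function pointer type matching the NEON prototypes declared in the
     * diff: destination, source, stride. */
    typedef void (*qpel_mc_func)(uint8_t *dst, uint8_t *src, int stride);

    typedef struct H264QpelContext {
        /* Indexed as [size][x + 4 * y]; size 0 = 16x16 blocks, 1 = 8x8.
         * x/y are the quarter-pel offsets encoded in the mcXY names,
         * e.g. entry [0][7] is the qpel16 mc31 function (x = 3, y = 1). */
        qpel_mc_func put_h264_qpel_pixels_tab[4][16];
        qpel_mc_func avg_h264_qpel_pixels_tab[4][16];
    } H264QpelContext;

    void ff_h264qpel_init(H264QpelContext *c, int bit_depth);

    /* Hypothetical decoder-side usage: pick the entry matching the block
     * size and sub-pel position and run it over the reference block. */
    static void mc_luma_example(H264QpelContext *c, uint8_t *dst,
                                uint8_t *src, int stride, int x, int y)
    {
        c->put_h264_qpel_pixels_tab[0][x + 4 * y](dst, src, stride);
    }
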
28 files changed:
libavcodec/Makefile
libavcodec/arm/Makefile
libavcodec/arm/dsputil_init_neon.c
libavcodec/arm/h264dsp_neon.S
libavcodec/arm/h264qpel_init_arm.c [new file with mode: 0644]
libavcodec/arm/h264qpel_neon.S [new file with mode: 0644]
libavcodec/dsputil.c
libavcodec/dsputil.h
libavcodec/dsputil_template.c
libavcodec/h264.c
libavcodec/h264.h
libavcodec/h264qpel.c [new file with mode: 0644]
libavcodec/h264qpel.h [new file with mode: 0644]
libavcodec/h264qpel_template.c [new file with mode: 0644]
libavcodec/hpel_template.c [new file with mode: 0644]
libavcodec/ppc/Makefile
libavcodec/ppc/h264_altivec.c
libavcodec/ppc/h264_altivec_template.c [deleted file]
libavcodec/ppc/h264_qpel.c [new file with mode: 0644]
libavcodec/ppc/h264_qpel_template.c [new file with mode: 0644]
libavcodec/ppc/vc1dsp_altivec.c
libavcodec/rv30dsp.c
libavcodec/rv40dsp.c
libavcodec/sh4/dsputil_align.c
libavcodec/sh4/qpel.c
libavcodec/x86/Makefile
libavcodec/x86/dsputil_mmx.c
libavcodec/x86/h264_qpel.c

diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 3f8f280..774ea57 100644
@@ -46,6 +46,7 @@ OBJS-$(CONFIG_FFT)                     += avfft.o fft_fixed.o fft_float.o \
 OBJS-$(CONFIG_GOLOMB)                  += golomb.o
 OBJS-$(CONFIG_H264DSP)                 += h264dsp.o h264idct.o
 OBJS-$(CONFIG_H264PRED)                += h264pred.o
+OBJS-$(CONFIG_H264QPEL)                += h264qpel.o
 OBJS-$(CONFIG_HUFFMAN)                 += huffman.o
 OBJS-$(CONFIG_LIBXVID)                 += libxvid_rc.o
 OBJS-$(CONFIG_LPC)                     += lpc.o
diff --git a/libavcodec/arm/Makefile b/libavcodec/arm/Makefile
index 5ebda62..b8d78d6 100644
@@ -26,6 +26,7 @@ ARMV6-OBJS-$(CONFIG_VP8_DECODER)       += arm/vp8_armv6.o               \
 
 OBJS-$(CONFIG_H264DSP)                 += arm/h264dsp_init_arm.o
 OBJS-$(CONFIG_H264PRED)                += arm/h264pred_init_arm.o
+OBJS-$(CONFIG_H264QPEL)                += arm/h264qpel_init_arm.o
 
 OBJS-$(CONFIG_RV30_DECODER)            += arm/rv34dsp_init_arm.o
 OBJS-$(CONFIG_RV40_DECODER)            += arm/rv34dsp_init_arm.o        \
@@ -70,6 +71,8 @@ NEON-OBJS-$(CONFIG_H264DSP)            += arm/h264dsp_neon.o            \
 
 NEON-OBJS-$(CONFIG_H264PRED)           += arm/h264pred_neon.o           \
 
+NEON-OBJS-$(CONFIG_H264QPEL)           += arm/h264qpel_neon.o           \
+
 NEON-OBJS-$(CONFIG_AC3DSP)             += arm/ac3dsp_neon.o
 
 NEON-OBJS-$(CONFIG_AAC_DECODER)        += arm/sbrdsp_neon.o             \
diff --git a/libavcodec/arm/dsputil_init_neon.c b/libavcodec/arm/dsputil_init_neon.c
index 4cd8f7d..e6544eb 100644
@@ -63,74 +63,6 @@ void ff_add_pixels_clamped_neon(const int16_t *, uint8_t *, int);
 void ff_put_pixels_clamped_neon(const int16_t *, uint8_t *, int);
 void ff_put_signed_pixels_clamped_neon(const int16_t *, uint8_t *, int);
 
-void ff_put_h264_qpel16_mc00_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel16_mc10_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel16_mc20_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel16_mc30_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel16_mc01_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel16_mc11_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel16_mc21_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel16_mc31_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel16_mc02_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel16_mc12_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel16_mc22_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel16_mc32_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel16_mc03_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel16_mc13_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel16_mc23_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel16_mc33_neon(uint8_t *, uint8_t *, int);
-
-void ff_put_h264_qpel8_mc00_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel8_mc10_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel8_mc20_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel8_mc30_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel8_mc01_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel8_mc11_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel8_mc21_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel8_mc31_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel8_mc02_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel8_mc12_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel8_mc22_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel8_mc32_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel8_mc03_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel8_mc13_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel8_mc23_neon(uint8_t *, uint8_t *, int);
-void ff_put_h264_qpel8_mc33_neon(uint8_t *, uint8_t *, int);
-
-void ff_avg_h264_qpel16_mc00_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel16_mc10_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel16_mc20_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel16_mc30_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel16_mc01_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel16_mc11_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel16_mc21_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel16_mc31_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel16_mc02_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel16_mc12_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel16_mc22_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel16_mc32_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel16_mc03_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel16_mc13_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel16_mc23_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel16_mc33_neon(uint8_t *, uint8_t *, int);
-
-void ff_avg_h264_qpel8_mc00_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel8_mc10_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel8_mc20_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel8_mc30_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel8_mc01_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel8_mc11_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel8_mc21_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel8_mc31_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel8_mc02_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel8_mc12_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel8_mc22_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel8_mc32_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel8_mc03_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel8_mc13_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel8_mc23_neon(uint8_t *, uint8_t *, int);
-void ff_avg_h264_qpel8_mc33_neon(uint8_t *, uint8_t *, int);
-
 void ff_put_h264_chroma_mc8_neon(uint8_t *, uint8_t *, int, int, int, int);
 void ff_put_h264_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
 void ff_put_h264_chroma_mc2_neon(uint8_t *, uint8_t *, int, int, int, int);
@@ -214,74 +146,6 @@ void ff_dsputil_init_neon(DSPContext *c, AVCodecContext *avctx)
         c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_neon;
         c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_neon;
         c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_neon;
-
-        c->put_h264_qpel_pixels_tab[0][ 0] = ff_put_h264_qpel16_mc00_neon;
-        c->put_h264_qpel_pixels_tab[0][ 1] = ff_put_h264_qpel16_mc10_neon;
-        c->put_h264_qpel_pixels_tab[0][ 2] = ff_put_h264_qpel16_mc20_neon;
-        c->put_h264_qpel_pixels_tab[0][ 3] = ff_put_h264_qpel16_mc30_neon;
-        c->put_h264_qpel_pixels_tab[0][ 4] = ff_put_h264_qpel16_mc01_neon;
-        c->put_h264_qpel_pixels_tab[0][ 5] = ff_put_h264_qpel16_mc11_neon;
-        c->put_h264_qpel_pixels_tab[0][ 6] = ff_put_h264_qpel16_mc21_neon;
-        c->put_h264_qpel_pixels_tab[0][ 7] = ff_put_h264_qpel16_mc31_neon;
-        c->put_h264_qpel_pixels_tab[0][ 8] = ff_put_h264_qpel16_mc02_neon;
-        c->put_h264_qpel_pixels_tab[0][ 9] = ff_put_h264_qpel16_mc12_neon;
-        c->put_h264_qpel_pixels_tab[0][10] = ff_put_h264_qpel16_mc22_neon;
-        c->put_h264_qpel_pixels_tab[0][11] = ff_put_h264_qpel16_mc32_neon;
-        c->put_h264_qpel_pixels_tab[0][12] = ff_put_h264_qpel16_mc03_neon;
-        c->put_h264_qpel_pixels_tab[0][13] = ff_put_h264_qpel16_mc13_neon;
-        c->put_h264_qpel_pixels_tab[0][14] = ff_put_h264_qpel16_mc23_neon;
-        c->put_h264_qpel_pixels_tab[0][15] = ff_put_h264_qpel16_mc33_neon;
-
-        c->put_h264_qpel_pixels_tab[1][ 0] = ff_put_h264_qpel8_mc00_neon;
-        c->put_h264_qpel_pixels_tab[1][ 1] = ff_put_h264_qpel8_mc10_neon;
-        c->put_h264_qpel_pixels_tab[1][ 2] = ff_put_h264_qpel8_mc20_neon;
-        c->put_h264_qpel_pixels_tab[1][ 3] = ff_put_h264_qpel8_mc30_neon;
-        c->put_h264_qpel_pixels_tab[1][ 4] = ff_put_h264_qpel8_mc01_neon;
-        c->put_h264_qpel_pixels_tab[1][ 5] = ff_put_h264_qpel8_mc11_neon;
-        c->put_h264_qpel_pixels_tab[1][ 6] = ff_put_h264_qpel8_mc21_neon;
-        c->put_h264_qpel_pixels_tab[1][ 7] = ff_put_h264_qpel8_mc31_neon;
-        c->put_h264_qpel_pixels_tab[1][ 8] = ff_put_h264_qpel8_mc02_neon;
-        c->put_h264_qpel_pixels_tab[1][ 9] = ff_put_h264_qpel8_mc12_neon;
-        c->put_h264_qpel_pixels_tab[1][10] = ff_put_h264_qpel8_mc22_neon;
-        c->put_h264_qpel_pixels_tab[1][11] = ff_put_h264_qpel8_mc32_neon;
-        c->put_h264_qpel_pixels_tab[1][12] = ff_put_h264_qpel8_mc03_neon;
-        c->put_h264_qpel_pixels_tab[1][13] = ff_put_h264_qpel8_mc13_neon;
-        c->put_h264_qpel_pixels_tab[1][14] = ff_put_h264_qpel8_mc23_neon;
-        c->put_h264_qpel_pixels_tab[1][15] = ff_put_h264_qpel8_mc33_neon;
-
-        c->avg_h264_qpel_pixels_tab[0][ 0] = ff_avg_h264_qpel16_mc00_neon;
-        c->avg_h264_qpel_pixels_tab[0][ 1] = ff_avg_h264_qpel16_mc10_neon;
-        c->avg_h264_qpel_pixels_tab[0][ 2] = ff_avg_h264_qpel16_mc20_neon;
-        c->avg_h264_qpel_pixels_tab[0][ 3] = ff_avg_h264_qpel16_mc30_neon;
-        c->avg_h264_qpel_pixels_tab[0][ 4] = ff_avg_h264_qpel16_mc01_neon;
-        c->avg_h264_qpel_pixels_tab[0][ 5] = ff_avg_h264_qpel16_mc11_neon;
-        c->avg_h264_qpel_pixels_tab[0][ 6] = ff_avg_h264_qpel16_mc21_neon;
-        c->avg_h264_qpel_pixels_tab[0][ 7] = ff_avg_h264_qpel16_mc31_neon;
-        c->avg_h264_qpel_pixels_tab[0][ 8] = ff_avg_h264_qpel16_mc02_neon;
-        c->avg_h264_qpel_pixels_tab[0][ 9] = ff_avg_h264_qpel16_mc12_neon;
-        c->avg_h264_qpel_pixels_tab[0][10] = ff_avg_h264_qpel16_mc22_neon;
-        c->avg_h264_qpel_pixels_tab[0][11] = ff_avg_h264_qpel16_mc32_neon;
-        c->avg_h264_qpel_pixels_tab[0][12] = ff_avg_h264_qpel16_mc03_neon;
-        c->avg_h264_qpel_pixels_tab[0][13] = ff_avg_h264_qpel16_mc13_neon;
-        c->avg_h264_qpel_pixels_tab[0][14] = ff_avg_h264_qpel16_mc23_neon;
-        c->avg_h264_qpel_pixels_tab[0][15] = ff_avg_h264_qpel16_mc33_neon;
-
-        c->avg_h264_qpel_pixels_tab[1][ 0] = ff_avg_h264_qpel8_mc00_neon;
-        c->avg_h264_qpel_pixels_tab[1][ 1] = ff_avg_h264_qpel8_mc10_neon;
-        c->avg_h264_qpel_pixels_tab[1][ 2] = ff_avg_h264_qpel8_mc20_neon;
-        c->avg_h264_qpel_pixels_tab[1][ 3] = ff_avg_h264_qpel8_mc30_neon;
-        c->avg_h264_qpel_pixels_tab[1][ 4] = ff_avg_h264_qpel8_mc01_neon;
-        c->avg_h264_qpel_pixels_tab[1][ 5] = ff_avg_h264_qpel8_mc11_neon;
-        c->avg_h264_qpel_pixels_tab[1][ 6] = ff_avg_h264_qpel8_mc21_neon;
-        c->avg_h264_qpel_pixels_tab[1][ 7] = ff_avg_h264_qpel8_mc31_neon;
-        c->avg_h264_qpel_pixels_tab[1][ 8] = ff_avg_h264_qpel8_mc02_neon;
-        c->avg_h264_qpel_pixels_tab[1][ 9] = ff_avg_h264_qpel8_mc12_neon;
-        c->avg_h264_qpel_pixels_tab[1][10] = ff_avg_h264_qpel8_mc22_neon;
-        c->avg_h264_qpel_pixels_tab[1][11] = ff_avg_h264_qpel8_mc32_neon;
-        c->avg_h264_qpel_pixels_tab[1][12] = ff_avg_h264_qpel8_mc03_neon;
-        c->avg_h264_qpel_pixels_tab[1][13] = ff_avg_h264_qpel8_mc13_neon;
-        c->avg_h264_qpel_pixels_tab[1][14] = ff_avg_h264_qpel8_mc23_neon;
-        c->avg_h264_qpel_pixels_tab[1][15] = ff_avg_h264_qpel8_mc33_neon;
     }
 
     c->vector_clipf               = ff_vector_clipf_neon;
diff --git a/libavcodec/arm/h264dsp_neon.S b/libavcodec/arm/h264dsp_neon.S
index 9daabe0..5e75565 100644
@@ -271,939 +271,6 @@ function ff_h264_h_loop_filter_chroma_neon, export=1
         bx              lr
 endfunc
 
-        /* H.264 qpel MC */
-
-.macro  lowpass_const   r
-        movw            \r,  #5
-        movt            \r,  #20
-        vmov.32         d6[0], \r
-.endm
-
-.macro  lowpass_8       r0,  r1,  r2,  r3,  d0,  d1,  narrow=1
-  .if \narrow
-        t0 .req q0
-        t1 .req q8
-  .else
-        t0 .req \d0
-        t1 .req \d1
-  .endif
-        vext.8          d2,  \r0, \r1, #2
-        vext.8          d3,  \r0, \r1, #3
-        vaddl.u8        q1,  d2,  d3
-        vext.8          d4,  \r0, \r1, #1
-        vext.8          d5,  \r0, \r1, #4
-        vaddl.u8        q2,  d4,  d5
-        vext.8          d30, \r0, \r1, #5
-        vaddl.u8        t0,  \r0, d30
-        vext.8          d18, \r2, \r3, #2
-        vmla.i16        t0,  q1,  d6[1]
-        vext.8          d19, \r2, \r3, #3
-        vaddl.u8        q9,  d18, d19
-        vext.8          d20, \r2, \r3, #1
-        vmls.i16        t0,  q2,  d6[0]
-        vext.8          d21, \r2, \r3, #4
-        vaddl.u8        q10, d20, d21
-        vext.8          d31, \r2, \r3, #5
-        vaddl.u8        t1,  \r2, d31
-        vmla.i16        t1,  q9,  d6[1]
-        vmls.i16        t1,  q10, d6[0]
-  .if \narrow
-        vqrshrun.s16    \d0, t0,  #5
-        vqrshrun.s16    \d1, t1,  #5
-  .endif
-        .unreq  t0
-        .unreq  t1
-.endm
-
-.macro  lowpass_8_1     r0,  r1,  d0,  narrow=1
-  .if \narrow
-        t0 .req q0
-  .else
-        t0 .req \d0
-  .endif
-        vext.8          d2,  \r0, \r1, #2
-        vext.8          d3,  \r0, \r1, #3
-        vaddl.u8        q1,  d2,  d3
-        vext.8          d4,  \r0, \r1, #1
-        vext.8          d5,  \r0, \r1, #4
-        vaddl.u8        q2,  d4,  d5
-        vext.8          d30, \r0, \r1, #5
-        vaddl.u8        t0,  \r0, d30
-        vmla.i16        t0,  q1,  d6[1]
-        vmls.i16        t0,  q2,  d6[0]
-  .if \narrow
-        vqrshrun.s16    \d0, t0,  #5
-  .endif
-        .unreq  t0
-.endm
-
-.macro  lowpass_8.16    r0,  r1,  l0,  h0,  l1,  h1,  d
-        vext.16         q1,  \r0, \r1, #2
-        vext.16         q0,  \r0, \r1, #3
-        vaddl.s16       q9,  d2,  d0
-        vext.16         q2,  \r0, \r1, #1
-        vaddl.s16       q1,  d3,  d1
-        vext.16         q3,  \r0, \r1, #4
-        vaddl.s16       q10, d4,  d6
-        vext.16         \r1, \r0, \r1, #5
-        vaddl.s16       q2,  d5,  d7
-        vaddl.s16       q0,  \h0, \h1
-        vaddl.s16       q8,  \l0, \l1
-
-        vshl.i32        q3,  q9,  #4
-        vshl.i32        q9,  q9,  #2
-        vshl.i32        q15, q10, #2
-        vadd.i32        q9,  q9,  q3
-        vadd.i32        q10, q10, q15
-
-        vshl.i32        q3,  q1,  #4
-        vshl.i32        q1,  q1,  #2
-        vshl.i32        q15, q2,  #2
-        vadd.i32        q1,  q1,  q3
-        vadd.i32        q2,  q2,  q15
-
-        vadd.i32        q9,  q9,  q8
-        vsub.i32        q9,  q9,  q10
-
-        vadd.i32        q1,  q1,  q0
-        vsub.i32        q1,  q1,  q2
-
-        vrshrn.s32      d18, q9,  #10
-        vrshrn.s32      d19, q1,  #10
-
-        vqmovun.s16     \d,  q9
-.endm
-
-function put_h264_qpel16_h_lowpass_neon_packed
-        mov             r4,  lr
-        mov             r12, #16
-        mov             r3,  #8
-        bl              put_h264_qpel8_h_lowpass_neon
-        sub             r1,  r1,  r2, lsl #4
-        add             r1,  r1,  #8
-        mov             r12, #16
-        mov             lr,  r4
-        b               put_h264_qpel8_h_lowpass_neon
-endfunc
-
-.macro  h264_qpel_h_lowpass type
-function \type\()_h264_qpel16_h_lowpass_neon
-        push            {lr}
-        mov             r12, #16
-        bl              \type\()_h264_qpel8_h_lowpass_neon
-        sub             r0,  r0,  r3, lsl #4
-        sub             r1,  r1,  r2, lsl #4
-        add             r0,  r0,  #8
-        add             r1,  r1,  #8
-        mov             r12, #16
-        pop             {lr}
-endfunc
-
-function \type\()_h264_qpel8_h_lowpass_neon
-1:      vld1.8          {d0, d1},  [r1], r2
-        vld1.8          {d16,d17}, [r1], r2
-        subs            r12, r12, #2
-        lowpass_8       d0,  d1,  d16, d17, d0,  d16
-  .ifc \type,avg
-        vld1.8          {d2},     [r0,:64], r3
-        vrhadd.u8       d0,  d0,  d2
-        vld1.8          {d3},     [r0,:64]
-        vrhadd.u8       d16, d16, d3
-        sub             r0,  r0,  r3
-  .endif
-        vst1.8          {d0},     [r0,:64], r3
-        vst1.8          {d16},    [r0,:64], r3
-        bne             1b
-        bx              lr
-endfunc
-.endm
-
-        h264_qpel_h_lowpass put
-        h264_qpel_h_lowpass avg
-
-.macro  h264_qpel_h_lowpass_l2 type
-function \type\()_h264_qpel16_h_lowpass_l2_neon
-        push            {lr}
-        mov             r12, #16
-        bl              \type\()_h264_qpel8_h_lowpass_l2_neon
-        sub             r0,  r0,  r2, lsl #4
-        sub             r1,  r1,  r2, lsl #4
-        sub             r3,  r3,  r2, lsl #4
-        add             r0,  r0,  #8
-        add             r1,  r1,  #8
-        add             r3,  r3,  #8
-        mov             r12, #16
-        pop             {lr}
-endfunc
-
-function \type\()_h264_qpel8_h_lowpass_l2_neon
-1:      vld1.8          {d0, d1},  [r1], r2
-        vld1.8          {d16,d17}, [r1], r2
-        vld1.8          {d28},     [r3], r2
-        vld1.8          {d29},     [r3], r2
-        subs            r12, r12, #2
-        lowpass_8       d0,  d1,  d16, d17, d0,  d1
-        vrhadd.u8       q0,  q0,  q14
-  .ifc \type,avg
-        vld1.8          {d2},      [r0,:64], r2
-        vrhadd.u8       d0,  d0,  d2
-        vld1.8          {d3},      [r0,:64]
-        vrhadd.u8       d1,  d1,  d3
-        sub             r0,  r0,  r2
-  .endif
-        vst1.8          {d0},      [r0,:64], r2
-        vst1.8          {d1},      [r0,:64], r2
-        bne             1b
-        bx              lr
-endfunc
-.endm
-
-        h264_qpel_h_lowpass_l2 put
-        h264_qpel_h_lowpass_l2 avg
-
-function put_h264_qpel16_v_lowpass_neon_packed
-        mov             r4,  lr
-        mov             r2,  #8
-        bl              put_h264_qpel8_v_lowpass_neon
-        sub             r1,  r1,  r3, lsl #2
-        bl              put_h264_qpel8_v_lowpass_neon
-        sub             r1,  r1,  r3, lsl #4
-        sub             r1,  r1,  r3, lsl #2
-        add             r1,  r1,  #8
-        bl              put_h264_qpel8_v_lowpass_neon
-        sub             r1,  r1,  r3, lsl #2
-        mov             lr,  r4
-        b               put_h264_qpel8_v_lowpass_neon
-endfunc
-
-.macro  h264_qpel_v_lowpass type
-function \type\()_h264_qpel16_v_lowpass_neon
-        mov             r4,  lr
-        bl              \type\()_h264_qpel8_v_lowpass_neon
-        sub             r1,  r1,  r3, lsl #2
-        bl              \type\()_h264_qpel8_v_lowpass_neon
-        sub             r0,  r0,  r2, lsl #4
-        add             r0,  r0,  #8
-        sub             r1,  r1,  r3, lsl #4
-        sub             r1,  r1,  r3, lsl #2
-        add             r1,  r1,  #8
-        bl              \type\()_h264_qpel8_v_lowpass_neon
-        sub             r1,  r1,  r3, lsl #2
-        mov             lr,  r4
-endfunc
-
-function \type\()_h264_qpel8_v_lowpass_neon
-        vld1.8          {d8},  [r1], r3
-        vld1.8          {d10}, [r1], r3
-        vld1.8          {d12}, [r1], r3
-        vld1.8          {d14}, [r1], r3
-        vld1.8          {d22}, [r1], r3
-        vld1.8          {d24}, [r1], r3
-        vld1.8          {d26}, [r1], r3
-        vld1.8          {d28}, [r1], r3
-        vld1.8          {d9},  [r1], r3
-        vld1.8          {d11}, [r1], r3
-        vld1.8          {d13}, [r1], r3
-        vld1.8          {d15}, [r1], r3
-        vld1.8          {d23}, [r1]
-
-        transpose_8x8   q4,  q5,  q6,  q7,  q11, q12, q13, q14
-        lowpass_8       d8,  d9,  d10, d11, d8,  d10
-        lowpass_8       d12, d13, d14, d15, d12, d14
-        lowpass_8       d22, d23, d24, d25, d22, d24
-        lowpass_8       d26, d27, d28, d29, d26, d28
-        transpose_8x8   d8,  d10, d12, d14, d22, d24, d26, d28
-
-  .ifc \type,avg
-        vld1.8          {d9},  [r0,:64], r2
-        vrhadd.u8       d8,  d8,  d9
-        vld1.8          {d11}, [r0,:64], r2
-        vrhadd.u8       d10, d10, d11
-        vld1.8          {d13}, [r0,:64], r2
-        vrhadd.u8       d12, d12, d13
-        vld1.8          {d15}, [r0,:64], r2
-        vrhadd.u8       d14, d14, d15
-        vld1.8          {d23}, [r0,:64], r2
-        vrhadd.u8       d22, d22, d23
-        vld1.8          {d25}, [r0,:64], r2
-        vrhadd.u8       d24, d24, d25
-        vld1.8          {d27}, [r0,:64], r2
-        vrhadd.u8       d26, d26, d27
-        vld1.8          {d29}, [r0,:64], r2
-        vrhadd.u8       d28, d28, d29
-        sub             r0,  r0,  r2,  lsl #3
-  .endif
-
-        vst1.8          {d8},  [r0,:64], r2
-        vst1.8          {d10}, [r0,:64], r2
-        vst1.8          {d12}, [r0,:64], r2
-        vst1.8          {d14}, [r0,:64], r2
-        vst1.8          {d22}, [r0,:64], r2
-        vst1.8          {d24}, [r0,:64], r2
-        vst1.8          {d26}, [r0,:64], r2
-        vst1.8          {d28}, [r0,:64], r2
-
-        bx              lr
-endfunc
-.endm
-
-        h264_qpel_v_lowpass put
-        h264_qpel_v_lowpass avg
-
-.macro  h264_qpel_v_lowpass_l2 type
-function \type\()_h264_qpel16_v_lowpass_l2_neon
-        mov             r4,  lr
-        bl              \type\()_h264_qpel8_v_lowpass_l2_neon
-        sub             r1,  r1,  r3, lsl #2
-        bl              \type\()_h264_qpel8_v_lowpass_l2_neon
-        sub             r0,  r0,  r3, lsl #4
-        sub             r12, r12, r2, lsl #4
-        add             r0,  r0,  #8
-        add             r12, r12, #8
-        sub             r1,  r1,  r3, lsl #4
-        sub             r1,  r1,  r3, lsl #2
-        add             r1,  r1,  #8
-        bl              \type\()_h264_qpel8_v_lowpass_l2_neon
-        sub             r1,  r1,  r3, lsl #2
-        mov             lr,  r4
-endfunc
-
-function \type\()_h264_qpel8_v_lowpass_l2_neon
-        vld1.8          {d8},  [r1], r3
-        vld1.8          {d10}, [r1], r3
-        vld1.8          {d12}, [r1], r3
-        vld1.8          {d14}, [r1], r3
-        vld1.8          {d22}, [r1], r3
-        vld1.8          {d24}, [r1], r3
-        vld1.8          {d26}, [r1], r3
-        vld1.8          {d28}, [r1], r3
-        vld1.8          {d9},  [r1], r3
-        vld1.8          {d11}, [r1], r3
-        vld1.8          {d13}, [r1], r3
-        vld1.8          {d15}, [r1], r3
-        vld1.8          {d23}, [r1]
-
-        transpose_8x8   q4,  q5,  q6,  q7,  q11, q12, q13, q14
-        lowpass_8       d8,  d9,  d10, d11, d8,  d9
-        lowpass_8       d12, d13, d14, d15, d12, d13
-        lowpass_8       d22, d23, d24, d25, d22, d23
-        lowpass_8       d26, d27, d28, d29, d26, d27
-        transpose_8x8   d8,  d9,  d12, d13, d22, d23, d26, d27
-
-        vld1.8          {d0},  [r12], r2
-        vld1.8          {d1},  [r12], r2
-        vld1.8          {d2},  [r12], r2
-        vld1.8          {d3},  [r12], r2
-        vld1.8          {d4},  [r12], r2
-        vrhadd.u8       q0,  q0,  q4
-        vld1.8          {d5},  [r12], r2
-        vrhadd.u8       q1,  q1,  q6
-        vld1.8          {d10}, [r12], r2
-        vrhadd.u8       q2,  q2,  q11
-        vld1.8          {d11}, [r12], r2
-        vrhadd.u8       q5,  q5,  q13
-
-  .ifc \type,avg
-        vld1.8          {d16}, [r0,:64], r3
-        vrhadd.u8       d0,  d0,  d16
-        vld1.8          {d17}, [r0,:64], r3
-        vrhadd.u8       d1,  d1,  d17
-        vld1.8          {d16}, [r0,:64], r3
-        vrhadd.u8       d2,  d2,  d16
-        vld1.8          {d17}, [r0,:64], r3
-        vrhadd.u8       d3,  d3,  d17
-        vld1.8          {d16}, [r0,:64], r3
-        vrhadd.u8       d4,  d4,  d16
-        vld1.8          {d17}, [r0,:64], r3
-        vrhadd.u8       d5,  d5,  d17
-        vld1.8          {d16}, [r0,:64], r3
-        vrhadd.u8       d10, d10, d16
-        vld1.8          {d17}, [r0,:64], r3
-        vrhadd.u8       d11, d11, d17
-        sub             r0,  r0,  r3,  lsl #3
-  .endif
-
-        vst1.8          {d0},  [r0,:64], r3
-        vst1.8          {d1},  [r0,:64], r3
-        vst1.8          {d2},  [r0,:64], r3
-        vst1.8          {d3},  [r0,:64], r3
-        vst1.8          {d4},  [r0,:64], r3
-        vst1.8          {d5},  [r0,:64], r3
-        vst1.8          {d10}, [r0,:64], r3
-        vst1.8          {d11}, [r0,:64], r3
-
-        bx              lr
-endfunc
-.endm
-
-        h264_qpel_v_lowpass_l2 put
-        h264_qpel_v_lowpass_l2 avg
-
-function put_h264_qpel8_hv_lowpass_neon_top
-        lowpass_const   r12
-        mov             r12, #12
-1:      vld1.8          {d0, d1},  [r1], r3
-        vld1.8          {d16,d17}, [r1], r3
-        subs            r12, r12, #2
-        lowpass_8       d0,  d1,  d16, d17, q11, q12, narrow=0
-        vst1.8          {d22-d25}, [r4,:128]!
-        bne             1b
-
-        vld1.8          {d0, d1},  [r1]
-        lowpass_8_1     d0,  d1,  q12, narrow=0
-
-        mov             r12, #-16
-        add             r4,  r4,  r12
-        vld1.8          {d30,d31}, [r4,:128], r12
-        vld1.8          {d20,d21}, [r4,:128], r12
-        vld1.8          {d18,d19}, [r4,:128], r12
-        vld1.8          {d16,d17}, [r4,:128], r12
-        vld1.8          {d14,d15}, [r4,:128], r12
-        vld1.8          {d12,d13}, [r4,:128], r12
-        vld1.8          {d10,d11}, [r4,:128], r12
-        vld1.8          {d8, d9},  [r4,:128], r12
-        vld1.8          {d6, d7},  [r4,:128], r12
-        vld1.8          {d4, d5},  [r4,:128], r12
-        vld1.8          {d2, d3},  [r4,:128], r12
-        vld1.8          {d0, d1},  [r4,:128]
-
-        swap4           d1,  d3,  d5,  d7,  d8,  d10, d12, d14
-        transpose16_4x4 q0,  q1,  q2,  q3,  q4,  q5,  q6,  q7
-
-        swap4           d17, d19, d21, d31, d24, d26, d28, d22
-        transpose16_4x4 q8,  q9,  q10, q15, q12, q13, q14, q11
-
-        vst1.8          {d30,d31}, [r4,:128]!
-        vst1.8          {d6, d7},  [r4,:128]!
-        vst1.8          {d20,d21}, [r4,:128]!
-        vst1.8          {d4, d5},  [r4,:128]!
-        vst1.8          {d18,d19}, [r4,:128]!
-        vst1.8          {d2, d3},  [r4,:128]!
-        vst1.8          {d16,d17}, [r4,:128]!
-        vst1.8          {d0, d1},  [r4,:128]
-
-        lowpass_8.16    q4,  q12, d8,  d9,  d24, d25, d8
-        lowpass_8.16    q5,  q13, d10, d11, d26, d27, d9
-        lowpass_8.16    q6,  q14, d12, d13, d28, d29, d10
-        lowpass_8.16    q7,  q11, d14, d15, d22, d23, d11
-
-        vld1.8          {d16,d17}, [r4,:128], r12
-        vld1.8          {d30,d31}, [r4,:128], r12
-        lowpass_8.16    q8,  q15, d16, d17, d30, d31, d12
-        vld1.8          {d16,d17}, [r4,:128], r12
-        vld1.8          {d30,d31}, [r4,:128], r12
-        lowpass_8.16    q8,  q15, d16, d17, d30, d31, d13
-        vld1.8          {d16,d17}, [r4,:128], r12
-        vld1.8          {d30,d31}, [r4,:128], r12
-        lowpass_8.16    q8,  q15, d16, d17, d30, d31, d14
-        vld1.8          {d16,d17}, [r4,:128], r12
-        vld1.8          {d30,d31}, [r4,:128]
-        lowpass_8.16    q8,  q15, d16, d17, d30, d31, d15
-
-        transpose_8x8   d12, d13, d14, d15, d8,  d9,  d10, d11
-
-        bx              lr
-endfunc
-
-.macro  h264_qpel8_hv_lowpass type
-function \type\()_h264_qpel8_hv_lowpass_neon
-        mov             r10, lr
-        bl              put_h264_qpel8_hv_lowpass_neon_top
-  .ifc \type,avg
-        vld1.8          {d0},      [r0,:64], r2
-        vrhadd.u8       d12, d12, d0
-        vld1.8          {d1},      [r0,:64], r2
-        vrhadd.u8       d13, d13, d1
-        vld1.8          {d2},      [r0,:64], r2
-        vrhadd.u8       d14, d14, d2
-        vld1.8          {d3},      [r0,:64], r2
-        vrhadd.u8       d15, d15, d3
-        vld1.8          {d4},      [r0,:64], r2
-        vrhadd.u8       d8,  d8,  d4
-        vld1.8          {d5},      [r0,:64], r2
-        vrhadd.u8       d9,  d9,  d5
-        vld1.8          {d6},      [r0,:64], r2
-        vrhadd.u8       d10, d10, d6
-        vld1.8          {d7},      [r0,:64], r2
-        vrhadd.u8       d11, d11, d7
-        sub             r0,  r0,  r2,  lsl #3
-  .endif
-
-        vst1.8          {d12},     [r0,:64], r2
-        vst1.8          {d13},     [r0,:64], r2
-        vst1.8          {d14},     [r0,:64], r2
-        vst1.8          {d15},     [r0,:64], r2
-        vst1.8          {d8},      [r0,:64], r2
-        vst1.8          {d9},      [r0,:64], r2
-        vst1.8          {d10},     [r0,:64], r2
-        vst1.8          {d11},     [r0,:64], r2
-
-        mov             lr,  r10
-        bx              lr
-endfunc
-.endm
-
-        h264_qpel8_hv_lowpass put
-        h264_qpel8_hv_lowpass avg
-
-.macro  h264_qpel8_hv_lowpass_l2 type
-function \type\()_h264_qpel8_hv_lowpass_l2_neon
-        mov             r10, lr
-        bl              put_h264_qpel8_hv_lowpass_neon_top
-
-        vld1.8          {d0, d1},  [r2,:128]!
-        vld1.8          {d2, d3},  [r2,:128]!
-        vrhadd.u8       q0,  q0,  q6
-        vld1.8          {d4, d5},  [r2,:128]!
-        vrhadd.u8       q1,  q1,  q7
-        vld1.8          {d6, d7},  [r2,:128]!
-        vrhadd.u8       q2,  q2,  q4
-        vrhadd.u8       q3,  q3,  q5
-  .ifc \type,avg
-        vld1.8          {d16},     [r0,:64], r3
-        vrhadd.u8       d0,  d0,  d16
-        vld1.8          {d17},     [r0,:64], r3
-        vrhadd.u8       d1,  d1,  d17
-        vld1.8          {d18},     [r0,:64], r3
-        vrhadd.u8       d2,  d2,  d18
-        vld1.8          {d19},     [r0,:64], r3
-        vrhadd.u8       d3,  d3,  d19
-        vld1.8          {d20},     [r0,:64], r3
-        vrhadd.u8       d4,  d4,  d20
-        vld1.8          {d21},     [r0,:64], r3
-        vrhadd.u8       d5,  d5,  d21
-        vld1.8          {d22},     [r0,:64], r3
-        vrhadd.u8       d6,  d6,  d22
-        vld1.8          {d23},     [r0,:64], r3
-        vrhadd.u8       d7,  d7,  d23
-        sub             r0,  r0,  r3,  lsl #3
-  .endif
-        vst1.8          {d0},      [r0,:64], r3
-        vst1.8          {d1},      [r0,:64], r3
-        vst1.8          {d2},      [r0,:64], r3
-        vst1.8          {d3},      [r0,:64], r3
-        vst1.8          {d4},      [r0,:64], r3
-        vst1.8          {d5},      [r0,:64], r3
-        vst1.8          {d6},      [r0,:64], r3
-        vst1.8          {d7},      [r0,:64], r3
-
-        mov             lr,  r10
-        bx              lr
-endfunc
-.endm
-
-        h264_qpel8_hv_lowpass_l2 put
-        h264_qpel8_hv_lowpass_l2 avg
-
-.macro  h264_qpel16_hv  type
-function \type\()_h264_qpel16_hv_lowpass_neon
-        mov             r9,  lr
-        bl              \type\()_h264_qpel8_hv_lowpass_neon
-        sub             r1,  r1,  r3, lsl #2
-        bl              \type\()_h264_qpel8_hv_lowpass_neon
-        sub             r1,  r1,  r3, lsl #4
-        sub             r1,  r1,  r3, lsl #2
-        add             r1,  r1,  #8
-        sub             r0,  r0,  r2, lsl #4
-        add             r0,  r0,  #8
-        bl              \type\()_h264_qpel8_hv_lowpass_neon
-        sub             r1,  r1,  r3, lsl #2
-        mov             lr,  r9
-        b               \type\()_h264_qpel8_hv_lowpass_neon
-endfunc
-
-function \type\()_h264_qpel16_hv_lowpass_l2_neon
-        mov             r9,  lr
-        sub             r2,  r4,  #256
-        bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
-        sub             r1,  r1,  r3, lsl #2
-        bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
-        sub             r1,  r1,  r3, lsl #4
-        sub             r1,  r1,  r3, lsl #2
-        add             r1,  r1,  #8
-        sub             r0,  r0,  r3, lsl #4
-        add             r0,  r0,  #8
-        bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
-        sub             r1,  r1,  r3, lsl #2
-        mov             lr,  r9
-        b               \type\()_h264_qpel8_hv_lowpass_l2_neon
-endfunc
-.endm
-
-        h264_qpel16_hv put
-        h264_qpel16_hv avg
-
-.macro  h264_qpel8      type
-function ff_\type\()_h264_qpel8_mc10_neon, export=1
-        lowpass_const   r3
-        mov             r3,  r1
-        sub             r1,  r1,  #2
-        mov             r12, #8
-        b               \type\()_h264_qpel8_h_lowpass_l2_neon
-endfunc
-
-function ff_\type\()_h264_qpel8_mc20_neon, export=1
-        lowpass_const   r3
-        sub             r1,  r1,  #2
-        mov             r3,  r2
-        mov             r12, #8
-        b               \type\()_h264_qpel8_h_lowpass_neon
-endfunc
-
-function ff_\type\()_h264_qpel8_mc30_neon, export=1
-        lowpass_const   r3
-        add             r3,  r1,  #1
-        sub             r1,  r1,  #2
-        mov             r12, #8
-        b               \type\()_h264_qpel8_h_lowpass_l2_neon
-endfunc
-
-function ff_\type\()_h264_qpel8_mc01_neon, export=1
-        push            {lr}
-        mov             r12, r1
-\type\()_h264_qpel8_mc01:
-        lowpass_const   r3
-        mov             r3,  r2
-        sub             r1,  r1,  r2, lsl #1
-        vpush           {d8-d15}
-        bl              \type\()_h264_qpel8_v_lowpass_l2_neon
-        vpop            {d8-d15}
-        pop             {pc}
-endfunc
-
-function ff_\type\()_h264_qpel8_mc11_neon, export=1
-        push            {r0, r1, r11, lr}
-\type\()_h264_qpel8_mc11:
-        lowpass_const   r3
-        mov             r11, sp
-A       bic             sp,  sp,  #15
-T       bic             r0,  r11, #15
-T       mov             sp,  r0
-        sub             sp,  sp,  #64
-        mov             r0,  sp
-        sub             r1,  r1,  #2
-        mov             r3,  #8
-        mov             r12, #8
-        vpush           {d8-d15}
-        bl              put_h264_qpel8_h_lowpass_neon
-        ldrd            r0,  r1,  [r11], #8
-        mov             r3,  r2
-        add             r12, sp,  #64
-        sub             r1,  r1,  r2, lsl #1
-        mov             r2,  #8
-        bl              \type\()_h264_qpel8_v_lowpass_l2_neon
-        vpop            {d8-d15}
-        mov             sp,  r11
-        pop             {r11, pc}
-endfunc
-
-function ff_\type\()_h264_qpel8_mc21_neon, export=1
-        push            {r0, r1, r4, r10, r11, lr}
-\type\()_h264_qpel8_mc21:
-        lowpass_const   r3
-        mov             r11, sp
-A       bic             sp,  sp,  #15
-T       bic             r0,  r11, #15
-T       mov             sp,  r0
-        sub             sp,  sp,  #(8*8+16*12)
-        sub             r1,  r1,  #2
-        mov             r3,  #8
-        mov             r0,  sp
-        mov             r12, #8
-        vpush           {d8-d15}
-        bl              put_h264_qpel8_h_lowpass_neon
-        mov             r4,  r0
-        ldrd            r0,  r1,  [r11], #8
-        sub             r1,  r1,  r2, lsl #1
-        sub             r1,  r1,  #2
-        mov             r3,  r2
-        sub             r2,  r4,  #64
-        bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
-        vpop            {d8-d15}
-        mov             sp,  r11
-        pop             {r4, r10, r11, pc}
-endfunc
-
-function ff_\type\()_h264_qpel8_mc31_neon, export=1
-        add             r1,  r1,  #1
-        push            {r0, r1, r11, lr}
-        sub             r1,  r1,  #1
-        b               \type\()_h264_qpel8_mc11
-endfunc
-
-function ff_\type\()_h264_qpel8_mc02_neon, export=1
-        push            {lr}
-        lowpass_const   r3
-        sub             r1,  r1,  r2, lsl #1
-        mov             r3,  r2
-        vpush           {d8-d15}
-        bl              \type\()_h264_qpel8_v_lowpass_neon
-        vpop            {d8-d15}
-        pop             {pc}
-endfunc
-
-function ff_\type\()_h264_qpel8_mc12_neon, export=1
-        push            {r0, r1, r4, r10, r11, lr}
-\type\()_h264_qpel8_mc12:
-        lowpass_const   r3
-        mov             r11, sp
-A       bic             sp,  sp,  #15
-T       bic             r0,  r11, #15
-T       mov             sp,  r0
-        sub             sp,  sp,  #(8*8+16*12)
-        sub             r1,  r1,  r2, lsl #1
-        mov             r3,  r2
-        mov             r2,  #8
-        mov             r0,  sp
-        vpush           {d8-d15}
-        bl              put_h264_qpel8_v_lowpass_neon
-        mov             r4,  r0
-        ldrd            r0,  r1,  [r11], #8
-        sub             r1,  r1,  r3, lsl #1
-        sub             r1,  r1,  #2
-        sub             r2,  r4,  #64
-        bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
-        vpop            {d8-d15}
-        mov             sp,  r11
-        pop             {r4, r10, r11, pc}
-endfunc
-
-function ff_\type\()_h264_qpel8_mc22_neon, export=1
-        push            {r4, r10, r11, lr}
-        mov             r11, sp
-A       bic             sp,  sp,  #15
-T       bic             r4,  r11, #15
-T       mov             sp,  r4
-        sub             r1,  r1,  r2, lsl #1
-        sub             r1,  r1,  #2
-        mov             r3,  r2
-        sub             sp,  sp,  #(16*12)
-        mov             r4,  sp
-        vpush           {d8-d15}
-        bl              \type\()_h264_qpel8_hv_lowpass_neon
-        vpop            {d8-d15}
-        mov             sp,  r11
-        pop             {r4, r10, r11, pc}
-endfunc
-
-function ff_\type\()_h264_qpel8_mc32_neon, export=1
-        push            {r0, r1, r4, r10, r11, lr}
-        add             r1,  r1,  #1
-        b               \type\()_h264_qpel8_mc12
-endfunc
-
-function ff_\type\()_h264_qpel8_mc03_neon, export=1
-        push            {lr}
-        add             r12, r1,  r2
-        b               \type\()_h264_qpel8_mc01
-endfunc
-
-function ff_\type\()_h264_qpel8_mc13_neon, export=1
-        push            {r0, r1, r11, lr}
-        add             r1,  r1,  r2
-        b               \type\()_h264_qpel8_mc11
-endfunc
-
-function ff_\type\()_h264_qpel8_mc23_neon, export=1
-        push            {r0, r1, r4, r10, r11, lr}
-        add             r1,  r1,  r2
-        b               \type\()_h264_qpel8_mc21
-endfunc
-
-function ff_\type\()_h264_qpel8_mc33_neon, export=1
-        add             r1,  r1,  #1
-        push            {r0, r1, r11, lr}
-        add             r1,  r1,  r2
-        sub             r1,  r1,  #1
-        b               \type\()_h264_qpel8_mc11
-endfunc
-.endm
-
-        h264_qpel8 put
-        h264_qpel8 avg
-
-.macro  h264_qpel16     type
-function ff_\type\()_h264_qpel16_mc10_neon, export=1
-        lowpass_const   r3
-        mov             r3,  r1
-        sub             r1,  r1,  #2
-        b               \type\()_h264_qpel16_h_lowpass_l2_neon
-endfunc
-
-function ff_\type\()_h264_qpel16_mc20_neon, export=1
-        lowpass_const   r3
-        sub             r1,  r1,  #2
-        mov             r3,  r2
-        b               \type\()_h264_qpel16_h_lowpass_neon
-endfunc
-
-function ff_\type\()_h264_qpel16_mc30_neon, export=1
-        lowpass_const   r3
-        add             r3,  r1,  #1
-        sub             r1,  r1,  #2
-        b               \type\()_h264_qpel16_h_lowpass_l2_neon
-endfunc
-
-function ff_\type\()_h264_qpel16_mc01_neon, export=1
-        push            {r4, lr}
-        mov             r12, r1
-\type\()_h264_qpel16_mc01:
-        lowpass_const   r3
-        mov             r3,  r2
-        sub             r1,  r1,  r2, lsl #1
-        vpush           {d8-d15}
-        bl              \type\()_h264_qpel16_v_lowpass_l2_neon
-        vpop            {d8-d15}
-        pop             {r4, pc}
-endfunc
-
-function ff_\type\()_h264_qpel16_mc11_neon, export=1
-        push            {r0, r1, r4, r11, lr}
-\type\()_h264_qpel16_mc11:
-        lowpass_const   r3
-        mov             r11, sp
-A       bic             sp,  sp,  #15
-T       bic             r0,  r11, #15
-T       mov             sp,  r0
-        sub             sp,  sp,  #256
-        mov             r0,  sp
-        sub             r1,  r1,  #2
-        mov             r3,  #16
-        vpush           {d8-d15}
-        bl              put_h264_qpel16_h_lowpass_neon
-        ldrd            r0,  r1,  [r11], #8
-        mov             r3,  r2
-        add             r12, sp,  #64
-        sub             r1,  r1,  r2, lsl #1
-        mov             r2,  #16
-        bl              \type\()_h264_qpel16_v_lowpass_l2_neon
-        vpop            {d8-d15}
-        mov             sp,  r11
-        pop             {r4, r11, pc}
-endfunc
-
-function ff_\type\()_h264_qpel16_mc21_neon, export=1
-        push            {r0, r1, r4-r5, r9-r11, lr}
-\type\()_h264_qpel16_mc21:
-        lowpass_const   r3
-        mov             r11, sp
-A       bic             sp,  sp,  #15
-T       bic             r0,  r11, #15
-T       mov             sp,  r0
-        sub             sp,  sp,  #(16*16+16*12)
-        sub             r1,  r1,  #2
-        mov             r0,  sp
-        vpush           {d8-d15}
-        bl              put_h264_qpel16_h_lowpass_neon_packed
-        mov             r4,  r0
-        ldrd            r0,  r1,  [r11], #8
-        sub             r1,  r1,  r2, lsl #1
-        sub             r1,  r1,  #2
-        mov             r3,  r2
-        bl              \type\()_h264_qpel16_hv_lowpass_l2_neon
-        vpop            {d8-d15}
-        mov             sp,  r11
-        pop             {r4-r5, r9-r11, pc}
-endfunc
-
-function ff_\type\()_h264_qpel16_mc31_neon, export=1
-        add             r1,  r1,  #1
-        push            {r0, r1, r4, r11, lr}
-        sub             r1,  r1,  #1
-        b               \type\()_h264_qpel16_mc11
-endfunc
-
-function ff_\type\()_h264_qpel16_mc02_neon, export=1
-        push            {r4, lr}
-        lowpass_const   r3
-        sub             r1,  r1,  r2, lsl #1
-        mov             r3,  r2
-        vpush           {d8-d15}
-        bl              \type\()_h264_qpel16_v_lowpass_neon
-        vpop            {d8-d15}
-        pop             {r4, pc}
-endfunc
-
-function ff_\type\()_h264_qpel16_mc12_neon, export=1
-        push            {r0, r1, r4-r5, r9-r11, lr}
-\type\()_h264_qpel16_mc12:
-        lowpass_const   r3
-        mov             r11, sp
-A       bic             sp,  sp,  #15
-T       bic             r0,  r11, #15
-T       mov             sp,  r0
-        sub             sp,  sp,  #(16*16+16*12)
-        sub             r1,  r1,  r2, lsl #1
-        mov             r0,  sp
-        mov             r3,  r2
-        vpush           {d8-d15}
-        bl              put_h264_qpel16_v_lowpass_neon_packed
-        mov             r4,  r0
-        ldrd            r0,  r1,  [r11], #8
-        sub             r1,  r1,  r3, lsl #1
-        sub             r1,  r1,  #2
-        mov             r2,  r3
-        bl              \type\()_h264_qpel16_hv_lowpass_l2_neon
-        vpop            {d8-d15}
-        mov             sp,  r11
-        pop             {r4-r5, r9-r11, pc}
-endfunc
-
-function ff_\type\()_h264_qpel16_mc22_neon, export=1
-        push            {r4, r9-r11, lr}
-        lowpass_const   r3
-        mov             r11, sp
-A       bic             sp,  sp,  #15
-T       bic             r4,  r11, #15
-T       mov             sp,  r4
-        sub             r1,  r1,  r2, lsl #1
-        sub             r1,  r1,  #2
-        mov             r3,  r2
-        sub             sp,  sp,  #(16*12)
-        mov             r4,  sp
-        vpush           {d8-d15}
-        bl              \type\()_h264_qpel16_hv_lowpass_neon
-        vpop            {d8-d15}
-        mov             sp,  r11
-        pop             {r4, r9-r11, pc}
-endfunc
-
-function ff_\type\()_h264_qpel16_mc32_neon, export=1
-        push            {r0, r1, r4-r5, r9-r11, lr}
-        add             r1,  r1,  #1
-        b               \type\()_h264_qpel16_mc12
-endfunc
-
-function ff_\type\()_h264_qpel16_mc03_neon, export=1
-        push            {r4, lr}
-        add             r12, r1,  r2
-        b               \type\()_h264_qpel16_mc01
-endfunc
-
-function ff_\type\()_h264_qpel16_mc13_neon, export=1
-        push            {r0, r1, r4, r11, lr}
-        add             r1,  r1,  r2
-        b               \type\()_h264_qpel16_mc11
-endfunc
-
-function ff_\type\()_h264_qpel16_mc23_neon, export=1
-        push            {r0, r1, r4-r5, r9-r11, lr}
-        add             r1,  r1,  r2
-        b               \type\()_h264_qpel16_mc21
-endfunc
-
-function ff_\type\()_h264_qpel16_mc33_neon, export=1
-        add             r1,  r1,  #1
-        push            {r0, r1, r4, r11, lr}
-        add             r1,  r1,  r2
-        sub             r1,  r1,  #1
-        b               \type\()_h264_qpel16_mc11
-endfunc
-.endm
-
-        h264_qpel16 put
-        h264_qpel16 avg
-
 @ Biweighted prediction
 
 .macro  biweight_16     macs, macd
diff --git a/libavcodec/arm/h264qpel_init_arm.c b/libavcodec/arm/h264qpel_init_arm.c
new file mode 100644
index 0000000..b546c2a
--- /dev/null
+++ b/libavcodec/arm/h264qpel_init_arm.c
@@ -0,0 +1,170 @@
+/*
+ * ARM NEON optimised DSP functions
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "config.h"
+#include "libavcodec/h264qpel.h"
+
+void ff_put_h264_qpel16_mc00_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel16_mc10_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel16_mc20_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel16_mc30_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel16_mc01_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel16_mc11_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel16_mc21_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel16_mc31_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel16_mc02_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel16_mc12_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel16_mc22_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel16_mc32_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel16_mc03_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel16_mc13_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel16_mc23_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel16_mc33_neon(uint8_t *, uint8_t *, int);
+
+void ff_put_h264_qpel8_mc00_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel8_mc10_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel8_mc20_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel8_mc30_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel8_mc01_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel8_mc11_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel8_mc21_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel8_mc31_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel8_mc02_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel8_mc12_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel8_mc22_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel8_mc32_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel8_mc03_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel8_mc13_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel8_mc23_neon(uint8_t *, uint8_t *, int);
+void ff_put_h264_qpel8_mc33_neon(uint8_t *, uint8_t *, int);
+
+void ff_avg_h264_qpel16_mc00_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel16_mc10_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel16_mc20_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel16_mc30_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel16_mc01_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel16_mc11_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel16_mc21_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel16_mc31_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel16_mc02_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel16_mc12_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel16_mc22_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel16_mc32_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel16_mc03_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel16_mc13_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel16_mc23_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel16_mc33_neon(uint8_t *, uint8_t *, int);
+
+void ff_avg_h264_qpel8_mc00_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel8_mc10_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel8_mc20_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel8_mc30_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel8_mc01_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel8_mc11_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel8_mc21_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel8_mc31_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel8_mc02_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel8_mc12_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel8_mc22_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel8_mc32_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel8_mc03_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel8_mc13_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel8_mc23_neon(uint8_t *, uint8_t *, int);
+void ff_avg_h264_qpel8_mc33_neon(uint8_t *, uint8_t *, int);
+
+void ff_h264qpel_init_arm(H264QpelContext *c, int bit_depth)
+{
+#if HAVE_NEON
+    const int high_bit_depth = bit_depth > 8;
+
+    if (!high_bit_depth) {
+        c->put_h264_qpel_pixels_tab[0][ 0] = ff_put_h264_qpel16_mc00_neon;
+        c->put_h264_qpel_pixels_tab[0][ 1] = ff_put_h264_qpel16_mc10_neon;
+        c->put_h264_qpel_pixels_tab[0][ 2] = ff_put_h264_qpel16_mc20_neon;
+        c->put_h264_qpel_pixels_tab[0][ 3] = ff_put_h264_qpel16_mc30_neon;
+        c->put_h264_qpel_pixels_tab[0][ 4] = ff_put_h264_qpel16_mc01_neon;
+        c->put_h264_qpel_pixels_tab[0][ 5] = ff_put_h264_qpel16_mc11_neon;
+        c->put_h264_qpel_pixels_tab[0][ 6] = ff_put_h264_qpel16_mc21_neon;
+        c->put_h264_qpel_pixels_tab[0][ 7] = ff_put_h264_qpel16_mc31_neon;
+        c->put_h264_qpel_pixels_tab[0][ 8] = ff_put_h264_qpel16_mc02_neon;
+        c->put_h264_qpel_pixels_tab[0][ 9] = ff_put_h264_qpel16_mc12_neon;
+        c->put_h264_qpel_pixels_tab[0][10] = ff_put_h264_qpel16_mc22_neon;
+        c->put_h264_qpel_pixels_tab[0][11] = ff_put_h264_qpel16_mc32_neon;
+        c->put_h264_qpel_pixels_tab[0][12] = ff_put_h264_qpel16_mc03_neon;
+        c->put_h264_qpel_pixels_tab[0][13] = ff_put_h264_qpel16_mc13_neon;
+        c->put_h264_qpel_pixels_tab[0][14] = ff_put_h264_qpel16_mc23_neon;
+        c->put_h264_qpel_pixels_tab[0][15] = ff_put_h264_qpel16_mc33_neon;
+
+        c->put_h264_qpel_pixels_tab[1][ 0] = ff_put_h264_qpel8_mc00_neon;
+        c->put_h264_qpel_pixels_tab[1][ 1] = ff_put_h264_qpel8_mc10_neon;
+        c->put_h264_qpel_pixels_tab[1][ 2] = ff_put_h264_qpel8_mc20_neon;
+        c->put_h264_qpel_pixels_tab[1][ 3] = ff_put_h264_qpel8_mc30_neon;
+        c->put_h264_qpel_pixels_tab[1][ 4] = ff_put_h264_qpel8_mc01_neon;
+        c->put_h264_qpel_pixels_tab[1][ 5] = ff_put_h264_qpel8_mc11_neon;
+        c->put_h264_qpel_pixels_tab[1][ 6] = ff_put_h264_qpel8_mc21_neon;
+        c->put_h264_qpel_pixels_tab[1][ 7] = ff_put_h264_qpel8_mc31_neon;
+        c->put_h264_qpel_pixels_tab[1][ 8] = ff_put_h264_qpel8_mc02_neon;
+        c->put_h264_qpel_pixels_tab[1][ 9] = ff_put_h264_qpel8_mc12_neon;
+        c->put_h264_qpel_pixels_tab[1][10] = ff_put_h264_qpel8_mc22_neon;
+        c->put_h264_qpel_pixels_tab[1][11] = ff_put_h264_qpel8_mc32_neon;
+        c->put_h264_qpel_pixels_tab[1][12] = ff_put_h264_qpel8_mc03_neon;
+        c->put_h264_qpel_pixels_tab[1][13] = ff_put_h264_qpel8_mc13_neon;
+        c->put_h264_qpel_pixels_tab[1][14] = ff_put_h264_qpel8_mc23_neon;
+        c->put_h264_qpel_pixels_tab[1][15] = ff_put_h264_qpel8_mc33_neon;
+
+        c->avg_h264_qpel_pixels_tab[0][ 0] = ff_avg_h264_qpel16_mc00_neon;
+        c->avg_h264_qpel_pixels_tab[0][ 1] = ff_avg_h264_qpel16_mc10_neon;
+        c->avg_h264_qpel_pixels_tab[0][ 2] = ff_avg_h264_qpel16_mc20_neon;
+        c->avg_h264_qpel_pixels_tab[0][ 3] = ff_avg_h264_qpel16_mc30_neon;
+        c->avg_h264_qpel_pixels_tab[0][ 4] = ff_avg_h264_qpel16_mc01_neon;
+        c->avg_h264_qpel_pixels_tab[0][ 5] = ff_avg_h264_qpel16_mc11_neon;
+        c->avg_h264_qpel_pixels_tab[0][ 6] = ff_avg_h264_qpel16_mc21_neon;
+        c->avg_h264_qpel_pixels_tab[0][ 7] = ff_avg_h264_qpel16_mc31_neon;
+        c->avg_h264_qpel_pixels_tab[0][ 8] = ff_avg_h264_qpel16_mc02_neon;
+        c->avg_h264_qpel_pixels_tab[0][ 9] = ff_avg_h264_qpel16_mc12_neon;
+        c->avg_h264_qpel_pixels_tab[0][10] = ff_avg_h264_qpel16_mc22_neon;
+        c->avg_h264_qpel_pixels_tab[0][11] = ff_avg_h264_qpel16_mc32_neon;
+        c->avg_h264_qpel_pixels_tab[0][12] = ff_avg_h264_qpel16_mc03_neon;
+        c->avg_h264_qpel_pixels_tab[0][13] = ff_avg_h264_qpel16_mc13_neon;
+        c->avg_h264_qpel_pixels_tab[0][14] = ff_avg_h264_qpel16_mc23_neon;
+        c->avg_h264_qpel_pixels_tab[0][15] = ff_avg_h264_qpel16_mc33_neon;
+
+        c->avg_h264_qpel_pixels_tab[1][ 0] = ff_avg_h264_qpel8_mc00_neon;
+        c->avg_h264_qpel_pixels_tab[1][ 1] = ff_avg_h264_qpel8_mc10_neon;
+        c->avg_h264_qpel_pixels_tab[1][ 2] = ff_avg_h264_qpel8_mc20_neon;
+        c->avg_h264_qpel_pixels_tab[1][ 3] = ff_avg_h264_qpel8_mc30_neon;
+        c->avg_h264_qpel_pixels_tab[1][ 4] = ff_avg_h264_qpel8_mc01_neon;
+        c->avg_h264_qpel_pixels_tab[1][ 5] = ff_avg_h264_qpel8_mc11_neon;
+        c->avg_h264_qpel_pixels_tab[1][ 6] = ff_avg_h264_qpel8_mc21_neon;
+        c->avg_h264_qpel_pixels_tab[1][ 7] = ff_avg_h264_qpel8_mc31_neon;
+        c->avg_h264_qpel_pixels_tab[1][ 8] = ff_avg_h264_qpel8_mc02_neon;
+        c->avg_h264_qpel_pixels_tab[1][ 9] = ff_avg_h264_qpel8_mc12_neon;
+        c->avg_h264_qpel_pixels_tab[1][10] = ff_avg_h264_qpel8_mc22_neon;
+        c->avg_h264_qpel_pixels_tab[1][11] = ff_avg_h264_qpel8_mc32_neon;
+        c->avg_h264_qpel_pixels_tab[1][12] = ff_avg_h264_qpel8_mc03_neon;
+        c->avg_h264_qpel_pixels_tab[1][13] = ff_avg_h264_qpel8_mc13_neon;
+        c->avg_h264_qpel_pixels_tab[1][14] = ff_avg_h264_qpel8_mc23_neon;
+        c->avg_h264_qpel_pixels_tab[1][15] = ff_avg_h264_qpel8_mc33_neon;
+    }
+#endif /* HAVE_NEON */
+}
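
The generic init in the new libavcodec/h264qpel.c is not part of this
excerpt; a plausible sketch of how it hooks up the table above, assuming
it follows the pattern of the existing h264dsp/h264pred split (only
ff_h264qpel_init_arm() is confirmed by the diff, the PPC and x86 init
names are assumptions):

    #include "config.h"
    #include "libavcodec/h264qpel.h"

    void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
    {
        /* ... install the C fallback functions for the requested bit
         * depth first (elided), then let each architecture override
         * the entries it implements ... */

        if (ARCH_ARM)
            ff_h264qpel_init_arm(c, bit_depth);
        if (ARCH_PPC)
            ff_h264qpel_init_ppc(c, bit_depth);
        if (ARCH_X86)
            ff_h264qpel_init_x86(c, bit_depth);
    }
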
diff --git a/libavcodec/arm/h264qpel_neon.S b/libavcodec/arm/h264qpel_neon.S
new file mode 100644
index 0000000..6c51250
--- /dev/null
+++ b/libavcodec/arm/h264qpel_neon.S
@@ -0,0 +1,955 @@
+/*
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/arm/asm.S"
+#include "neon.S"
+
+        /* H.264 qpel MC */
+
+.macro  lowpass_const   r
+        movw            \r,  #5
+        movt            \r,  #20
+        vmov.32         d6[0], \r
+.endm
+
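+/* The lowpass_* macros below implement the H.264 six-tap half-pel filter
+ * (x[-2] + x[3]) - 5*(x[-1] + x[2]) + 20*(x[0] + x[1]), rounded and
+ * narrowed with a final >>5.  lowpass_const loads the two filter
+ * constants into the 16-bit lanes d6[0] = 5 and d6[1] = 20 so they can
+ * be used by the vmls.i16/vmla.i16 instructions in the macros below. */
+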
+.macro  lowpass_8       r0,  r1,  r2,  r3,  d0,  d1,  narrow=1
+  .if \narrow
+        t0 .req q0
+        t1 .req q8
+  .else
+        t0 .req \d0
+        t1 .req \d1
+  .endif
+        vext.8          d2,  \r0, \r1, #2
+        vext.8          d3,  \r0, \r1, #3
+        vaddl.u8        q1,  d2,  d3
+        vext.8          d4,  \r0, \r1, #1
+        vext.8          d5,  \r0, \r1, #4
+        vaddl.u8        q2,  d4,  d5
+        vext.8          d30, \r0, \r1, #5
+        vaddl.u8        t0,  \r0, d30
+        vext.8          d18, \r2, \r3, #2
+        vmla.i16        t0,  q1,  d6[1]
+        vext.8          d19, \r2, \r3, #3
+        vaddl.u8        q9,  d18, d19
+        vext.8          d20, \r2, \r3, #1
+        vmls.i16        t0,  q2,  d6[0]
+        vext.8          d21, \r2, \r3, #4
+        vaddl.u8        q10, d20, d21
+        vext.8          d31, \r2, \r3, #5
+        vaddl.u8        t1,  \r2, d31
+        vmla.i16        t1,  q9,  d6[1]
+        vmls.i16        t1,  q10, d6[0]
+  .if \narrow
+        vqrshrun.s16    \d0, t0,  #5
+        vqrshrun.s16    \d1, t1,  #5
+  .endif
+        .unreq  t0
+        .unreq  t1
+.endm
+
+.macro  lowpass_8_1     r0,  r1,  d0,  narrow=1
+  .if \narrow
+        t0 .req q0
+  .else
+        t0 .req \d0
+  .endif
+        vext.8          d2,  \r0, \r1, #2
+        vext.8          d3,  \r0, \r1, #3
+        vaddl.u8        q1,  d2,  d3
+        vext.8          d4,  \r0, \r1, #1
+        vext.8          d5,  \r0, \r1, #4
+        vaddl.u8        q2,  d4,  d5
+        vext.8          d30, \r0, \r1, #5
+        vaddl.u8        t0,  \r0, d30
+        vmla.i16        t0,  q1,  d6[1]
+        vmls.i16        t0,  q2,  d6[0]
+  .if \narrow
+        vqrshrun.s16    \d0, t0,  #5
+  .endif
+        .unreq  t0
+.endm
+
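+/* Variant of the filter that takes 16-bit intermediates produced by a
+ * horizontal pass run with narrow=0: the vertical filter is evaluated at
+ * 32-bit precision and the combined horizontal+vertical scaling is folded
+ * into a single rounding shift by 10 before narrowing back to 8 bits.
+ * Used for the centre (hv) positions. */
+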
+.macro  lowpass_8.16    r0,  r1,  l0,  h0,  l1,  h1,  d
+        vext.16         q1,  \r0, \r1, #2
+        vext.16         q0,  \r0, \r1, #3
+        vaddl.s16       q9,  d2,  d0
+        vext.16         q2,  \r0, \r1, #1
+        vaddl.s16       q1,  d3,  d1
+        vext.16         q3,  \r0, \r1, #4
+        vaddl.s16       q10, d4,  d6
+        vext.16         \r1, \r0, \r1, #5
+        vaddl.s16       q2,  d5,  d7
+        vaddl.s16       q0,  \h0, \h1
+        vaddl.s16       q8,  \l0, \l1
+
+        vshl.i32        q3,  q9,  #4
+        vshl.i32        q9,  q9,  #2
+        vshl.i32        q15, q10, #2
+        vadd.i32        q9,  q9,  q3
+        vadd.i32        q10, q10, q15
+
+        vshl.i32        q3,  q1,  #4
+        vshl.i32        q1,  q1,  #2
+        vshl.i32        q15, q2,  #2
+        vadd.i32        q1,  q1,  q3
+        vadd.i32        q2,  q2,  q15
+
+        vadd.i32        q9,  q9,  q8
+        vsub.i32        q9,  q9,  q10
+
+        vadd.i32        q1,  q1,  q0
+        vsub.i32        q1,  q1,  q2
+
+        vrshrn.s32      d18, q9,  #10
+        vrshrn.s32      d19, q1,  #10
+
+        vqmovun.s16     \d,  q9
+.endm
+
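+/* The h_lowpass helpers below take r0 = dst, r1 = src (already offset by
+ * -2 columns by the caller), r2 = src stride, r3 = dst stride and
+ * r12 = row count; the packed variant sets its own 8-byte dst stride. */
+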
+function put_h264_qpel16_h_lowpass_neon_packed
+        mov             r4,  lr
+        mov             r12, #16
+        mov             r3,  #8
+        bl              put_h264_qpel8_h_lowpass_neon
+        sub             r1,  r1,  r2, lsl #4
+        add             r1,  r1,  #8
+        mov             r12, #16
+        mov             lr,  r4
+        b               put_h264_qpel8_h_lowpass_neon
+endfunc
+
+.macro  h264_qpel_h_lowpass type
+function \type\()_h264_qpel16_h_lowpass_neon
+        push            {lr}
+        mov             r12, #16
+        bl              \type\()_h264_qpel8_h_lowpass_neon
+        sub             r0,  r0,  r3, lsl #4
+        sub             r1,  r1,  r2, lsl #4
+        add             r0,  r0,  #8
+        add             r1,  r1,  #8
+        mov             r12, #16
+        pop             {lr}
+endfunc
+
+function \type\()_h264_qpel8_h_lowpass_neon
+1:      vld1.8          {d0, d1},  [r1], r2
+        vld1.8          {d16,d17}, [r1], r2
+        subs            r12, r12, #2
+        lowpass_8       d0,  d1,  d16, d17, d0,  d16
+  .ifc \type,avg
+        vld1.8          {d2},     [r0,:64], r3
+        vrhadd.u8       d0,  d0,  d2
+        vld1.8          {d3},     [r0,:64]
+        vrhadd.u8       d16, d16, d3
+        sub             r0,  r0,  r3
+  .endif
+        vst1.8          {d0},     [r0,:64], r3
+        vst1.8          {d16},    [r0,:64], r3
+        bne             1b
+        bx              lr
+endfunc
+.endm
+
+        h264_qpel_h_lowpass put
+        h264_qpel_h_lowpass avg
+
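+/* The _l2 variants additionally read a second prediction block (pointer
+ * in r3 for the horizontal versions, r12 for the vertical ones) and
+ * vrhadd it with the filtered result: the quarter-pel positions are
+ * formed as the rounded average of two half-pel/full-pel predictions. */
+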
+.macro  h264_qpel_h_lowpass_l2 type
+function \type\()_h264_qpel16_h_lowpass_l2_neon
+        push            {lr}
+        mov             r12, #16
+        bl              \type\()_h264_qpel8_h_lowpass_l2_neon
+        sub             r0,  r0,  r2, lsl #4
+        sub             r1,  r1,  r2, lsl #4
+        sub             r3,  r3,  r2, lsl #4
+        add             r0,  r0,  #8
+        add             r1,  r1,  #8
+        add             r3,  r3,  #8
+        mov             r12, #16
+        pop             {lr}
+endfunc
+
+function \type\()_h264_qpel8_h_lowpass_l2_neon
+1:      vld1.8          {d0, d1},  [r1], r2
+        vld1.8          {d16,d17}, [r1], r2
+        vld1.8          {d28},     [r3], r2
+        vld1.8          {d29},     [r3], r2
+        subs            r12, r12, #2
+        lowpass_8       d0,  d1,  d16, d17, d0,  d1
+        vrhadd.u8       q0,  q0,  q14
+  .ifc \type,avg
+        vld1.8          {d2},      [r0,:64], r2
+        vrhadd.u8       d0,  d0,  d2
+        vld1.8          {d3},      [r0,:64]
+        vrhadd.u8       d1,  d1,  d3
+        sub             r0,  r0,  r2
+  .endif
+        vst1.8          {d0},      [r0,:64], r2
+        vst1.8          {d1},      [r0,:64], r2
+        bne             1b
+        bx              lr
+endfunc
+.endm
+
+        h264_qpel_h_lowpass_l2 put
+        h264_qpel_h_lowpass_l2 avg
+
+function put_h264_qpel16_v_lowpass_neon_packed
+        mov             r4,  lr
+        mov             r2,  #8
+        bl              put_h264_qpel8_v_lowpass_neon
+        sub             r1,  r1,  r3, lsl #2
+        bl              put_h264_qpel8_v_lowpass_neon
+        sub             r1,  r1,  r3, lsl #4
+        sub             r1,  r1,  r3, lsl #2
+        add             r1,  r1,  #8
+        bl              put_h264_qpel8_v_lowpass_neon
+        sub             r1,  r1,  r3, lsl #2
+        mov             lr,  r4
+        b               put_h264_qpel8_v_lowpass_neon
+endfunc
+
+.macro  h264_qpel_v_lowpass type
+function \type\()_h264_qpel16_v_lowpass_neon
+        mov             r4,  lr
+        bl              \type\()_h264_qpel8_v_lowpass_neon
+        sub             r1,  r1,  r3, lsl #2
+        bl              \type\()_h264_qpel8_v_lowpass_neon
+        sub             r0,  r0,  r2, lsl #4
+        add             r0,  r0,  #8
+        sub             r1,  r1,  r3, lsl #4
+        sub             r1,  r1,  r3, lsl #2
+        add             r1,  r1,  #8
+        bl              \type\()_h264_qpel8_v_lowpass_neon
+        sub             r1,  r1,  r3, lsl #2
+        mov             lr,  r4
+endfunc
+
+function \type\()_h264_qpel8_v_lowpass_neon
+        vld1.8          {d8},  [r1], r3
+        vld1.8          {d10}, [r1], r3
+        vld1.8          {d12}, [r1], r3
+        vld1.8          {d14}, [r1], r3
+        vld1.8          {d22}, [r1], r3
+        vld1.8          {d24}, [r1], r3
+        vld1.8          {d26}, [r1], r3
+        vld1.8          {d28}, [r1], r3
+        vld1.8          {d9},  [r1], r3
+        vld1.8          {d11}, [r1], r3
+        vld1.8          {d13}, [r1], r3
+        vld1.8          {d15}, [r1], r3
+        vld1.8          {d23}, [r1]
+
+        transpose_8x8   q4,  q5,  q6,  q7,  q11, q12, q13, q14
+        lowpass_8       d8,  d9,  d10, d11, d8,  d10
+        lowpass_8       d12, d13, d14, d15, d12, d14
+        lowpass_8       d22, d23, d24, d25, d22, d24
+        lowpass_8       d26, d27, d28, d29, d26, d28
+        transpose_8x8   d8,  d10, d12, d14, d22, d24, d26, d28
+
+  .ifc \type,avg
+        vld1.8          {d9},  [r0,:64], r2
+        vrhadd.u8       d8,  d8,  d9
+        vld1.8          {d11}, [r0,:64], r2
+        vrhadd.u8       d10, d10, d11
+        vld1.8          {d13}, [r0,:64], r2
+        vrhadd.u8       d12, d12, d13
+        vld1.8          {d15}, [r0,:64], r2
+        vrhadd.u8       d14, d14, d15
+        vld1.8          {d23}, [r0,:64], r2
+        vrhadd.u8       d22, d22, d23
+        vld1.8          {d25}, [r0,:64], r2
+        vrhadd.u8       d24, d24, d25
+        vld1.8          {d27}, [r0,:64], r2
+        vrhadd.u8       d26, d26, d27
+        vld1.8          {d29}, [r0,:64], r2
+        vrhadd.u8       d28, d28, d29
+        sub             r0,  r0,  r2,  lsl #3
+  .endif
+
+        vst1.8          {d8},  [r0,:64], r2
+        vst1.8          {d10}, [r0,:64], r2
+        vst1.8          {d12}, [r0,:64], r2
+        vst1.8          {d14}, [r0,:64], r2
+        vst1.8          {d22}, [r0,:64], r2
+        vst1.8          {d24}, [r0,:64], r2
+        vst1.8          {d26}, [r0,:64], r2
+        vst1.8          {d28}, [r0,:64], r2
+
+        bx              lr
+endfunc
+.endm
+
+        h264_qpel_v_lowpass put
+        h264_qpel_v_lowpass avg
+
+.macro  h264_qpel_v_lowpass_l2 type
+function \type\()_h264_qpel16_v_lowpass_l2_neon
+        mov             r4,  lr
+        bl              \type\()_h264_qpel8_v_lowpass_l2_neon
+        sub             r1,  r1,  r3, lsl #2
+        bl              \type\()_h264_qpel8_v_lowpass_l2_neon
+        sub             r0,  r0,  r3, lsl #4
+        sub             r12, r12, r2, lsl #4
+        add             r0,  r0,  #8
+        add             r12, r12, #8
+        sub             r1,  r1,  r3, lsl #4
+        sub             r1,  r1,  r3, lsl #2
+        add             r1,  r1,  #8
+        bl              \type\()_h264_qpel8_v_lowpass_l2_neon
+        sub             r1,  r1,  r3, lsl #2
+        mov             lr,  r4
+endfunc
+
+function \type\()_h264_qpel8_v_lowpass_l2_neon
+        vld1.8          {d8},  [r1], r3
+        vld1.8          {d10}, [r1], r3
+        vld1.8          {d12}, [r1], r3
+        vld1.8          {d14}, [r1], r3
+        vld1.8          {d22}, [r1], r3
+        vld1.8          {d24}, [r1], r3
+        vld1.8          {d26}, [r1], r3
+        vld1.8          {d28}, [r1], r3
+        vld1.8          {d9},  [r1], r3
+        vld1.8          {d11}, [r1], r3
+        vld1.8          {d13}, [r1], r3
+        vld1.8          {d15}, [r1], r3
+        vld1.8          {d23}, [r1]
+
+        transpose_8x8   q4,  q5,  q6,  q7,  q11, q12, q13, q14
+        lowpass_8       d8,  d9,  d10, d11, d8,  d9
+        lowpass_8       d12, d13, d14, d15, d12, d13
+        lowpass_8       d22, d23, d24, d25, d22, d23
+        lowpass_8       d26, d27, d28, d29, d26, d27
+        transpose_8x8   d8,  d9,  d12, d13, d22, d23, d26, d27
+
+        vld1.8          {d0},  [r12], r2
+        vld1.8          {d1},  [r12], r2
+        vld1.8          {d2},  [r12], r2
+        vld1.8          {d3},  [r12], r2
+        vld1.8          {d4},  [r12], r2
+        vrhadd.u8       q0,  q0,  q4
+        vld1.8          {d5},  [r12], r2
+        vrhadd.u8       q1,  q1,  q6
+        vld1.8          {d10}, [r12], r2
+        vrhadd.u8       q2,  q2,  q11
+        vld1.8          {d11}, [r12], r2
+        vrhadd.u8       q5,  q5,  q13
+
+  .ifc \type,avg
+        vld1.8          {d16}, [r0,:64], r3
+        vrhadd.u8       d0,  d0,  d16
+        vld1.8          {d17}, [r0,:64], r3
+        vrhadd.u8       d1,  d1,  d17
+        vld1.8          {d16}, [r0,:64], r3
+        vrhadd.u8       d2,  d2,  d16
+        vld1.8          {d17}, [r0,:64], r3
+        vrhadd.u8       d3,  d3,  d17
+        vld1.8          {d16}, [r0,:64], r3
+        vrhadd.u8       d4,  d4,  d16
+        vld1.8          {d17}, [r0,:64], r3
+        vrhadd.u8       d5,  d5,  d17
+        vld1.8          {d16}, [r0,:64], r3
+        vrhadd.u8       d10, d10, d16
+        vld1.8          {d17}, [r0,:64], r3
+        vrhadd.u8       d11, d11, d17
+        sub             r0,  r0,  r3,  lsl #3
+  .endif
+
+        vst1.8          {d0},  [r0,:64], r3
+        vst1.8          {d1},  [r0,:64], r3
+        vst1.8          {d2},  [r0,:64], r3
+        vst1.8          {d3},  [r0,:64], r3
+        vst1.8          {d4},  [r0,:64], r3
+        vst1.8          {d5},  [r0,:64], r3
+        vst1.8          {d10}, [r0,:64], r3
+        vst1.8          {d11}, [r0,:64], r3
+
+        bx              lr
+endfunc
+.endm
+
+        h264_qpel_v_lowpass_l2 put
+        h264_qpel_v_lowpass_l2 avg
+
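+/* Centre (hv) case: the horizontal filter is first run over the 13
+ * required source rows at full 16-bit precision (12 rows are written to
+ * a 16*12 byte scratch buffer addressed by r4, the last row stays in
+ * registers), the intermediate block is transposed, and the vertical
+ * pass is then applied with lowpass_8.16. */
+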
+function put_h264_qpel8_hv_lowpass_neon_top
+        lowpass_const   r12
+        mov             r12, #12
+1:      vld1.8          {d0, d1},  [r1], r3
+        vld1.8          {d16,d17}, [r1], r3
+        subs            r12, r12, #2
+        lowpass_8       d0,  d1,  d16, d17, q11, q12, narrow=0
+        vst1.8          {d22-d25}, [r4,:128]!
+        bne             1b
+
+        vld1.8          {d0, d1},  [r1]
+        lowpass_8_1     d0,  d1,  q12, narrow=0
+
+        mov             r12, #-16
+        add             r4,  r4,  r12
+        vld1.8          {d30,d31}, [r4,:128], r12
+        vld1.8          {d20,d21}, [r4,:128], r12
+        vld1.8          {d18,d19}, [r4,:128], r12
+        vld1.8          {d16,d17}, [r4,:128], r12
+        vld1.8          {d14,d15}, [r4,:128], r12
+        vld1.8          {d12,d13}, [r4,:128], r12
+        vld1.8          {d10,d11}, [r4,:128], r12
+        vld1.8          {d8, d9},  [r4,:128], r12
+        vld1.8          {d6, d7},  [r4,:128], r12
+        vld1.8          {d4, d5},  [r4,:128], r12
+        vld1.8          {d2, d3},  [r4,:128], r12
+        vld1.8          {d0, d1},  [r4,:128]
+
+        swap4           d1,  d3,  d5,  d7,  d8,  d10, d12, d14
+        transpose16_4x4 q0,  q1,  q2,  q3,  q4,  q5,  q6,  q7
+
+        swap4           d17, d19, d21, d31, d24, d26, d28, d22
+        transpose16_4x4 q8,  q9,  q10, q15, q12, q13, q14, q11
+
+        vst1.8          {d30,d31}, [r4,:128]!
+        vst1.8          {d6, d7},  [r4,:128]!
+        vst1.8          {d20,d21}, [r4,:128]!
+        vst1.8          {d4, d5},  [r4,:128]!
+        vst1.8          {d18,d19}, [r4,:128]!
+        vst1.8          {d2, d3},  [r4,:128]!
+        vst1.8          {d16,d17}, [r4,:128]!
+        vst1.8          {d0, d1},  [r4,:128]
+
+        lowpass_8.16    q4,  q12, d8,  d9,  d24, d25, d8
+        lowpass_8.16    q5,  q13, d10, d11, d26, d27, d9
+        lowpass_8.16    q6,  q14, d12, d13, d28, d29, d10
+        lowpass_8.16    q7,  q11, d14, d15, d22, d23, d11
+
+        vld1.8          {d16,d17}, [r4,:128], r12
+        vld1.8          {d30,d31}, [r4,:128], r12
+        lowpass_8.16    q8,  q15, d16, d17, d30, d31, d12
+        vld1.8          {d16,d17}, [r4,:128], r12
+        vld1.8          {d30,d31}, [r4,:128], r12
+        lowpass_8.16    q8,  q15, d16, d17, d30, d31, d13
+        vld1.8          {d16,d17}, [r4,:128], r12
+        vld1.8          {d30,d31}, [r4,:128], r12
+        lowpass_8.16    q8,  q15, d16, d17, d30, d31, d14
+        vld1.8          {d16,d17}, [r4,:128], r12
+        vld1.8          {d30,d31}, [r4,:128]
+        lowpass_8.16    q8,  q15, d16, d17, d30, d31, d15
+
+        transpose_8x8   d12, d13, d14, d15, d8,  d9,  d10, d11
+
+        bx              lr
+endfunc
+
+.macro  h264_qpel8_hv_lowpass type
+function \type\()_h264_qpel8_hv_lowpass_neon
+        mov             r10, lr
+        bl              put_h264_qpel8_hv_lowpass_neon_top
+  .ifc \type,avg
+        vld1.8          {d0},      [r0,:64], r2
+        vrhadd.u8       d12, d12, d0
+        vld1.8          {d1},      [r0,:64], r2
+        vrhadd.u8       d13, d13, d1
+        vld1.8          {d2},      [r0,:64], r2
+        vrhadd.u8       d14, d14, d2
+        vld1.8          {d3},      [r0,:64], r2
+        vrhadd.u8       d15, d15, d3
+        vld1.8          {d4},      [r0,:64], r2
+        vrhadd.u8       d8,  d8,  d4
+        vld1.8          {d5},      [r0,:64], r2
+        vrhadd.u8       d9,  d9,  d5
+        vld1.8          {d6},      [r0,:64], r2
+        vrhadd.u8       d10, d10, d6
+        vld1.8          {d7},      [r0,:64], r2
+        vrhadd.u8       d11, d11, d7
+        sub             r0,  r0,  r2,  lsl #3
+  .endif
+
+        vst1.8          {d12},     [r0,:64], r2
+        vst1.8          {d13},     [r0,:64], r2
+        vst1.8          {d14},     [r0,:64], r2
+        vst1.8          {d15},     [r0,:64], r2
+        vst1.8          {d8},      [r0,:64], r2
+        vst1.8          {d9},      [r0,:64], r2
+        vst1.8          {d10},     [r0,:64], r2
+        vst1.8          {d11},     [r0,:64], r2
+
+        mov             lr,  r10
+        bx              lr
+endfunc
+.endm
+
+        h264_qpel8_hv_lowpass put
+        h264_qpel8_hv_lowpass avg
+
+.macro  h264_qpel8_hv_lowpass_l2 type
+function \type\()_h264_qpel8_hv_lowpass_l2_neon
+        mov             r10, lr
+        bl              put_h264_qpel8_hv_lowpass_neon_top
+
+        vld1.8          {d0, d1},  [r2,:128]!
+        vld1.8          {d2, d3},  [r2,:128]!
+        vrhadd.u8       q0,  q0,  q6
+        vld1.8          {d4, d5},  [r2,:128]!
+        vrhadd.u8       q1,  q1,  q7
+        vld1.8          {d6, d7},  [r2,:128]!
+        vrhadd.u8       q2,  q2,  q4
+        vrhadd.u8       q3,  q3,  q5
+  .ifc \type,avg
+        vld1.8          {d16},     [r0,:64], r3
+        vrhadd.u8       d0,  d0,  d16
+        vld1.8          {d17},     [r0,:64], r3
+        vrhadd.u8       d1,  d1,  d17
+        vld1.8          {d18},     [r0,:64], r3
+        vrhadd.u8       d2,  d2,  d18
+        vld1.8          {d19},     [r0,:64], r3
+        vrhadd.u8       d3,  d3,  d19
+        vld1.8          {d20},     [r0,:64], r3
+        vrhadd.u8       d4,  d4,  d20
+        vld1.8          {d21},     [r0,:64], r3
+        vrhadd.u8       d5,  d5,  d21
+        vld1.8          {d22},     [r0,:64], r3
+        vrhadd.u8       d6,  d6,  d22
+        vld1.8          {d23},     [r0,:64], r3
+        vrhadd.u8       d7,  d7,  d23
+        sub             r0,  r0,  r3,  lsl #3
+  .endif
+        vst1.8          {d0},      [r0,:64], r3
+        vst1.8          {d1},      [r0,:64], r3
+        vst1.8          {d2},      [r0,:64], r3
+        vst1.8          {d3},      [r0,:64], r3
+        vst1.8          {d4},      [r0,:64], r3
+        vst1.8          {d5},      [r0,:64], r3
+        vst1.8          {d6},      [r0,:64], r3
+        vst1.8          {d7},      [r0,:64], r3
+
+        mov             lr,  r10
+        bx              lr
+endfunc
+.endm
+
+        h264_qpel8_hv_lowpass_l2 put
+        h264_qpel8_hv_lowpass_l2 avg
+
+.macro  h264_qpel16_hv  type
+function \type\()_h264_qpel16_hv_lowpass_neon
+        mov             r9,  lr
+        bl              \type\()_h264_qpel8_hv_lowpass_neon
+        sub             r1,  r1,  r3, lsl #2
+        bl              \type\()_h264_qpel8_hv_lowpass_neon
+        sub             r1,  r1,  r3, lsl #4
+        sub             r1,  r1,  r3, lsl #2
+        add             r1,  r1,  #8
+        sub             r0,  r0,  r2, lsl #4
+        add             r0,  r0,  #8
+        bl              \type\()_h264_qpel8_hv_lowpass_neon
+        sub             r1,  r1,  r3, lsl #2
+        mov             lr,  r9
+        b               \type\()_h264_qpel8_hv_lowpass_neon
+endfunc
+
+function \type\()_h264_qpel16_hv_lowpass_l2_neon
+        mov             r9,  lr
+        sub             r2,  r4,  #256
+        bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
+        sub             r1,  r1,  r3, lsl #2
+        bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
+        sub             r1,  r1,  r3, lsl #4
+        sub             r1,  r1,  r3, lsl #2
+        add             r1,  r1,  #8
+        sub             r0,  r0,  r3, lsl #4
+        add             r0,  r0,  #8
+        bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
+        sub             r1,  r1,  r3, lsl #2
+        mov             lr,  r9
+        b               \type\()_h264_qpel8_hv_lowpass_l2_neon
+endfunc
+.endm
+
+        h264_qpel16_hv put
+        h264_qpel16_hv avg
+
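+/* ff_{put,avg}_h264_qpel{8,16}_mcXY: X and Y are the quarter-pel offsets
+ * of the interpolated block in the horizontal and vertical direction
+ * (mc00 = integer position, mc20 = horizontal half-pel, mc22 = centre). */
+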
+.macro  h264_qpel8      type
+function ff_\type\()_h264_qpel8_mc10_neon, export=1
+        lowpass_const   r3
+        mov             r3,  r1
+        sub             r1,  r1,  #2
+        mov             r12, #8
+        b               \type\()_h264_qpel8_h_lowpass_l2_neon
+endfunc
+
+function ff_\type\()_h264_qpel8_mc20_neon, export=1
+        lowpass_const   r3
+        sub             r1,  r1,  #2
+        mov             r3,  r2
+        mov             r12, #8
+        b               \type\()_h264_qpel8_h_lowpass_neon
+endfunc
+
+function ff_\type\()_h264_qpel8_mc30_neon, export=1
+        lowpass_const   r3
+        add             r3,  r1,  #1
+        sub             r1,  r1,  #2
+        mov             r12, #8
+        b               \type\()_h264_qpel8_h_lowpass_l2_neon
+endfunc
+
+function ff_\type\()_h264_qpel8_mc01_neon, export=1
+        push            {lr}
+        mov             r12, r1
+\type\()_h264_qpel8_mc01:
+        lowpass_const   r3
+        mov             r3,  r2
+        sub             r1,  r1,  r2, lsl #1
+        vpush           {d8-d15}
+        bl              \type\()_h264_qpel8_v_lowpass_l2_neon
+        vpop            {d8-d15}
+        pop             {pc}
+endfunc
+
+function ff_\type\()_h264_qpel8_mc11_neon, export=1
+        push            {r0, r1, r11, lr}
+\type\()_h264_qpel8_mc11:
+        lowpass_const   r3
+        mov             r11, sp
+A       bic             sp,  sp,  #15
+T       bic             r0,  r11, #15
+T       mov             sp,  r0
+        sub             sp,  sp,  #64
+        mov             r0,  sp
+        sub             r1,  r1,  #2
+        mov             r3,  #8
+        mov             r12, #8
+        vpush           {d8-d15}
+        bl              put_h264_qpel8_h_lowpass_neon
+        ldrd            r0,  r1,  [r11], #8
+        mov             r3,  r2
+        add             r12, sp,  #64
+        sub             r1,  r1,  r2, lsl #1
+        mov             r2,  #8
+        bl              \type\()_h264_qpel8_v_lowpass_l2_neon
+        vpop            {d8-d15}
+        mov             sp,  r11
+        pop             {r11, pc}
+endfunc
+
+function ff_\type\()_h264_qpel8_mc21_neon, export=1
+        push            {r0, r1, r4, r10, r11, lr}
+\type\()_h264_qpel8_mc21:
+        lowpass_const   r3
+        mov             r11, sp
+A       bic             sp,  sp,  #15
+T       bic             r0,  r11, #15
+T       mov             sp,  r0
+        sub             sp,  sp,  #(8*8+16*12)
+        sub             r1,  r1,  #2
+        mov             r3,  #8
+        mov             r0,  sp
+        mov             r12, #8
+        vpush           {d8-d15}
+        bl              put_h264_qpel8_h_lowpass_neon
+        mov             r4,  r0
+        ldrd            r0,  r1,  [r11], #8
+        sub             r1,  r1,  r2, lsl #1
+        sub             r1,  r1,  #2
+        mov             r3,  r2
+        sub             r2,  r4,  #64
+        bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
+        vpop            {d8-d15}
+        mov             sp,  r11
+        pop             {r4, r10, r11, pc}
+endfunc
+
+function ff_\type\()_h264_qpel8_mc31_neon, export=1
+        add             r1,  r1,  #1
+        push            {r0, r1, r11, lr}
+        sub             r1,  r1,  #1
+        b               \type\()_h264_qpel8_mc11
+endfunc
+
+function ff_\type\()_h264_qpel8_mc02_neon, export=1
+        push            {lr}
+        lowpass_const   r3
+        sub             r1,  r1,  r2, lsl #1
+        mov             r3,  r2
+        vpush           {d8-d15}
+        bl              \type\()_h264_qpel8_v_lowpass_neon
+        vpop            {d8-d15}
+        pop             {pc}
+endfunc
+
+function ff_\type\()_h264_qpel8_mc12_neon, export=1
+        push            {r0, r1, r4, r10, r11, lr}
+\type\()_h264_qpel8_mc12:
+        lowpass_const   r3
+        mov             r11, sp
+A       bic             sp,  sp,  #15
+T       bic             r0,  r11, #15
+T       mov             sp,  r0
+        sub             sp,  sp,  #(8*8+16*12)
+        sub             r1,  r1,  r2, lsl #1
+        mov             r3,  r2
+        mov             r2,  #8
+        mov             r0,  sp
+        vpush           {d8-d15}
+        bl              put_h264_qpel8_v_lowpass_neon
+        mov             r4,  r0
+        ldrd            r0,  r1,  [r11], #8
+        sub             r1,  r1,  r3, lsl #1
+        sub             r1,  r1,  #2
+        sub             r2,  r4,  #64
+        bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
+        vpop            {d8-d15}
+        mov             sp,  r11
+        pop             {r4, r10, r11, pc}
+endfunc
+
+function ff_\type\()_h264_qpel8_mc22_neon, export=1
+        push            {r4, r10, r11, lr}
+        mov             r11, sp
+A       bic             sp,  sp,  #15
+T       bic             r4,  r11, #15
+T       mov             sp,  r4
+        sub             r1,  r1,  r2, lsl #1
+        sub             r1,  r1,  #2
+        mov             r3,  r2
+        sub             sp,  sp,  #(16*12)
+        mov             r4,  sp
+        vpush           {d8-d15}
+        bl              \type\()_h264_qpel8_hv_lowpass_neon
+        vpop            {d8-d15}
+        mov             sp,  r11
+        pop             {r4, r10, r11, pc}
+endfunc
+
+function ff_\type\()_h264_qpel8_mc32_neon, export=1
+        push            {r0, r1, r4, r10, r11, lr}
+        add             r1,  r1,  #1
+        b               \type\()_h264_qpel8_mc12
+endfunc
+
+function ff_\type\()_h264_qpel8_mc03_neon, export=1
+        push            {lr}
+        add             r12, r1,  r2
+        b               \type\()_h264_qpel8_mc01
+endfunc
+
+function ff_\type\()_h264_qpel8_mc13_neon, export=1
+        push            {r0, r1, r11, lr}
+        add             r1,  r1,  r2
+        b               \type\()_h264_qpel8_mc11
+endfunc
+
+function ff_\type\()_h264_qpel8_mc23_neon, export=1
+        push            {r0, r1, r4, r10, r11, lr}
+        add             r1,  r1,  r2
+        b               \type\()_h264_qpel8_mc21
+endfunc
+
+function ff_\type\()_h264_qpel8_mc33_neon, export=1
+        add             r1,  r1,  #1
+        push            {r0, r1, r11, lr}
+        add             r1,  r1,  r2
+        sub             r1,  r1,  #1
+        b               \type\()_h264_qpel8_mc11
+endfunc
+.endm
+
+        h264_qpel8 put
+        h264_qpel8 avg
+
+.macro  h264_qpel16     type
+function ff_\type\()_h264_qpel16_mc10_neon, export=1
+        lowpass_const   r3
+        mov             r3,  r1
+        sub             r1,  r1,  #2
+        b               \type\()_h264_qpel16_h_lowpass_l2_neon
+endfunc
+
+function ff_\type\()_h264_qpel16_mc20_neon, export=1
+        lowpass_const   r3
+        sub             r1,  r1,  #2
+        mov             r3,  r2
+        b               \type\()_h264_qpel16_h_lowpass_neon
+endfunc
+
+function ff_\type\()_h264_qpel16_mc30_neon, export=1
+        lowpass_const   r3
+        add             r3,  r1,  #1
+        sub             r1,  r1,  #2
+        b               \type\()_h264_qpel16_h_lowpass_l2_neon
+endfunc
+
+function ff_\type\()_h264_qpel16_mc01_neon, export=1
+        push            {r4, lr}
+        mov             r12, r1
+\type\()_h264_qpel16_mc01:
+        lowpass_const   r3
+        mov             r3,  r2
+        sub             r1,  r1,  r2, lsl #1
+        vpush           {d8-d15}
+        bl              \type\()_h264_qpel16_v_lowpass_l2_neon
+        vpop            {d8-d15}
+        pop             {r4, pc}
+endfunc
+
+function ff_\type\()_h264_qpel16_mc11_neon, export=1
+        push            {r0, r1, r4, r11, lr}
+\type\()_h264_qpel16_mc11:
+        lowpass_const   r3
+        mov             r11, sp
+A       bic             sp,  sp,  #15
+T       bic             r0,  r11, #15
+T       mov             sp,  r0
+        sub             sp,  sp,  #256
+        mov             r0,  sp
+        sub             r1,  r1,  #2
+        mov             r3,  #16
+        vpush           {d8-d15}
+        bl              put_h264_qpel16_h_lowpass_neon
+        ldrd            r0,  r1,  [r11], #8
+        mov             r3,  r2
+        add             r12, sp,  #64
+        sub             r1,  r1,  r2, lsl #1
+        mov             r2,  #16
+        bl              \type\()_h264_qpel16_v_lowpass_l2_neon
+        vpop            {d8-d15}
+        mov             sp,  r11
+        pop             {r4, r11, pc}
+endfunc
+
+function ff_\type\()_h264_qpel16_mc21_neon, export=1
+        push            {r0, r1, r4-r5, r9-r11, lr}
+\type\()_h264_qpel16_mc21:
+        lowpass_const   r3
+        mov             r11, sp
+A       bic             sp,  sp,  #15
+T       bic             r0,  r11, #15
+T       mov             sp,  r0
+        sub             sp,  sp,  #(16*16+16*12)
+        sub             r1,  r1,  #2
+        mov             r0,  sp
+        vpush           {d8-d15}
+        bl              put_h264_qpel16_h_lowpass_neon_packed
+        mov             r4,  r0
+        ldrd            r0,  r1,  [r11], #8
+        sub             r1,  r1,  r2, lsl #1
+        sub             r1,  r1,  #2
+        mov             r3,  r2
+        bl              \type\()_h264_qpel16_hv_lowpass_l2_neon
+        vpop            {d8-d15}
+        mov             sp,  r11
+        pop             {r4-r5, r9-r11, pc}
+endfunc
+
+function ff_\type\()_h264_qpel16_mc31_neon, export=1
+        add             r1,  r1,  #1
+        push            {r0, r1, r4, r11, lr}
+        sub             r1,  r1,  #1
+        b               \type\()_h264_qpel16_mc11
+endfunc
+
+function ff_\type\()_h264_qpel16_mc02_neon, export=1
+        push            {r4, lr}
+        lowpass_const   r3
+        sub             r1,  r1,  r2, lsl #1
+        mov             r3,  r2
+        vpush           {d8-d15}
+        bl              \type\()_h264_qpel16_v_lowpass_neon
+        vpop            {d8-d15}
+        pop             {r4, pc}
+endfunc
+
+function ff_\type\()_h264_qpel16_mc12_neon, export=1
+        push            {r0, r1, r4-r5, r9-r11, lr}
+\type\()_h264_qpel16_mc12:
+        lowpass_const   r3
+        mov             r11, sp
+A       bic             sp,  sp,  #15
+T       bic             r0,  r11, #15
+T       mov             sp,  r0
+        sub             sp,  sp,  #(16*16+16*12)
+        sub             r1,  r1,  r2, lsl #1
+        mov             r0,  sp
+        mov             r3,  r2
+        vpush           {d8-d15}
+        bl              put_h264_qpel16_v_lowpass_neon_packed
+        mov             r4,  r0
+        ldrd            r0,  r1,  [r11], #8
+        sub             r1,  r1,  r3, lsl #1
+        sub             r1,  r1,  #2
+        mov             r2,  r3
+        bl              \type\()_h264_qpel16_hv_lowpass_l2_neon
+        vpop            {d8-d15}
+        mov             sp,  r11
+        pop             {r4-r5, r9-r11, pc}
+endfunc
+
+function ff_\type\()_h264_qpel16_mc22_neon, export=1
+        push            {r4, r9-r11, lr}
+        lowpass_const   r3
+        mov             r11, sp
+A       bic             sp,  sp,  #15
+T       bic             r4,  r11, #15
+T       mov             sp,  r4
+        sub             r1,  r1,  r2, lsl #1
+        sub             r1,  r1,  #2
+        mov             r3,  r2
+        sub             sp,  sp,  #(16*12)
+        mov             r4,  sp
+        vpush           {d8-d15}
+        bl              \type\()_h264_qpel16_hv_lowpass_neon
+        vpop            {d8-d15}
+        mov             sp,  r11
+        pop             {r4, r9-r11, pc}
+endfunc
+
+function ff_\type\()_h264_qpel16_mc32_neon, export=1
+        push            {r0, r1, r4-r5, r9-r11, lr}
+        add             r1,  r1,  #1
+        b               \type\()_h264_qpel16_mc12
+endfunc
+
+function ff_\type\()_h264_qpel16_mc03_neon, export=1
+        push            {r4, lr}
+        add             r12, r1,  r2
+        b               \type\()_h264_qpel16_mc01
+endfunc
+
+function ff_\type\()_h264_qpel16_mc13_neon, export=1
+        push            {r0, r1, r4, r11, lr}
+        add             r1,  r1,  r2
+        b               \type\()_h264_qpel16_mc11
+endfunc
+
+function ff_\type\()_h264_qpel16_mc23_neon, export=1
+        push            {r0, r1, r4-r5, r9-r11, lr}
+        add             r1,  r1,  r2
+        b               \type\()_h264_qpel16_mc21
+endfunc
+
+function ff_\type\()_h264_qpel16_mc33_neon, export=1
+        add             r1,  r1,  #1
+        push            {r0, r1, r4, r11, lr}
+        add             r1,  r1,  r2
+        sub             r1,  r1,  #1
+        b               \type\()_h264_qpel16_mc11
+endfunc
+.endm
+
+        h264_qpel16 put
+        h264_qpel16 avg
diff --git a/libavcodec/dsputil.c b/libavcodec/dsputil.c
index a049683..eddf4bd 100644
@@ -2712,24 +2712,6 @@ av_cold void ff_dsputil_init(DSPContext* c, AVCodecContext *avctx)
 #define FUNC(f, depth) f ## _ ## depth
 #define FUNCC(f, depth) f ## _ ## depth ## _c
 
-#define dspfunc2(PFX, IDX, NUM, depth)\
-    c->PFX ## _pixels_tab[IDX][ 0] = FUNCC(PFX ## NUM ## _mc00, depth);\
-    c->PFX ## _pixels_tab[IDX][ 1] = FUNCC(PFX ## NUM ## _mc10, depth);\
-    c->PFX ## _pixels_tab[IDX][ 2] = FUNCC(PFX ## NUM ## _mc20, depth);\
-    c->PFX ## _pixels_tab[IDX][ 3] = FUNCC(PFX ## NUM ## _mc30, depth);\
-    c->PFX ## _pixels_tab[IDX][ 4] = FUNCC(PFX ## NUM ## _mc01, depth);\
-    c->PFX ## _pixels_tab[IDX][ 5] = FUNCC(PFX ## NUM ## _mc11, depth);\
-    c->PFX ## _pixels_tab[IDX][ 6] = FUNCC(PFX ## NUM ## _mc21, depth);\
-    c->PFX ## _pixels_tab[IDX][ 7] = FUNCC(PFX ## NUM ## _mc31, depth);\
-    c->PFX ## _pixels_tab[IDX][ 8] = FUNCC(PFX ## NUM ## _mc02, depth);\
-    c->PFX ## _pixels_tab[IDX][ 9] = FUNCC(PFX ## NUM ## _mc12, depth);\
-    c->PFX ## _pixels_tab[IDX][10] = FUNCC(PFX ## NUM ## _mc22, depth);\
-    c->PFX ## _pixels_tab[IDX][11] = FUNCC(PFX ## NUM ## _mc32, depth);\
-    c->PFX ## _pixels_tab[IDX][12] = FUNCC(PFX ## NUM ## _mc03, depth);\
-    c->PFX ## _pixels_tab[IDX][13] = FUNCC(PFX ## NUM ## _mc13, depth);\
-    c->PFX ## _pixels_tab[IDX][14] = FUNCC(PFX ## NUM ## _mc23, depth);\
-    c->PFX ## _pixels_tab[IDX][15] = FUNCC(PFX ## NUM ## _mc33, depth)
-
 #define BIT_DEPTH_FUNCS(depth, dct)\
     c->get_pixels                    = FUNCC(get_pixels   ## dct   , depth);\
     c->draw_edges                    = FUNCC(draw_edges            , depth);\
@@ -2743,15 +2725,7 @@ av_cold void ff_dsputil_init(DSPContext* c, AVCodecContext *avctx)
     c->put_h264_chroma_pixels_tab[2] = FUNCC(put_h264_chroma_mc2   , depth);\
     c->avg_h264_chroma_pixels_tab[0] = FUNCC(avg_h264_chroma_mc8   , depth);\
     c->avg_h264_chroma_pixels_tab[1] = FUNCC(avg_h264_chroma_mc4   , depth);\
-    c->avg_h264_chroma_pixels_tab[2] = FUNCC(avg_h264_chroma_mc2   , depth);\
-\
-    dspfunc2(put_h264_qpel, 0, 16, depth);\
-    dspfunc2(put_h264_qpel, 1,  8, depth);\
-    dspfunc2(put_h264_qpel, 2,  4, depth);\
-    dspfunc2(put_h264_qpel, 3,  2, depth);\
-    dspfunc2(avg_h264_qpel, 0, 16, depth);\
-    dspfunc2(avg_h264_qpel, 1,  8, depth);\
-    dspfunc2(avg_h264_qpel, 2,  4, depth);
+    c->avg_h264_chroma_pixels_tab[2] = FUNCC(avg_h264_chroma_mc2   , depth)
 
     switch (avctx->bits_per_raw_sample) {
     case 9:
diff --git a/libavcodec/dsputil.h b/libavcodec/dsputil.h
index 96ee588..d656e26 100644
@@ -314,9 +314,6 @@ typedef struct DSPContext {
     h264_chroma_mc_func put_h264_chroma_pixels_tab[3];
     h264_chroma_mc_func avg_h264_chroma_pixels_tab[3];
 
-    qpel_mc_func put_h264_qpel_pixels_tab[4][16];
-    qpel_mc_func avg_h264_qpel_pixels_tab[3][16];
-
     me_cmp_func pix_abs[2][4];
 
     /* huffyuv specific */
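
The two function-pointer tables removed from DSPContext above are the ones
that move into the new, self-contained h264qpel module.  A rough sketch of
the assumed replacement interface (the real definitions live in the new
h264qpel.h/h264qpel.c added by this commit, which are not part of this
excerpt; the names and array sizes here simply mirror the fields deleted
above):

    typedef struct H264QpelContext {
        qpel_mc_func put_h264_qpel_pixels_tab[4][16];
        qpel_mc_func avg_h264_qpel_pixels_tab[3][16];
    } H264QpelContext;

    /* assumed entry point, initialized per bit depth by its users
     * instead of through ff_dsputil_init() */
    void ff_h264qpel_init(H264QpelContext *c, int bit_depth);
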
diff --git a/libavcodec/dsputil_template.c b/libavcodec/dsputil_template.c
index 2de0e65..42f2b7b 100644
@@ -197,86 +197,9 @@ DCTELEM_FUNCS(int16_t, _16)
 DCTELEM_FUNCS(dctcoef, _32)
 #endif
 
-#define PIXOP3(OPNAME, OP) \
-static void FUNCC(OPNAME ## _pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
-    int i;\
-    for(i=0; i<h; i++){\
-        OP(*((pixel4*)(block  )), AV_RN4P(pixels  ));\
-        pixels+=line_size;\
-        block +=line_size;\
-    }\
-}\
-static void FUNCC(OPNAME ## _pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
-    int i;\
-    for(i=0; i<h; i++){\
-        OP(*((pixel4*)(block                )), AV_RN4P(pixels                ));\
-        OP(*((pixel4*)(block+4*sizeof(pixel))), AV_RN4P(pixels+4*sizeof(pixel)));\
-        pixels+=line_size;\
-        block +=line_size;\
-    }\
-}\
-static inline void FUNCC(OPNAME ## _no_rnd_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
-    FUNCC(OPNAME ## _pixels8)(block, pixels, line_size, h);\
-}\
-\
-static inline void FUNC(OPNAME ## _pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
-                                                int src_stride1, int src_stride2, int h){\
-    int i;\
-    for(i=0; i<h; i++){\
-        pixel4 a,b;\
-        a= AV_RN4P(&src1[i*src_stride1  ]);\
-        b= AV_RN4P(&src2[i*src_stride2  ]);\
-        OP(*((pixel4*)&dst[i*dst_stride  ]), rnd_avg_pixel4(a, b));\
-        a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
-        b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
-        OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), rnd_avg_pixel4(a, b));\
-    }\
-}\
-\
-static inline void FUNC(OPNAME ## _pixels4_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
-                                                int src_stride1, int src_stride2, int h){\
-    int i;\
-    for(i=0; i<h; i++){\
-        pixel4 a,b;\
-        a= AV_RN4P(&src1[i*src_stride1  ]);\
-        b= AV_RN4P(&src2[i*src_stride2  ]);\
-        OP(*((pixel4*)&dst[i*dst_stride  ]), rnd_avg_pixel4(a, b));\
-    }\
-}\
-\
-static inline void FUNC(OPNAME ## _pixels2_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
-                                                int src_stride1, int src_stride2, int h){\
-    int i;\
-    for(i=0; i<h; i++){\
-        pixel4 a,b;\
-        a= AV_RN2P(&src1[i*src_stride1  ]);\
-        b= AV_RN2P(&src2[i*src_stride2  ]);\
-        OP(*((pixel2*)&dst[i*dst_stride  ]), rnd_avg_pixel4(a, b));\
-    }\
-}\
-\
-static inline void FUNC(OPNAME ## _pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
-                                                int src_stride1, int src_stride2, int h){\
-    FUNC(OPNAME ## _pixels8_l2)(dst  , src1  , src2  , dst_stride, src_stride1, src_stride2, h);\
-    FUNC(OPNAME ## _pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
-}\
-\
-CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16)    , FUNCC(OPNAME ## _pixels8)    , 8*sizeof(pixel))
-
-#define PIXOP4(OPNAME, OP) \
-static void FUNCC(OPNAME ## _pixels2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
-    int i;\
-    for(i=0; i<h; i++){\
-        OP(*((pixel2*)(block  )), AV_RN2P(pixels  ));\
-        pixels+=line_size;\
-        block +=line_size;\
-    }\
-}\
-PIXOP3(OPNAME, OP)
+#include "hpel_template.c"
 
 #define PIXOP2(OPNAME, OP) \
-PIXOP4(OPNAME, OP)\
-\
 static inline void FUNC(OPNAME ## _no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                 int src_stride1, int src_stride2, int h){\
     int i;\
@@ -581,11 +504,9 @@ CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_xy2), FUNCC(OPNAME ## _no_rnd_pi
 #define op_avg(a, b) a = rnd_avg_pixel4(a, b)
 #define op_put(a, b) a = b
 #if BIT_DEPTH == 8
+#define put_no_rnd_pixels8_8_c put_pixels8_8_c
 PIXOP2(avg, op_avg)
 PIXOP2(put, op_put)
-#else
-PIXOP3(avg, op_avg)
-PIXOP4(put, op_put)
 #endif
 #undef op_avg
 #undef op_put
@@ -708,484 +629,6 @@ H264_CHROMA_MC(avg_       , op_avg)
 #undef op_avg
 #undef op_put
 
-#define H264_LOWPASS(OPNAME, OP, OP2) \
-static av_unused void FUNC(OPNAME ## h264_qpel2_h_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
-    const int h=2;\
-    INIT_CLIP\
-    int i;\
-    pixel *dst = (pixel*)_dst;\
-    pixel *src = (pixel*)_src;\
-    dstStride /= sizeof(pixel);\
-    srcStride /= sizeof(pixel);\
-    for(i=0; i<h; i++)\
-    {\
-        OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
-        OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
-        dst+=dstStride;\
-        src+=srcStride;\
-    }\
-}\
-\
-static av_unused void FUNC(OPNAME ## h264_qpel2_v_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
-    const int w=2;\
-    INIT_CLIP\
-    int i;\
-    pixel *dst = (pixel*)_dst;\
-    pixel *src = (pixel*)_src;\
-    dstStride /= sizeof(pixel);\
-    srcStride /= sizeof(pixel);\
-    for(i=0; i<w; i++)\
-    {\
-        const int srcB= src[-2*srcStride];\
-        const int srcA= src[-1*srcStride];\
-        const int src0= src[0 *srcStride];\
-        const int src1= src[1 *srcStride];\
-        const int src2= src[2 *srcStride];\
-        const int src3= src[3 *srcStride];\
-        const int src4= src[4 *srcStride];\
-        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
-        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
-        dst++;\
-        src++;\
-    }\
-}\
-\
-static av_unused void FUNC(OPNAME ## h264_qpel2_hv_lowpass)(uint8_t *_dst, int16_t *tmp, uint8_t *_src, int dstStride, int tmpStride, int srcStride){\
-    const int h=2;\
-    const int w=2;\
-    const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
-    INIT_CLIP\
-    int i;\
-    pixel *dst = (pixel*)_dst;\
-    pixel *src = (pixel*)_src;\
-    dstStride /= sizeof(pixel);\
-    srcStride /= sizeof(pixel);\
-    src -= 2*srcStride;\
-    for(i=0; i<h+5; i++)\
-    {\
-        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
-        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
-        tmp+=tmpStride;\
-        src+=srcStride;\
-    }\
-    tmp -= tmpStride*(h+5-2);\
-    for(i=0; i<w; i++)\
-    {\
-        const int tmpB= tmp[-2*tmpStride] - pad;\
-        const int tmpA= tmp[-1*tmpStride] - pad;\
-        const int tmp0= tmp[0 *tmpStride] - pad;\
-        const int tmp1= tmp[1 *tmpStride] - pad;\
-        const int tmp2= tmp[2 *tmpStride] - pad;\
-        const int tmp3= tmp[3 *tmpStride] - pad;\
-        const int tmp4= tmp[4 *tmpStride] - pad;\
-        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
-        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
-        dst++;\
-        tmp++;\
-    }\
-}\
-static void FUNC(OPNAME ## h264_qpel4_h_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
-    const int h=4;\
-    INIT_CLIP\
-    int i;\
-    pixel *dst = (pixel*)_dst;\
-    pixel *src = (pixel*)_src;\
-    dstStride /= sizeof(pixel);\
-    srcStride /= sizeof(pixel);\
-    for(i=0; i<h; i++)\
-    {\
-        OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
-        OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
-        OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]));\
-        OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]));\
-        dst+=dstStride;\
-        src+=srcStride;\
-    }\
-}\
-\
-static void FUNC(OPNAME ## h264_qpel4_v_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
-    const int w=4;\
-    INIT_CLIP\
-    int i;\
-    pixel *dst = (pixel*)_dst;\
-    pixel *src = (pixel*)_src;\
-    dstStride /= sizeof(pixel);\
-    srcStride /= sizeof(pixel);\
-    for(i=0; i<w; i++)\
-    {\
-        const int srcB= src[-2*srcStride];\
-        const int srcA= src[-1*srcStride];\
-        const int src0= src[0 *srcStride];\
-        const int src1= src[1 *srcStride];\
-        const int src2= src[2 *srcStride];\
-        const int src3= src[3 *srcStride];\
-        const int src4= src[4 *srcStride];\
-        const int src5= src[5 *srcStride];\
-        const int src6= src[6 *srcStride];\
-        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
-        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
-        OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
-        OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
-        dst++;\
-        src++;\
-    }\
-}\
-\
-static void FUNC(OPNAME ## h264_qpel4_hv_lowpass)(uint8_t *_dst, int16_t *tmp, uint8_t *_src, int dstStride, int tmpStride, int srcStride){\
-    const int h=4;\
-    const int w=4;\
-    const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
-    INIT_CLIP\
-    int i;\
-    pixel *dst = (pixel*)_dst;\
-    pixel *src = (pixel*)_src;\
-    dstStride /= sizeof(pixel);\
-    srcStride /= sizeof(pixel);\
-    src -= 2*srcStride;\
-    for(i=0; i<h+5; i++)\
-    {\
-        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
-        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
-        tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]) + pad;\
-        tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]) + pad;\
-        tmp+=tmpStride;\
-        src+=srcStride;\
-    }\
-    tmp -= tmpStride*(h+5-2);\
-    for(i=0; i<w; i++)\
-    {\
-        const int tmpB= tmp[-2*tmpStride] - pad;\
-        const int tmpA= tmp[-1*tmpStride] - pad;\
-        const int tmp0= tmp[0 *tmpStride] - pad;\
-        const int tmp1= tmp[1 *tmpStride] - pad;\
-        const int tmp2= tmp[2 *tmpStride] - pad;\
-        const int tmp3= tmp[3 *tmpStride] - pad;\
-        const int tmp4= tmp[4 *tmpStride] - pad;\
-        const int tmp5= tmp[5 *tmpStride] - pad;\
-        const int tmp6= tmp[6 *tmpStride] - pad;\
-        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
-        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
-        OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
-        OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
-        dst++;\
-        tmp++;\
-    }\
-}\
-\
-static void FUNC(OPNAME ## h264_qpel8_h_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
-    const int h=8;\
-    INIT_CLIP\
-    int i;\
-    pixel *dst = (pixel*)_dst;\
-    pixel *src = (pixel*)_src;\
-    dstStride /= sizeof(pixel);\
-    srcStride /= sizeof(pixel);\
-    for(i=0; i<h; i++)\
-    {\
-        OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]));\
-        OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]));\
-        OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]));\
-        OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]));\
-        OP(dst[4], (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]));\
-        OP(dst[5], (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]));\
-        OP(dst[6], (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]));\
-        OP(dst[7], (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]));\
-        dst+=dstStride;\
-        src+=srcStride;\
-    }\
-}\
-\
-static void FUNC(OPNAME ## h264_qpel8_v_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
-    const int w=8;\
-    INIT_CLIP\
-    int i;\
-    pixel *dst = (pixel*)_dst;\
-    pixel *src = (pixel*)_src;\
-    dstStride /= sizeof(pixel);\
-    srcStride /= sizeof(pixel);\
-    for(i=0; i<w; i++)\
-    {\
-        const int srcB= src[-2*srcStride];\
-        const int srcA= src[-1*srcStride];\
-        const int src0= src[0 *srcStride];\
-        const int src1= src[1 *srcStride];\
-        const int src2= src[2 *srcStride];\
-        const int src3= src[3 *srcStride];\
-        const int src4= src[4 *srcStride];\
-        const int src5= src[5 *srcStride];\
-        const int src6= src[6 *srcStride];\
-        const int src7= src[7 *srcStride];\
-        const int src8= src[8 *srcStride];\
-        const int src9= src[9 *srcStride];\
-        const int src10=src[10*srcStride];\
-        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
-        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
-        OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
-        OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
-        OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
-        OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
-        OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
-        OP(dst[7*dstStride], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
-        dst++;\
-        src++;\
-    }\
-}\
-\
-static void FUNC(OPNAME ## h264_qpel8_hv_lowpass)(uint8_t *_dst, int16_t *tmp, uint8_t *_src, int dstStride, int tmpStride, int srcStride){\
-    const int h=8;\
-    const int w=8;\
-    const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
-    INIT_CLIP\
-    int i;\
-    pixel *dst = (pixel*)_dst;\
-    pixel *src = (pixel*)_src;\
-    dstStride /= sizeof(pixel);\
-    srcStride /= sizeof(pixel);\
-    src -= 2*srcStride;\
-    for(i=0; i<h+5; i++)\
-    {\
-        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]) + pad;\
-        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]) + pad;\
-        tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]) + pad;\
-        tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]) + pad;\
-        tmp[4]= (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]) + pad;\
-        tmp[5]= (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]) + pad;\
-        tmp[6]= (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]) + pad;\
-        tmp[7]= (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]) + pad;\
-        tmp+=tmpStride;\
-        src+=srcStride;\
-    }\
-    tmp -= tmpStride*(h+5-2);\
-    for(i=0; i<w; i++)\
-    {\
-        const int tmpB= tmp[-2*tmpStride] - pad;\
-        const int tmpA= tmp[-1*tmpStride] - pad;\
-        const int tmp0= tmp[0 *tmpStride] - pad;\
-        const int tmp1= tmp[1 *tmpStride] - pad;\
-        const int tmp2= tmp[2 *tmpStride] - pad;\
-        const int tmp3= tmp[3 *tmpStride] - pad;\
-        const int tmp4= tmp[4 *tmpStride] - pad;\
-        const int tmp5= tmp[5 *tmpStride] - pad;\
-        const int tmp6= tmp[6 *tmpStride] - pad;\
-        const int tmp7= tmp[7 *tmpStride] - pad;\
-        const int tmp8= tmp[8 *tmpStride] - pad;\
-        const int tmp9= tmp[9 *tmpStride] - pad;\
-        const int tmp10=tmp[10*tmpStride] - pad;\
-        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
-        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
-        OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
-        OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
-        OP2(dst[4*dstStride], (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));\
-        OP2(dst[5*dstStride], (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));\
-        OP2(dst[6*dstStride], (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));\
-        OP2(dst[7*dstStride], (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));\
-        dst++;\
-        tmp++;\
-    }\
-}\
-\
-static void FUNC(OPNAME ## h264_qpel16_v_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
-    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst                , src                , dstStride, srcStride);\
-    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
-    src += 8*srcStride;\
-    dst += 8*dstStride;\
-    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst                , src                , dstStride, srcStride);\
-    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
-}\
-\
-static void FUNC(OPNAME ## h264_qpel16_h_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
-    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst                , src                , dstStride, srcStride);\
-    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
-    src += 8*srcStride;\
-    dst += 8*dstStride;\
-    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst                , src                , dstStride, srcStride);\
-    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
-}\
-\
-static void FUNC(OPNAME ## h264_qpel16_hv_lowpass)(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
-    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst                , tmp  , src                , dstStride, tmpStride, srcStride);\
-    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
-    src += 8*srcStride;\
-    dst += 8*dstStride;\
-    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst                , tmp  , src                , dstStride, tmpStride, srcStride);\
-    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
-}\
-
-#define H264_MC(OPNAME, SIZE) \
-static av_unused void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc00)(uint8_t *dst, uint8_t *src, int stride){\
-    FUNCC(OPNAME ## pixels ## SIZE)(dst, src, stride, SIZE);\
-}\
-\
-static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc10)(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
-    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
-    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src, half, stride, stride, SIZE*sizeof(pixel), SIZE);\
-}\
-\
-static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc20)(uint8_t *dst, uint8_t *src, int stride){\
-    FUNC(OPNAME ## h264_qpel ## SIZE ## _h_lowpass)(dst, src, stride, stride);\
-}\
-\
-static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc30)(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
-    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
-    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src+sizeof(pixel), half, stride, stride, SIZE*sizeof(pixel), SIZE);\
-}\
-\
-static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc01)(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
-    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
-    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
-    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel),  stride, SIZE + 5);\
-    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
-    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid, half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
-}\
-\
-static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc02)(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
-    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
-    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel),  stride, SIZE + 5);\
-    FUNC(OPNAME ## h264_qpel ## SIZE ## _v_lowpass)(dst, full_mid, stride, SIZE*sizeof(pixel));\
-}\
-\
-static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc03)(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
-    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
-    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
-    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel),  stride, SIZE + 5);\
-    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
-    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid+SIZE*sizeof(pixel), half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
-}\
-\
-static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc11)(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
-    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
-    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
-    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
-    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
-    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel),  stride, SIZE + 5);\
-    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
-    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
-}\
-\
-static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc31)(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
-    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
-    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
-    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
-    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
-    FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel),  stride, SIZE + 5);\
-    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
-    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
-}\
-\
-static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc13)(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
-    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
-    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
-    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
-    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
-    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel),  stride, SIZE + 5);\
-    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
-    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
-}\
-\
-static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc33)(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
-    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
-    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
-    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
-    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
-    FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel),  stride, SIZE + 5);\
-    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
-    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
-}\
-\
-static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc22)(uint8_t *dst, uint8_t *src, int stride){\
-    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
-    FUNC(OPNAME ## h264_qpel ## SIZE ## _hv_lowpass)(dst, tmp, src, stride, SIZE*sizeof(pixel), stride);\
-}\
-\
-static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc21)(uint8_t *dst, uint8_t *src, int stride){\
-    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
-    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
-    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
-    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
-    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
-    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
-}\
-\
-static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc23)(uint8_t *dst, uint8_t *src, int stride){\
-    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
-    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
-    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
-    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
-    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
-    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
-}\
-\
-static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc12)(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
-    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
-    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
-    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
-    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
-    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel),  stride, SIZE + 5);\
-    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
-    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
-    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
-}\
-\
-static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc32)(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
-    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
-    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
-    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
-    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
-    FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel),  stride, SIZE + 5);\
-    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
-    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
-    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
-}\
-
-#define op_avg(a, b)  a = (((a)+CLIP(((b) + 16)>>5)+1)>>1)
-//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
-#define op_put(a, b)  a = CLIP(((b) + 16)>>5)
-#define op2_avg(a, b)  a = (((a)+CLIP(((b) + 512)>>10)+1)>>1)
-#define op2_put(a, b)  a = CLIP(((b) + 512)>>10)
-
-H264_LOWPASS(put_       , op_put, op2_put)
-H264_LOWPASS(avg_       , op_avg, op2_avg)
-H264_MC(put_, 2)
-H264_MC(put_, 4)
-H264_MC(put_, 8)
-H264_MC(put_, 16)
-H264_MC(avg_, 4)
-H264_MC(avg_, 8)
-H264_MC(avg_, 16)
-
-#undef op_avg
-#undef op_put
-#undef op2_avg
-#undef op2_put
-
-#if BIT_DEPTH == 8
-#   define put_h264_qpel8_mc00_8_c  ff_put_pixels8x8_8_c
-#   define avg_h264_qpel8_mc00_8_c  ff_avg_pixels8x8_8_c
-#   define put_h264_qpel16_mc00_8_c ff_put_pixels16x16_8_c
-#   define avg_h264_qpel16_mc00_8_c ff_avg_pixels16x16_8_c
-#elif BIT_DEPTH == 9
-#   define put_h264_qpel8_mc00_9_c  ff_put_pixels8x8_9_c
-#   define avg_h264_qpel8_mc00_9_c  ff_avg_pixels8x8_9_c
-#   define put_h264_qpel16_mc00_9_c ff_put_pixels16x16_9_c
-#   define avg_h264_qpel16_mc00_9_c ff_avg_pixels16x16_9_c
-#elif BIT_DEPTH == 10
-#   define put_h264_qpel8_mc00_10_c  ff_put_pixels8x8_10_c
-#   define avg_h264_qpel8_mc00_10_c  ff_avg_pixels8x8_10_c
-#   define put_h264_qpel16_mc00_10_c ff_put_pixels16x16_10_c
-#   define avg_h264_qpel16_mc00_10_c ff_avg_pixels16x16_10_c
-#endif
-
 void FUNCC(ff_put_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
     FUNCC(put_pixels8)(dst, src, stride, 8);
 }
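
For orientation while reading the H264_MC blocks in this patch: the mcXY suffix encodes the quarter-pel offset, X horizontal and Y vertical in quarter-sample units, so mc20 is the horizontal half-pel position and mc22 the centre position. Positions with a quarter offset are composed by the _l2 helpers as a rounded average of two predictions. A minimal scalar illustration of that composition for one 8-bit sample (not part of the patch; the real code works on packed pixel4 words):

    /* quarter-pel sample = rounded average of an integer-position (or
     * half-pel) sample and the 6-tap half-pel filter output */
    static inline int qpel_quarter_sample(int full_or_half, int half)
    {
        return (full_or_half + half + 1) >> 1;
    }
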
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 913b06f..9e9384b 100644 (file)
@@ -967,6 +967,7 @@ static av_cold void common_init(H264Context *h)
     s->codec_id = s->avctx->codec->id;
 
     ff_h264dsp_init(&h->h264dsp, 8, 1);
+    ff_h264qpel_init(&h->h264qpel, 8);
     ff_h264_pred_init(&h->hpc, s->codec_id, 8, 1);
 
     h->dequant_coeff_pps = -1;
@@ -2436,6 +2437,7 @@ static int h264_set_parameter_from_sps(H264Context *h)
 
             ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma,
                             h->sps.chroma_format_idc);
+            ff_h264qpel_init(&h->h264qpel, h->sps.bit_depth_luma);
             ff_h264_pred_init(&h->hpc, s->codec_id, h->sps.bit_depth_luma,
                               h->sps.chroma_format_idc);
             s->dsp.dct_bits = h->sps.bit_depth_luma > 8 ? 32 : 16;
@@ -2593,8 +2595,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
     int last_pic_structure, last_pic_droppable;
     int needs_reinit = 0;
 
-    s->me.qpel_put = s->dsp.put_h264_qpel_pixels_tab;
-    s->me.qpel_avg = s->dsp.avg_h264_qpel_pixels_tab;
+    s->me.qpel_put = h->h264qpel.put_h264_qpel_pixels_tab;
+    s->me.qpel_avg = h->h264qpel.avg_h264_qpel_pixels_tab;
 
     first_mb_in_slice = get_ue_golomb(&s->gb);
 
diff --git a/libavcodec/h264.h b/libavcodec/h264.h
index 140740c..323324d 100644 (file)
@@ -33,6 +33,7 @@
 #include "mpegvideo.h"
 #include "h264dsp.h"
 #include "h264pred.h"
+#include "h264qpel.h"
 #include "rectangle.h"
 
 #define interlaced_dct interlaced_dct_is_a_bad_name
@@ -253,6 +254,7 @@ typedef struct MMCO {
 typedef struct H264Context {
     MpegEncContext s;
     H264DSPContext h264dsp;
+    H264QpelContext h264qpel;
     int pixel_shift;    ///< 0 for 8-bit H264, 1 for high-bit-depth H264
     int chroma_qp[2];   // QPc
 
diff --git a/libavcodec/h264qpel.c b/libavcodec/h264qpel.c
new file mode 100644 (file)
index 0000000..1bf3168
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
+ * Copyright (c) 2003-2010 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "h264qpel.h"
+
+#define BIT_DEPTH 8
+#include "h264qpel_template.c"
+#undef BIT_DEPTH
+
+#define BIT_DEPTH 9
+#include "h264qpel_template.c"
+#undef BIT_DEPTH
+
+#define BIT_DEPTH 10
+#include "h264qpel_template.c"
+#undef BIT_DEPTH
+
+void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
+{
+#undef FUNCC
+#define FUNCC(f, depth) f ## _ ## depth ## _c
+
+#define dspfunc2(PFX, IDX, NUM, depth)                                  \
+    c->PFX ## _pixels_tab[IDX][ 0] = FUNCC(PFX ## NUM ## _mc00, depth); \
+    c->PFX ## _pixels_tab[IDX][ 1] = FUNCC(PFX ## NUM ## _mc10, depth); \
+    c->PFX ## _pixels_tab[IDX][ 2] = FUNCC(PFX ## NUM ## _mc20, depth); \
+    c->PFX ## _pixels_tab[IDX][ 3] = FUNCC(PFX ## NUM ## _mc30, depth); \
+    c->PFX ## _pixels_tab[IDX][ 4] = FUNCC(PFX ## NUM ## _mc01, depth); \
+    c->PFX ## _pixels_tab[IDX][ 5] = FUNCC(PFX ## NUM ## _mc11, depth); \
+    c->PFX ## _pixels_tab[IDX][ 6] = FUNCC(PFX ## NUM ## _mc21, depth); \
+    c->PFX ## _pixels_tab[IDX][ 7] = FUNCC(PFX ## NUM ## _mc31, depth); \
+    c->PFX ## _pixels_tab[IDX][ 8] = FUNCC(PFX ## NUM ## _mc02, depth); \
+    c->PFX ## _pixels_tab[IDX][ 9] = FUNCC(PFX ## NUM ## _mc12, depth); \
+    c->PFX ## _pixels_tab[IDX][10] = FUNCC(PFX ## NUM ## _mc22, depth); \
+    c->PFX ## _pixels_tab[IDX][11] = FUNCC(PFX ## NUM ## _mc32, depth); \
+    c->PFX ## _pixels_tab[IDX][12] = FUNCC(PFX ## NUM ## _mc03, depth); \
+    c->PFX ## _pixels_tab[IDX][13] = FUNCC(PFX ## NUM ## _mc13, depth); \
+    c->PFX ## _pixels_tab[IDX][14] = FUNCC(PFX ## NUM ## _mc23, depth); \
+    c->PFX ## _pixels_tab[IDX][15] = FUNCC(PFX ## NUM ## _mc33, depth)
+
+#define SET_QPEL(depth)                         \
+    dspfunc2(put_h264_qpel, 0, 16, depth);      \
+    dspfunc2(put_h264_qpel, 1,  8, depth);      \
+    dspfunc2(put_h264_qpel, 2,  4, depth);      \
+    dspfunc2(put_h264_qpel, 3,  2, depth);      \
+    dspfunc2(avg_h264_qpel, 0, 16, depth);      \
+    dspfunc2(avg_h264_qpel, 1,  8, depth);      \
+    dspfunc2(avg_h264_qpel, 2,  4, depth)
+
+    switch (bit_depth) {
+    default:
+        SET_QPEL(8);
+        break;
+    case 9:
+        SET_QPEL(9);
+        break;
+    case 10:
+        SET_QPEL(10);
+        break;
+    }
+
+    if (ARCH_ARM)
+        ff_h264qpel_init_arm(c, bit_depth);
+    if (ARCH_PPC)
+        ff_h264qpel_init_ppc(c, bit_depth);
+    if (ARCH_X86)
+        ff_h264qpel_init_x86(c, bit_depth);
+}
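
The two tables in the new context are indexed first by block size (0–3 selecting the 16-, 8-, 4- and 2-wide variants, as SET_QPEL above shows) and then by quarter-pel position, with dspfunc2 laying the mcXY entries out as X + 4*Y. A minimal sketch of a lookup, assuming a caller that already has the fractional motion-vector components mx and my (illustration only, not part of the patch):

    /* pick the 16x16 "put" function for a quarter-pel offset (mx, my) */
    static void mc_luma_16x16(H264QpelContext *qpel, uint8_t *dst,
                              uint8_t *src, int stride, int mx, int my)
    {
        int luma_xy = (mx & 3) + 4 * (my & 3);          /* mc00 .. mc33 */
        qpel->put_h264_qpel_pixels_tab[0][luma_xy](dst, src, stride);
    }
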
diff --git a/libavcodec/h264qpel.h b/libavcodec/h264qpel.h
new file mode 100644 (file)
index 0000000..d761775
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
+ * Copyright (c) 2003-2010 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_H264QPEL_H
+#define AVCODEC_H264QPEL_H
+
+#include "dsputil.h"
+
+typedef struct H264QpelContext {
+    qpel_mc_func put_h264_qpel_pixels_tab[4][16];
+    qpel_mc_func avg_h264_qpel_pixels_tab[4][16];
+} H264QpelContext;
+
+void ff_h264qpel_init(H264QpelContext *c, int bit_depth);
+
+void ff_h264qpel_init_arm(H264QpelContext *c, int bit_depth);
+void ff_h264qpel_init_ppc(H264QpelContext *c, int bit_depth);
+void ff_h264qpel_init_x86(H264QpelContext *c, int bit_depth);
+
+#endif /* AVCODEC_H264QPEL_H */
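
Wiring the new header into a decoder follows the pattern visible in the h264.c hunks above: embed an H264QpelContext next to the existing DSP contexts and (re)run ff_h264qpel_init() once the luma bit depth is known. A hedged sketch with a hypothetical container struct (the real H264Context changes are in the hunk above):

    #include "h264qpel.h"

    typedef struct DecoderDSP {
        H264QpelContext h264qpel;   /* function pointers filled by init */
    } DecoderDSP;

    static void decoder_dsp_init(DecoderDSP *dsp, int bit_depth_luma)
    {
        /* selects the 8/9/10-bit C versions, then lets the ARM/PPC/x86
         * init hooks override individual entries */
        ff_h264qpel_init(&dsp->h264qpel, bit_depth_luma);
    }
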
diff --git a/libavcodec/h264qpel_template.c b/libavcodec/h264qpel_template.c
new file mode 100644 (file)
index 0000000..3bf30c0
--- /dev/null
@@ -0,0 +1,550 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
+ * Copyright (c) 2003-2010 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/common.h"
+#include "bit_depth_template.c"
+#include "hpel_template.c"
+
+static inline void FUNC(copy_block2)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
+{
+    int i;
+    for(i=0; i<h; i++)
+    {
+        AV_WN2P(dst   , AV_RN2P(src   ));
+        dst+=dstStride;
+        src+=srcStride;
+    }
+}
+
+static inline void FUNC(copy_block4)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
+{
+    int i;
+    for(i=0; i<h; i++)
+    {
+        AV_WN4P(dst   , AV_RN4P(src   ));
+        dst+=dstStride;
+        src+=srcStride;
+    }
+}
+
+static inline void FUNC(copy_block8)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
+{
+    int i;
+    for(i=0; i<h; i++)
+    {
+        AV_WN4P(dst                , AV_RN4P(src                ));
+        AV_WN4P(dst+4*sizeof(pixel), AV_RN4P(src+4*sizeof(pixel)));
+        dst+=dstStride;
+        src+=srcStride;
+    }
+}
+
+static inline void FUNC(copy_block16)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
+{
+    int i;
+    for(i=0; i<h; i++)
+    {
+        AV_WN4P(dst                 , AV_RN4P(src                 ));
+        AV_WN4P(dst+ 4*sizeof(pixel), AV_RN4P(src+ 4*sizeof(pixel)));
+        AV_WN4P(dst+ 8*sizeof(pixel), AV_RN4P(src+ 8*sizeof(pixel)));
+        AV_WN4P(dst+12*sizeof(pixel), AV_RN4P(src+12*sizeof(pixel)));
+        dst+=dstStride;
+        src+=srcStride;
+    }
+}
+
+#define H264_LOWPASS(OPNAME, OP, OP2) \
+static av_unused void FUNC(OPNAME ## h264_qpel2_h_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
+    const int h=2;\
+    INIT_CLIP\
+    int i;\
+    pixel *dst = (pixel*)_dst;\
+    pixel *src = (pixel*)_src;\
+    dstStride /= sizeof(pixel);\
+    srcStride /= sizeof(pixel);\
+    for(i=0; i<h; i++)\
+    {\
+        OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
+        OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
+        dst+=dstStride;\
+        src+=srcStride;\
+    }\
+}\
+\
+static av_unused void FUNC(OPNAME ## h264_qpel2_v_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
+    const int w=2;\
+    INIT_CLIP\
+    int i;\
+    pixel *dst = (pixel*)_dst;\
+    pixel *src = (pixel*)_src;\
+    dstStride /= sizeof(pixel);\
+    srcStride /= sizeof(pixel);\
+    for(i=0; i<w; i++)\
+    {\
+        const int srcB= src[-2*srcStride];\
+        const int srcA= src[-1*srcStride];\
+        const int src0= src[0 *srcStride];\
+        const int src1= src[1 *srcStride];\
+        const int src2= src[2 *srcStride];\
+        const int src3= src[3 *srcStride];\
+        const int src4= src[4 *srcStride];\
+        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
+        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
+        dst++;\
+        src++;\
+    }\
+}\
+\
+static av_unused void FUNC(OPNAME ## h264_qpel2_hv_lowpass)(uint8_t *_dst, int16_t *tmp, uint8_t *_src, int dstStride, int tmpStride, int srcStride){\
+    const int h=2;\
+    const int w=2;\
+    const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
+    INIT_CLIP\
+    int i;\
+    pixel *dst = (pixel*)_dst;\
+    pixel *src = (pixel*)_src;\
+    dstStride /= sizeof(pixel);\
+    srcStride /= sizeof(pixel);\
+    src -= 2*srcStride;\
+    for(i=0; i<h+5; i++)\
+    {\
+        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
+        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
+        tmp+=tmpStride;\
+        src+=srcStride;\
+    }\
+    tmp -= tmpStride*(h+5-2);\
+    for(i=0; i<w; i++)\
+    {\
+        const int tmpB= tmp[-2*tmpStride] - pad;\
+        const int tmpA= tmp[-1*tmpStride] - pad;\
+        const int tmp0= tmp[0 *tmpStride] - pad;\
+        const int tmp1= tmp[1 *tmpStride] - pad;\
+        const int tmp2= tmp[2 *tmpStride] - pad;\
+        const int tmp3= tmp[3 *tmpStride] - pad;\
+        const int tmp4= tmp[4 *tmpStride] - pad;\
+        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
+        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
+        dst++;\
+        tmp++;\
+    }\
+}\
+static void FUNC(OPNAME ## h264_qpel4_h_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
+    const int h=4;\
+    INIT_CLIP\
+    int i;\
+    pixel *dst = (pixel*)_dst;\
+    pixel *src = (pixel*)_src;\
+    dstStride /= sizeof(pixel);\
+    srcStride /= sizeof(pixel);\
+    for(i=0; i<h; i++)\
+    {\
+        OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
+        OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
+        OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]));\
+        OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]));\
+        dst+=dstStride;\
+        src+=srcStride;\
+    }\
+}\
+\
+static void FUNC(OPNAME ## h264_qpel4_v_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
+    const int w=4;\
+    INIT_CLIP\
+    int i;\
+    pixel *dst = (pixel*)_dst;\
+    pixel *src = (pixel*)_src;\
+    dstStride /= sizeof(pixel);\
+    srcStride /= sizeof(pixel);\
+    for(i=0; i<w; i++)\
+    {\
+        const int srcB= src[-2*srcStride];\
+        const int srcA= src[-1*srcStride];\
+        const int src0= src[0 *srcStride];\
+        const int src1= src[1 *srcStride];\
+        const int src2= src[2 *srcStride];\
+        const int src3= src[3 *srcStride];\
+        const int src4= src[4 *srcStride];\
+        const int src5= src[5 *srcStride];\
+        const int src6= src[6 *srcStride];\
+        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
+        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
+        OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
+        OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
+        dst++;\
+        src++;\
+    }\
+}\
+\
+static void FUNC(OPNAME ## h264_qpel4_hv_lowpass)(uint8_t *_dst, int16_t *tmp, uint8_t *_src, int dstStride, int tmpStride, int srcStride){\
+    const int h=4;\
+    const int w=4;\
+    const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
+    INIT_CLIP\
+    int i;\
+    pixel *dst = (pixel*)_dst;\
+    pixel *src = (pixel*)_src;\
+    dstStride /= sizeof(pixel);\
+    srcStride /= sizeof(pixel);\
+    src -= 2*srcStride;\
+    for(i=0; i<h+5; i++)\
+    {\
+        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
+        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
+        tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]) + pad;\
+        tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]) + pad;\
+        tmp+=tmpStride;\
+        src+=srcStride;\
+    }\
+    tmp -= tmpStride*(h+5-2);\
+    for(i=0; i<w; i++)\
+    {\
+        const int tmpB= tmp[-2*tmpStride] - pad;\
+        const int tmpA= tmp[-1*tmpStride] - pad;\
+        const int tmp0= tmp[0 *tmpStride] - pad;\
+        const int tmp1= tmp[1 *tmpStride] - pad;\
+        const int tmp2= tmp[2 *tmpStride] - pad;\
+        const int tmp3= tmp[3 *tmpStride] - pad;\
+        const int tmp4= tmp[4 *tmpStride] - pad;\
+        const int tmp5= tmp[5 *tmpStride] - pad;\
+        const int tmp6= tmp[6 *tmpStride] - pad;\
+        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
+        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
+        OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
+        OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
+        dst++;\
+        tmp++;\
+    }\
+}\
+\
+static void FUNC(OPNAME ## h264_qpel8_h_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
+    const int h=8;\
+    INIT_CLIP\
+    int i;\
+    pixel *dst = (pixel*)_dst;\
+    pixel *src = (pixel*)_src;\
+    dstStride /= sizeof(pixel);\
+    srcStride /= sizeof(pixel);\
+    for(i=0; i<h; i++)\
+    {\
+        OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]));\
+        OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]));\
+        OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]));\
+        OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]));\
+        OP(dst[4], (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]));\
+        OP(dst[5], (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]));\
+        OP(dst[6], (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]));\
+        OP(dst[7], (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]));\
+        dst+=dstStride;\
+        src+=srcStride;\
+    }\
+}\
+\
+static void FUNC(OPNAME ## h264_qpel8_v_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
+    const int w=8;\
+    INIT_CLIP\
+    int i;\
+    pixel *dst = (pixel*)_dst;\
+    pixel *src = (pixel*)_src;\
+    dstStride /= sizeof(pixel);\
+    srcStride /= sizeof(pixel);\
+    for(i=0; i<w; i++)\
+    {\
+        const int srcB= src[-2*srcStride];\
+        const int srcA= src[-1*srcStride];\
+        const int src0= src[0 *srcStride];\
+        const int src1= src[1 *srcStride];\
+        const int src2= src[2 *srcStride];\
+        const int src3= src[3 *srcStride];\
+        const int src4= src[4 *srcStride];\
+        const int src5= src[5 *srcStride];\
+        const int src6= src[6 *srcStride];\
+        const int src7= src[7 *srcStride];\
+        const int src8= src[8 *srcStride];\
+        const int src9= src[9 *srcStride];\
+        const int src10=src[10*srcStride];\
+        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
+        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
+        OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
+        OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
+        OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
+        OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
+        OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
+        OP(dst[7*dstStride], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
+        dst++;\
+        src++;\
+    }\
+}\
+\
+static void FUNC(OPNAME ## h264_qpel8_hv_lowpass)(uint8_t *_dst, int16_t *tmp, uint8_t *_src, int dstStride, int tmpStride, int srcStride){\
+    const int h=8;\
+    const int w=8;\
+    const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
+    INIT_CLIP\
+    int i;\
+    pixel *dst = (pixel*)_dst;\
+    pixel *src = (pixel*)_src;\
+    dstStride /= sizeof(pixel);\
+    srcStride /= sizeof(pixel);\
+    src -= 2*srcStride;\
+    for(i=0; i<h+5; i++)\
+    {\
+        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]) + pad;\
+        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]) + pad;\
+        tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]) + pad;\
+        tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]) + pad;\
+        tmp[4]= (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]) + pad;\
+        tmp[5]= (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]) + pad;\
+        tmp[6]= (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]) + pad;\
+        tmp[7]= (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]) + pad;\
+        tmp+=tmpStride;\
+        src+=srcStride;\
+    }\
+    tmp -= tmpStride*(h+5-2);\
+    for(i=0; i<w; i++)\
+    {\
+        const int tmpB= tmp[-2*tmpStride] - pad;\
+        const int tmpA= tmp[-1*tmpStride] - pad;\
+        const int tmp0= tmp[0 *tmpStride] - pad;\
+        const int tmp1= tmp[1 *tmpStride] - pad;\
+        const int tmp2= tmp[2 *tmpStride] - pad;\
+        const int tmp3= tmp[3 *tmpStride] - pad;\
+        const int tmp4= tmp[4 *tmpStride] - pad;\
+        const int tmp5= tmp[5 *tmpStride] - pad;\
+        const int tmp6= tmp[6 *tmpStride] - pad;\
+        const int tmp7= tmp[7 *tmpStride] - pad;\
+        const int tmp8= tmp[8 *tmpStride] - pad;\
+        const int tmp9= tmp[9 *tmpStride] - pad;\
+        const int tmp10=tmp[10*tmpStride] - pad;\
+        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
+        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
+        OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
+        OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
+        OP2(dst[4*dstStride], (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));\
+        OP2(dst[5*dstStride], (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));\
+        OP2(dst[6*dstStride], (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));\
+        OP2(dst[7*dstStride], (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));\
+        dst++;\
+        tmp++;\
+    }\
+}\
+\
+static void FUNC(OPNAME ## h264_qpel16_v_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst                , src                , dstStride, srcStride);\
+    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
+    src += 8*srcStride;\
+    dst += 8*dstStride;\
+    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst                , src                , dstStride, srcStride);\
+    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
+}\
+\
+static void FUNC(OPNAME ## h264_qpel16_h_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst                , src                , dstStride, srcStride);\
+    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
+    src += 8*srcStride;\
+    dst += 8*dstStride;\
+    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst                , src                , dstStride, srcStride);\
+    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
+}\
+\
+static void FUNC(OPNAME ## h264_qpel16_hv_lowpass)(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
+    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst                , tmp  , src                , dstStride, tmpStride, srcStride);\
+    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
+    src += 8*srcStride;\
+    dst += 8*dstStride;\
+    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst                , tmp  , src                , dstStride, tmpStride, srcStride);\
+    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
+}\
+
+#define H264_MC(OPNAME, SIZE) \
+static av_unused void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc00)(uint8_t *dst, uint8_t *src, int stride){\
+    FUNCC(OPNAME ## pixels ## SIZE)(dst, src, stride, SIZE);\
+}\
+\
+static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc10)(uint8_t *dst, uint8_t *src, int stride){\
+    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
+    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
+    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src, half, stride, stride, SIZE*sizeof(pixel), SIZE);\
+}\
+\
+static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc20)(uint8_t *dst, uint8_t *src, int stride){\
+    FUNC(OPNAME ## h264_qpel ## SIZE ## _h_lowpass)(dst, src, stride, stride);\
+}\
+\
+static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc30)(uint8_t *dst, uint8_t *src, int stride){\
+    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
+    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
+    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src+sizeof(pixel), half, stride, stride, SIZE*sizeof(pixel), SIZE);\
+}\
+\
+static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc01)(uint8_t *dst, uint8_t *src, int stride){\
+    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
+    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
+    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
+    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel),  stride, SIZE + 5);\
+    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
+    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid, half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
+}\
+\
+static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc02)(uint8_t *dst, uint8_t *src, int stride){\
+    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
+    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
+    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel),  stride, SIZE + 5);\
+    FUNC(OPNAME ## h264_qpel ## SIZE ## _v_lowpass)(dst, full_mid, stride, SIZE*sizeof(pixel));\
+}\
+\
+static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc03)(uint8_t *dst, uint8_t *src, int stride){\
+    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
+    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
+    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
+    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel),  stride, SIZE + 5);\
+    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
+    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid+SIZE*sizeof(pixel), half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
+}\
+\
+static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc11)(uint8_t *dst, uint8_t *src, int stride){\
+    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
+    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
+    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
+    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
+    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
+    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel),  stride, SIZE + 5);\
+    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
+    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
+}\
+\
+static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc31)(uint8_t *dst, uint8_t *src, int stride){\
+    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
+    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
+    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
+    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
+    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
+    FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel),  stride, SIZE + 5);\
+    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
+    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
+}\
+\
+static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc13)(uint8_t *dst, uint8_t *src, int stride){\
+    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
+    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
+    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
+    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
+    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
+    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel),  stride, SIZE + 5);\
+    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
+    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
+}\
+\
+static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc33)(uint8_t *dst, uint8_t *src, int stride){\
+    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
+    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
+    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
+    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
+    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
+    FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel),  stride, SIZE + 5);\
+    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
+    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
+}\
+\
+static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc22)(uint8_t *dst, uint8_t *src, int stride){\
+    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
+    FUNC(OPNAME ## h264_qpel ## SIZE ## _hv_lowpass)(dst, tmp, src, stride, SIZE*sizeof(pixel), stride);\
+}\
+\
+static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc21)(uint8_t *dst, uint8_t *src, int stride){\
+    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
+    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
+    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
+    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
+    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
+    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
+}\
+\
+static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc23)(uint8_t *dst, uint8_t *src, int stride){\
+    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
+    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
+    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
+    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
+    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
+    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
+}\
+\
+static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc12)(uint8_t *dst, uint8_t *src, int stride){\
+    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
+    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
+    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
+    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
+    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
+    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel),  stride, SIZE + 5);\
+    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
+    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
+    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
+}\
+\
+static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc32)(uint8_t *dst, uint8_t *src, int stride){\
+    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
+    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
+    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
+    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
+    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
+    FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel),  stride, SIZE + 5);\
+    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
+    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
+    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
+}\
+
+#define op_avg(a, b)  a = (((a)+CLIP(((b) + 16)>>5)+1)>>1)
+//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
+#define op_put(a, b)  a = CLIP(((b) + 16)>>5)
+#define op2_avg(a, b)  a = (((a)+CLIP(((b) + 512)>>10)+1)>>1)
+#define op2_put(a, b)  a = CLIP(((b) + 512)>>10)
+
+H264_LOWPASS(put_       , op_put, op2_put)
+H264_LOWPASS(avg_       , op_avg, op2_avg)
+H264_MC(put_, 2)
+H264_MC(put_, 4)
+H264_MC(put_, 8)
+H264_MC(put_, 16)
+H264_MC(avg_, 4)
+H264_MC(avg_, 8)
+H264_MC(avg_, 16)
+
+#undef op_avg
+#undef op_put
+#undef op2_avg
+#undef op2_put
+
+#if BIT_DEPTH == 8
+#   define put_h264_qpel8_mc00_8_c  ff_put_pixels8x8_8_c
+#   define avg_h264_qpel8_mc00_8_c  ff_avg_pixels8x8_8_c
+#   define put_h264_qpel16_mc00_8_c ff_put_pixels16x16_8_c
+#   define avg_h264_qpel16_mc00_8_c ff_avg_pixels16x16_8_c
+#elif BIT_DEPTH == 9
+#   define put_h264_qpel8_mc00_9_c  ff_put_pixels8x8_9_c
+#   define avg_h264_qpel8_mc00_9_c  ff_avg_pixels8x8_9_c
+#   define put_h264_qpel16_mc00_9_c ff_put_pixels16x16_9_c
+#   define avg_h264_qpel16_mc00_9_c ff_avg_pixels16x16_9_c
+#elif BIT_DEPTH == 10
+#   define put_h264_qpel8_mc00_10_c  ff_put_pixels8x8_10_c
+#   define avg_h264_qpel8_mc00_10_c  ff_avg_pixels8x8_10_c
+#   define put_h264_qpel16_mc00_10_c ff_put_pixels16x16_10_c
+#   define avg_h264_qpel16_mc00_10_c ff_avg_pixels16x16_10_c
+#endif
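
The OP/OP2 macros directly above spell out the arithmetic the whole template is built on: the H.264 luma half-pel filter is the 6-tap kernel (1, -5, 20, 20, -5, 1), rounded with +16 and shifted right by 5 in the one-dimensional cases, while the centre (hv) position keeps the first pass at full precision and rounds once with (+512) >> 10. A scalar restatement for one 8-bit sample, illustration only:

    /* one-dimensional 6-tap lowpass over samples m2 m1 p0 p1 p2 p3 */
    static inline int h264_lowpass_1d(int m2, int m1, int p0,
                                      int p1, int p2, int p3)
    {
        return (p0 + p1) * 20 - (m1 + p2) * 5 + (m2 + p3);
    }
    /* half-pel sample: clip((h264_lowpass_1d(...) + 16) >> 5);
     * hv centre sample: clip((vertical pass over 16-bit tmp + 512) >> 10),
     * as op_put and op2_put above encode */
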
diff --git a/libavcodec/hpel_template.c b/libavcodec/hpel_template.c
new file mode 100644 (file)
index 0000000..e3d74c5
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define DEF_HPEL(OPNAME, OP) \
+static inline void FUNCC(OPNAME ## _pixels2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+    int i;\
+    for(i=0; i<h; i++){\
+        OP(*((pixel2*)(block  )), AV_RN2P(pixels  ));\
+        pixels+=line_size;\
+        block +=line_size;\
+    }\
+}\
+static inline void FUNCC(OPNAME ## _pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+    int i;\
+    for(i=0; i<h; i++){\
+        OP(*((pixel4*)(block  )), AV_RN4P(pixels  ));\
+        pixels+=line_size;\
+        block +=line_size;\
+    }\
+}\
+static inline void FUNCC(OPNAME ## _pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+    int i;\
+    for(i=0; i<h; i++){\
+        OP(*((pixel4*)(block                )), AV_RN4P(pixels                ));\
+        OP(*((pixel4*)(block+4*sizeof(pixel))), AV_RN4P(pixels+4*sizeof(pixel)));\
+        pixels+=line_size;\
+        block +=line_size;\
+    }\
+}\
+\
+static inline void FUNC(OPNAME ## _pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
+                                                int src_stride1, int src_stride2, int h){\
+    int i;\
+    for(i=0; i<h; i++){\
+        pixel4 a,b;\
+        a= AV_RN4P(&src1[i*src_stride1  ]);\
+        b= AV_RN4P(&src2[i*src_stride2  ]);\
+        OP(*((pixel4*)&dst[i*dst_stride  ]), rnd_avg_pixel4(a, b));\
+        a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
+        b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
+        OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), rnd_avg_pixel4(a, b));\
+    }\
+}\
+\
+static inline void FUNC(OPNAME ## _pixels4_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
+                                                int src_stride1, int src_stride2, int h){\
+    int i;\
+    for(i=0; i<h; i++){\
+        pixel4 a,b;\
+        a= AV_RN4P(&src1[i*src_stride1  ]);\
+        b= AV_RN4P(&src2[i*src_stride2  ]);\
+        OP(*((pixel4*)&dst[i*dst_stride  ]), rnd_avg_pixel4(a, b));\
+    }\
+}\
+\
+static inline void FUNC(OPNAME ## _pixels2_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
+                                                int src_stride1, int src_stride2, int h){\
+    int i;\
+    for(i=0; i<h; i++){\
+        pixel4 a,b;\
+        a= AV_RN2P(&src1[i*src_stride1  ]);\
+        b= AV_RN2P(&src2[i*src_stride2  ]);\
+        OP(*((pixel2*)&dst[i*dst_stride  ]), rnd_avg_pixel4(a, b));\
+    }\
+}\
+\
+static inline void FUNC(OPNAME ## _pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
+                                                int src_stride1, int src_stride2, int h){\
+    FUNC(OPNAME ## _pixels8_l2)(dst  , src1  , src2  , dst_stride, src_stride1, src_stride2, h);\
+    FUNC(OPNAME ## _pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
+}\
+\
+CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16)    , FUNCC(OPNAME ## _pixels8)    , 8*sizeof(pixel))
+
+
+#define op_avg(a, b) a = rnd_avg_pixel4(a, b)
+#define op_put(a, b) a = b
+
+DEF_HPEL(avg, op_avg)
+DEF_HPEL(put, op_put)
+#undef op_avg
+#undef op_put
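
The _pixelsN_l2 helpers and op_avg defined here both reduce to rnd_avg_pixel4, a rounding average over a whole packed pixel4 word, which is the per-pixel blending every quarter position relies on. For 8-bit content rnd_avg_pixel4 typically resolves to the classic SWAR trick; its actual definition lives elsewhere in the tree, so the version below (four bytes packed in a uint32_t) is an illustration only:

    #include <stdint.h>

    /* per-byte (a + b + 1) >> 1 without unpacking the bytes */
    static inline uint32_t rnd_avg32_sketch(uint32_t a, uint32_t b)
    {
        return (a | b) - (((a ^ b) & 0xFEFEFEFEu) >> 1);
    }
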
diff --git a/libavcodec/ppc/Makefile b/libavcodec/ppc/Makefile
index cdc6435..e152483 100644 (file)
@@ -1,6 +1,7 @@
 OBJS                                   += ppc/dsputil_ppc.o             \
                                           ppc/videodsp_ppc.o            \
 
+OBJS-$(CONFIG_H264QPEL)                += ppc/h264_qpel.o
 OBJS-$(CONFIG_VORBIS_DECODER)          += ppc/vorbisdsp_altivec.o
 OBJS-$(CONFIG_VP3DSP)                  += ppc/vp3dsp_altivec.o
 
diff --git a/libavcodec/ppc/h264_altivec.c b/libavcodec/ppc/h264_altivec.c
index 36d39d4..0cbf895 100644 (file)
 #include "libavutil/intreadwrite.h"
 #include "libavutil/ppc/types_altivec.h"
 #include "libavutil/ppc/util_altivec.h"
-#include "libavcodec/dsputil.h"
 #include "libavcodec/h264data.h"
 #include "libavcodec/h264dsp.h"
 
-#include "dsputil_altivec.h"
-
-#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
-#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)
-
-#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
-#define PREFIX_h264_chroma_mc8_altivec         put_h264_chroma_mc8_altivec
-#define PREFIX_h264_chroma_mc8_num             altivec_put_h264_chroma_mc8_num
-#define PREFIX_h264_qpel16_h_lowpass_altivec   put_h264_qpel16_h_lowpass_altivec
-#define PREFIX_h264_qpel16_h_lowpass_num       altivec_put_h264_qpel16_h_lowpass_num
-#define PREFIX_h264_qpel16_v_lowpass_altivec   put_h264_qpel16_v_lowpass_altivec
-#define PREFIX_h264_qpel16_v_lowpass_num       altivec_put_h264_qpel16_v_lowpass_num
-#define PREFIX_h264_qpel16_hv_lowpass_altivec  put_h264_qpel16_hv_lowpass_altivec
-#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_put_h264_qpel16_hv_lowpass_num
-#include "h264_altivec_template.c"
-#undef OP_U8_ALTIVEC
-#undef PREFIX_h264_chroma_mc8_altivec
-#undef PREFIX_h264_chroma_mc8_num
-#undef PREFIX_h264_qpel16_h_lowpass_altivec
-#undef PREFIX_h264_qpel16_h_lowpass_num
-#undef PREFIX_h264_qpel16_v_lowpass_altivec
-#undef PREFIX_h264_qpel16_v_lowpass_num
-#undef PREFIX_h264_qpel16_hv_lowpass_altivec
-#undef PREFIX_h264_qpel16_hv_lowpass_num
-
-#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
-#define PREFIX_h264_chroma_mc8_altivec         avg_h264_chroma_mc8_altivec
-#define PREFIX_h264_chroma_mc8_num             altivec_avg_h264_chroma_mc8_num
-#define PREFIX_h264_qpel16_h_lowpass_altivec   avg_h264_qpel16_h_lowpass_altivec
-#define PREFIX_h264_qpel16_h_lowpass_num       altivec_avg_h264_qpel16_h_lowpass_num
-#define PREFIX_h264_qpel16_v_lowpass_altivec   avg_h264_qpel16_v_lowpass_altivec
-#define PREFIX_h264_qpel16_v_lowpass_num       altivec_avg_h264_qpel16_v_lowpass_num
-#define PREFIX_h264_qpel16_hv_lowpass_altivec  avg_h264_qpel16_hv_lowpass_altivec
-#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_avg_h264_qpel16_hv_lowpass_num
-#include "h264_altivec_template.c"
-#undef OP_U8_ALTIVEC
-#undef PREFIX_h264_chroma_mc8_altivec
-#undef PREFIX_h264_chroma_mc8_num
-#undef PREFIX_h264_qpel16_h_lowpass_altivec
-#undef PREFIX_h264_qpel16_h_lowpass_num
-#undef PREFIX_h264_qpel16_v_lowpass_altivec
-#undef PREFIX_h264_qpel16_v_lowpass_num
-#undef PREFIX_h264_qpel16_hv_lowpass_altivec
-#undef PREFIX_h264_qpel16_hv_lowpass_num
-
-#define H264_MC(OPNAME, SIZE, CODETYPE) \
-static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\
-    ff_ ## OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
-    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
-    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
-    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
-    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
-    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
-    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
-    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
-    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
-    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
-    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
-    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
-    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
-    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
-    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
-    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
-    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
-    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
-    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
-    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
-    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
-    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
-    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
-    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
-    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
-    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
-    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
-    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
-    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
-    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
-    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
-    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
-    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
-    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
-    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
-}\
-
-static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
-                                    const uint8_t * src2, int dst_stride,
-                                    int src_stride1, int h)
-{
-    int i;
-    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;
-
-    mask_ = vec_lvsl(0, src2);
-
-    for (i = 0; i < h; i++) {
-
-        tmp1 = vec_ld(i * src_stride1, src1);
-        mask = vec_lvsl(i * src_stride1, src1);
-        tmp2 = vec_ld(i * src_stride1 + 15, src1);
-
-        a = vec_perm(tmp1, tmp2, mask);
-
-        tmp1 = vec_ld(i * 16, src2);
-        tmp2 = vec_ld(i * 16 + 15, src2);
-
-        b = vec_perm(tmp1, tmp2, mask_);
-
-        tmp1 = vec_ld(0, dst);
-        mask = vec_lvsl(0, dst);
-        tmp2 = vec_ld(15, dst);
-
-        d = vec_avg(a, b);
-
-        edges = vec_perm(tmp2, tmp1, mask);
-
-        align = vec_lvsr(0, dst);
-
-        tmp2 = vec_perm(d, edges, align);
-        tmp1 = vec_perm(edges, d, align);
-
-        vec_st(tmp2, 15, dst);
-        vec_st(tmp1, 0 , dst);
-
-        dst += dst_stride;
-    }
-}
-
-static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
-                                    const uint8_t * src2, int dst_stride,
-                                    int src_stride1, int h)
-{
-    int i;
-    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;
-
-    mask_ = vec_lvsl(0, src2);
-
-    for (i = 0; i < h; i++) {
-
-        tmp1 = vec_ld(i * src_stride1, src1);
-        mask = vec_lvsl(i * src_stride1, src1);
-        tmp2 = vec_ld(i * src_stride1 + 15, src1);
-
-        a = vec_perm(tmp1, tmp2, mask);
-
-        tmp1 = vec_ld(i * 16, src2);
-        tmp2 = vec_ld(i * 16 + 15, src2);
-
-        b = vec_perm(tmp1, tmp2, mask_);
-
-        tmp1 = vec_ld(0, dst);
-        mask = vec_lvsl(0, dst);
-        tmp2 = vec_ld(15, dst);
-
-        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));
-
-        edges = vec_perm(tmp2, tmp1, mask);
-
-        align = vec_lvsr(0, dst);
-
-        tmp2 = vec_perm(d, edges, align);
-        tmp1 = vec_perm(edges, d, align);
-
-        vec_st(tmp2, 15, dst);
-        vec_st(tmp1, 0 , dst);
-
-        dst += dst_stride;
-    }
-}
-
-/* Implemented but could be faster
-#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h)
-#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
- */
-
-H264_MC(put_, 16, altivec)
-H264_MC(avg_, 16, altivec)
-
-
 /****************************************************************************
  * IDCT transform:
  ****************************************************************************/
@@ -967,39 +718,6 @@ static void ff_biweight_h264_pixels ## W ## _altivec(uint8_t *dst, uint8_t *src,
 H264_WEIGHT(16)
 H264_WEIGHT( 8)
 
-void ff_dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {
-    const int high_bit_depth = avctx->bits_per_raw_sample > 8;
-
-    if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
-    if (!high_bit_depth) {
-        c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
-        c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
-
-#define dspfunc(PFX, IDX, NUM) \
-        c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
-        c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
-        c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
-        c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
-        c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
-        c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
-        c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
-        c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
-        c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
-        c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
-        c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
-        c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
-        c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
-        c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
-        c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
-        c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec
-
-        dspfunc(put_h264_qpel, 0, 16);
-        dspfunc(avg_h264_qpel, 0, 16);
-#undef dspfunc
-    }
-    }
-}
-
 void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
 {
     if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
diff --git a/libavcodec/ppc/h264_altivec_template.c b/libavcodec/ppc/h264_altivec_template.c
deleted file mode 100644 (file)
index b445f92..0000000
+++ /dev/null
@@ -1,775 +0,0 @@
-/*
- * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
- *
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * Libav is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "libavutil/mem.h"
-
-#ifdef DEBUG
-#define ASSERT_ALIGNED(ptr) assert(((unsigned long)ptr&0x0000000F));
-#else
-#define ASSERT_ALIGNED(ptr) ;
-#endif
-
-/* this code assume that stride % 16 == 0 */
-
-#define CHROMA_MC8_ALTIVEC_CORE(BIAS1, BIAS2) \
-        vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc2uc);\
-        vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc3uc);\
-\
-        psum = vec_mladd(vA, vsrc0ssH, BIAS1);\
-        psum = vec_mladd(vB, vsrc1ssH, psum);\
-        psum = vec_mladd(vC, vsrc2ssH, psum);\
-        psum = vec_mladd(vD, vsrc3ssH, psum);\
-        psum = BIAS2(psum);\
-        psum = vec_sr(psum, v6us);\
-\
-        vdst = vec_ld(0, dst);\
-        ppsum = (vec_u8)vec_pack(psum, psum);\
-        vfdst = vec_perm(vdst, ppsum, fperm);\
-\
-        OP_U8_ALTIVEC(fsum, vfdst, vdst);\
-\
-        vec_st(fsum, 0, dst);\
-\
-        vsrc0ssH = vsrc2ssH;\
-        vsrc1ssH = vsrc3ssH;\
-\
-        dst += stride;\
-        src += stride;
-
-#define CHROMA_MC8_ALTIVEC_CORE_SIMPLE \
-\
-        vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);\
-        vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);\
-\
-        psum = vec_mladd(vA, vsrc0ssH, v32ss);\
-        psum = vec_mladd(vE, vsrc1ssH, psum);\
-        psum = vec_sr(psum, v6us);\
-\
-        vdst = vec_ld(0, dst);\
-        ppsum = (vec_u8)vec_pack(psum, psum);\
-        vfdst = vec_perm(vdst, ppsum, fperm);\
-\
-        OP_U8_ALTIVEC(fsum, vfdst, vdst);\
-\
-        vec_st(fsum, 0, dst);\
-\
-        dst += stride;\
-        src += stride;
-
-#define noop(a) a
-#define add28(a) vec_add(v28ss, a)
-
-#ifdef PREFIX_h264_chroma_mc8_altivec
-static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
-                                    int stride, int h, int x, int y) {
-    DECLARE_ALIGNED(16, signed int, ABCD)[4] =
-                        {((8 - x) * (8 - y)),
-                         ((    x) * (8 - y)),
-                         ((8 - x) * (    y)),
-                         ((    x) * (    y))};
-    register int i;
-    vec_u8 fperm;
-    const vec_s32 vABCD = vec_ld(0, ABCD);
-    const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
-    const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
-    const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
-    const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
-    LOAD_ZERO;
-    const vec_s16 v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
-    const vec_u16 v6us = vec_splat_u16(6);
-    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
-    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
-
-    vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
-    vec_u8 vsrc0uc, vsrc1uc;
-    vec_s16 vsrc0ssH, vsrc1ssH;
-    vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
-    vec_s16 vsrc2ssH, vsrc3ssH, psum;
-    vec_u8 vdst, ppsum, vfdst, fsum;
-
-    if (((unsigned long)dst) % 16 == 0) {
-        fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
-                         0x14, 0x15, 0x16, 0x17,
-                         0x08, 0x09, 0x0A, 0x0B,
-                         0x0C, 0x0D, 0x0E, 0x0F};
-    } else {
-        fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
-                         0x04, 0x05, 0x06, 0x07,
-                         0x18, 0x19, 0x1A, 0x1B,
-                         0x1C, 0x1D, 0x1E, 0x1F};
-    }
-
-    vsrcAuc = vec_ld(0, src);
-
-    if (loadSecond)
-        vsrcBuc = vec_ld(16, src);
-    vsrcperm0 = vec_lvsl(0, src);
-    vsrcperm1 = vec_lvsl(1, src);
-
-    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
-    if (reallyBadAlign)
-        vsrc1uc = vsrcBuc;
-    else
-        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
-
-    vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);
-    vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);
-
-    if (ABCD[3]) {
-        if (!loadSecond) {// -> !reallyBadAlign
-            for (i = 0 ; i < h ; i++) {
-                vsrcCuc = vec_ld(stride + 0, src);
-                vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
-                vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
-
-                CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
-            }
-        } else {
-            vec_u8 vsrcDuc;
-            for (i = 0 ; i < h ; i++) {
-                vsrcCuc = vec_ld(stride + 0, src);
-                vsrcDuc = vec_ld(stride + 16, src);
-                vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
-                if (reallyBadAlign)
-                    vsrc3uc = vsrcDuc;
-                else
-                    vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
-
-                CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
-            }
-        }
-    } else {
-        const vec_s16 vE = vec_add(vB, vC);
-        if (ABCD[2]) { // x == 0 B == 0
-            if (!loadSecond) {// -> !reallyBadAlign
-                for (i = 0 ; i < h ; i++) {
-                    vsrcCuc = vec_ld(stride + 0, src);
-                    vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
-                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE
-
-                    vsrc0uc = vsrc1uc;
-                }
-            } else {
-                vec_u8 vsrcDuc;
-                for (i = 0 ; i < h ; i++) {
-                    vsrcCuc = vec_ld(stride + 0, src);
-                    vsrcDuc = vec_ld(stride + 15, src);
-                    vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
-                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE
-
-                    vsrc0uc = vsrc1uc;
-                }
-            }
-        } else { // y == 0 C == 0
-            if (!loadSecond) {// -> !reallyBadAlign
-                for (i = 0 ; i < h ; i++) {
-                    vsrcCuc = vec_ld(0, src);
-                    vsrc0uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
-                    vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
-
-                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE
-                }
-            } else {
-                vec_u8 vsrcDuc;
-                for (i = 0 ; i < h ; i++) {
-                    vsrcCuc = vec_ld(0, src);
-                    vsrcDuc = vec_ld(15, src);
-                    vsrc0uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
-                    if (reallyBadAlign)
-                        vsrc1uc = vsrcDuc;
-                    else
-                        vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
-
-                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE
-                }
-            }
-        }
-    }
-}
-#endif
-
-/* this code assume that stride % 16 == 0 */
-#ifdef PREFIX_no_rnd_vc1_chroma_mc8_altivec
-static void PREFIX_no_rnd_vc1_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
-   DECLARE_ALIGNED(16, signed int, ABCD)[4] =
-                        {((8 - x) * (8 - y)),
-                         ((    x) * (8 - y)),
-                         ((8 - x) * (    y)),
-                         ((    x) * (    y))};
-    register int i;
-    vec_u8 fperm;
-    const vec_s32 vABCD = vec_ld(0, ABCD);
-    const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
-    const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
-    const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
-    const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
-    LOAD_ZERO;
-    const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
-    const vec_u16 v6us  = vec_splat_u16(6);
-    register int loadSecond     = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
-    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
-
-    vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
-    vec_u8 vsrc0uc, vsrc1uc;
-    vec_s16 vsrc0ssH, vsrc1ssH;
-    vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
-    vec_s16 vsrc2ssH, vsrc3ssH, psum;
-    vec_u8 vdst, ppsum, vfdst, fsum;
-
-    if (((unsigned long)dst) % 16 == 0) {
-        fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
-                         0x14, 0x15, 0x16, 0x17,
-                         0x08, 0x09, 0x0A, 0x0B,
-                         0x0C, 0x0D, 0x0E, 0x0F};
-    } else {
-        fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
-                         0x04, 0x05, 0x06, 0x07,
-                         0x18, 0x19, 0x1A, 0x1B,
-                         0x1C, 0x1D, 0x1E, 0x1F};
-    }
-
-    vsrcAuc = vec_ld(0, src);
-
-    if (loadSecond)
-        vsrcBuc = vec_ld(16, src);
-    vsrcperm0 = vec_lvsl(0, src);
-    vsrcperm1 = vec_lvsl(1, src);
-
-    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
-    if (reallyBadAlign)
-        vsrc1uc = vsrcBuc;
-    else
-        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
-
-    vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc0uc);
-    vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc1uc);
-
-    if (!loadSecond) {// -> !reallyBadAlign
-        for (i = 0 ; i < h ; i++) {
-
-
-            vsrcCuc = vec_ld(stride + 0, src);
-
-            vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
-            vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
-
-            CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
-        }
-    } else {
-        vec_u8 vsrcDuc;
-        for (i = 0 ; i < h ; i++) {
-            vsrcCuc = vec_ld(stride + 0, src);
-            vsrcDuc = vec_ld(stride + 16, src);
-
-            vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
-            if (reallyBadAlign)
-                vsrc3uc = vsrcDuc;
-            else
-                vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
-
-            CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
-        }
-    }
-}
-#endif
-
-#undef noop
-#undef add28
-#undef CHROMA_MC8_ALTIVEC_CORE
-
-/* this code assume stride % 16 == 0 */
-#ifdef PREFIX_h264_qpel16_h_lowpass_altivec
-static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
-    register int i;
-
-    LOAD_ZERO;
-    const vec_u8 permM2 = vec_lvsl(-2, src);
-    const vec_u8 permM1 = vec_lvsl(-1, src);
-    const vec_u8 permP0 = vec_lvsl(+0, src);
-    const vec_u8 permP1 = vec_lvsl(+1, src);
-    const vec_u8 permP2 = vec_lvsl(+2, src);
-    const vec_u8 permP3 = vec_lvsl(+3, src);
-    const vec_s16 v5ss = vec_splat_s16(5);
-    const vec_u16 v5us = vec_splat_u16(5);
-    const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
-    const vec_s16 v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));
-
-    vec_u8 srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
-
-    register int align = ((((unsigned long)src) - 2) % 16);
-
-    vec_s16 srcP0A, srcP0B, srcP1A, srcP1B,
-              srcP2A, srcP2B, srcP3A, srcP3B,
-              srcM1A, srcM1B, srcM2A, srcM2B,
-              sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
-              pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
-              psumA, psumB, sumA, sumB;
-
-    vec_u8 sum, fsum;
-
-    for (i = 0 ; i < 16 ; i ++) {
-        vec_u8 srcR1 = vec_ld(-2, src);
-        vec_u8 srcR2 = vec_ld(14, src);
-
-        switch (align) {
-        default: {
-            srcM2 = vec_perm(srcR1, srcR2, permM2);
-            srcM1 = vec_perm(srcR1, srcR2, permM1);
-            srcP0 = vec_perm(srcR1, srcR2, permP0);
-            srcP1 = vec_perm(srcR1, srcR2, permP1);
-            srcP2 = vec_perm(srcR1, srcR2, permP2);
-            srcP3 = vec_perm(srcR1, srcR2, permP3);
-        } break;
-        case 11: {
-            srcM2 = vec_perm(srcR1, srcR2, permM2);
-            srcM1 = vec_perm(srcR1, srcR2, permM1);
-            srcP0 = vec_perm(srcR1, srcR2, permP0);
-            srcP1 = vec_perm(srcR1, srcR2, permP1);
-            srcP2 = vec_perm(srcR1, srcR2, permP2);
-            srcP3 = srcR2;
-        } break;
-        case 12: {
-            vec_u8 srcR3 = vec_ld(30, src);
-            srcM2 = vec_perm(srcR1, srcR2, permM2);
-            srcM1 = vec_perm(srcR1, srcR2, permM1);
-            srcP0 = vec_perm(srcR1, srcR2, permP0);
-            srcP1 = vec_perm(srcR1, srcR2, permP1);
-            srcP2 = srcR2;
-            srcP3 = vec_perm(srcR2, srcR3, permP3);
-        } break;
-        case 13: {
-            vec_u8 srcR3 = vec_ld(30, src);
-            srcM2 = vec_perm(srcR1, srcR2, permM2);
-            srcM1 = vec_perm(srcR1, srcR2, permM1);
-            srcP0 = vec_perm(srcR1, srcR2, permP0);
-            srcP1 = srcR2;
-            srcP2 = vec_perm(srcR2, srcR3, permP2);
-            srcP3 = vec_perm(srcR2, srcR3, permP3);
-        } break;
-        case 14: {
-            vec_u8 srcR3 = vec_ld(30, src);
-            srcM2 = vec_perm(srcR1, srcR2, permM2);
-            srcM1 = vec_perm(srcR1, srcR2, permM1);
-            srcP0 = srcR2;
-            srcP1 = vec_perm(srcR2, srcR3, permP1);
-            srcP2 = vec_perm(srcR2, srcR3, permP2);
-            srcP3 = vec_perm(srcR2, srcR3, permP3);
-        } break;
-        case 15: {
-            vec_u8 srcR3 = vec_ld(30, src);
-            srcM2 = vec_perm(srcR1, srcR2, permM2);
-            srcM1 = srcR2;
-            srcP0 = vec_perm(srcR2, srcR3, permP0);
-            srcP1 = vec_perm(srcR2, srcR3, permP1);
-            srcP2 = vec_perm(srcR2, srcR3, permP2);
-            srcP3 = vec_perm(srcR2, srcR3, permP3);
-        } break;
-        }
-
-        srcP0A = (vec_s16) vec_mergeh(zero_u8v, srcP0);
-        srcP0B = (vec_s16) vec_mergel(zero_u8v, srcP0);
-        srcP1A = (vec_s16) vec_mergeh(zero_u8v, srcP1);
-        srcP1B = (vec_s16) vec_mergel(zero_u8v, srcP1);
-
-        srcP2A = (vec_s16) vec_mergeh(zero_u8v, srcP2);
-        srcP2B = (vec_s16) vec_mergel(zero_u8v, srcP2);
-        srcP3A = (vec_s16) vec_mergeh(zero_u8v, srcP3);
-        srcP3B = (vec_s16) vec_mergel(zero_u8v, srcP3);
-
-        srcM1A = (vec_s16) vec_mergeh(zero_u8v, srcM1);
-        srcM1B = (vec_s16) vec_mergel(zero_u8v, srcM1);
-        srcM2A = (vec_s16) vec_mergeh(zero_u8v, srcM2);
-        srcM2B = (vec_s16) vec_mergel(zero_u8v, srcM2);
-
-        sum1A = vec_adds(srcP0A, srcP1A);
-        sum1B = vec_adds(srcP0B, srcP1B);
-        sum2A = vec_adds(srcM1A, srcP2A);
-        sum2B = vec_adds(srcM1B, srcP2B);
-        sum3A = vec_adds(srcM2A, srcP3A);
-        sum3B = vec_adds(srcM2B, srcP3B);
-
-        pp1A = vec_mladd(sum1A, v20ss, v16ss);
-        pp1B = vec_mladd(sum1B, v20ss, v16ss);
-
-        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
-        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);
-
-        pp3A = vec_add(sum3A, pp1A);
-        pp3B = vec_add(sum3B, pp1B);
-
-        psumA = vec_sub(pp3A, pp2A);
-        psumB = vec_sub(pp3B, pp2B);
-
-        sumA = vec_sra(psumA, v5us);
-        sumB = vec_sra(psumB, v5us);
-
-        sum = vec_packsu(sumA, sumB);
-
-        ASSERT_ALIGNED(dst);
-
-        OP_U8_ALTIVEC(fsum, sum, vec_ld(0, dst));
-
-        vec_st(fsum, 0, dst);
-
-        src += srcStride;
-        dst += dstStride;
-    }
-}
-#endif
-
-/* this code assume stride % 16 == 0 */
-#ifdef PREFIX_h264_qpel16_v_lowpass_altivec
-static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
-    register int i;
-
-    LOAD_ZERO;
-    const vec_u8 perm = vec_lvsl(0, src);
-    const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
-    const vec_u16 v5us = vec_splat_u16(5);
-    const vec_s16 v5ss = vec_splat_s16(5);
-    const vec_s16 v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));
-
-    uint8_t *srcbis = src - (srcStride * 2);
-
-    const vec_u8 srcM2a = vec_ld(0, srcbis);
-    const vec_u8 srcM2b = vec_ld(16, srcbis);
-    const vec_u8 srcM2 = vec_perm(srcM2a, srcM2b, perm);
-    //srcbis += srcStride;
-    const vec_u8 srcM1a = vec_ld(0, srcbis += srcStride);
-    const vec_u8 srcM1b = vec_ld(16, srcbis);
-    const vec_u8 srcM1 = vec_perm(srcM1a, srcM1b, perm);
-    //srcbis += srcStride;
-    const vec_u8 srcP0a = vec_ld(0, srcbis += srcStride);
-    const vec_u8 srcP0b = vec_ld(16, srcbis);
-    const vec_u8 srcP0 = vec_perm(srcP0a, srcP0b, perm);
-    //srcbis += srcStride;
-    const vec_u8 srcP1a = vec_ld(0, srcbis += srcStride);
-    const vec_u8 srcP1b = vec_ld(16, srcbis);
-    const vec_u8 srcP1 = vec_perm(srcP1a, srcP1b, perm);
-    //srcbis += srcStride;
-    const vec_u8 srcP2a = vec_ld(0, srcbis += srcStride);
-    const vec_u8 srcP2b = vec_ld(16, srcbis);
-    const vec_u8 srcP2 = vec_perm(srcP2a, srcP2b, perm);
-    //srcbis += srcStride;
-
-    vec_s16 srcM2ssA = (vec_s16) vec_mergeh(zero_u8v, srcM2);
-    vec_s16 srcM2ssB = (vec_s16) vec_mergel(zero_u8v, srcM2);
-    vec_s16 srcM1ssA = (vec_s16) vec_mergeh(zero_u8v, srcM1);
-    vec_s16 srcM1ssB = (vec_s16) vec_mergel(zero_u8v, srcM1);
-    vec_s16 srcP0ssA = (vec_s16) vec_mergeh(zero_u8v, srcP0);
-    vec_s16 srcP0ssB = (vec_s16) vec_mergel(zero_u8v, srcP0);
-    vec_s16 srcP1ssA = (vec_s16) vec_mergeh(zero_u8v, srcP1);
-    vec_s16 srcP1ssB = (vec_s16) vec_mergel(zero_u8v, srcP1);
-    vec_s16 srcP2ssA = (vec_s16) vec_mergeh(zero_u8v, srcP2);
-    vec_s16 srcP2ssB = (vec_s16) vec_mergel(zero_u8v, srcP2);
-
-    vec_s16 pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
-              psumA, psumB, sumA, sumB,
-              srcP3ssA, srcP3ssB,
-              sum1A, sum1B, sum2A, sum2B, sum3A, sum3B;
-
-    vec_u8 sum, fsum, srcP3a, srcP3b, srcP3;
-
-    for (i = 0 ; i < 16 ; i++) {
-        srcP3a = vec_ld(0, srcbis += srcStride);
-        srcP3b = vec_ld(16, srcbis);
-        srcP3 = vec_perm(srcP3a, srcP3b, perm);
-        srcP3ssA = (vec_s16) vec_mergeh(zero_u8v, srcP3);
-        srcP3ssB = (vec_s16) vec_mergel(zero_u8v, srcP3);
-        //srcbis += srcStride;
-
-        sum1A = vec_adds(srcP0ssA, srcP1ssA);
-        sum1B = vec_adds(srcP0ssB, srcP1ssB);
-        sum2A = vec_adds(srcM1ssA, srcP2ssA);
-        sum2B = vec_adds(srcM1ssB, srcP2ssB);
-        sum3A = vec_adds(srcM2ssA, srcP3ssA);
-        sum3B = vec_adds(srcM2ssB, srcP3ssB);
-
-        srcM2ssA = srcM1ssA;
-        srcM2ssB = srcM1ssB;
-        srcM1ssA = srcP0ssA;
-        srcM1ssB = srcP0ssB;
-        srcP0ssA = srcP1ssA;
-        srcP0ssB = srcP1ssB;
-        srcP1ssA = srcP2ssA;
-        srcP1ssB = srcP2ssB;
-        srcP2ssA = srcP3ssA;
-        srcP2ssB = srcP3ssB;
-
-        pp1A = vec_mladd(sum1A, v20ss, v16ss);
-        pp1B = vec_mladd(sum1B, v20ss, v16ss);
-
-        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
-        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);
-
-        pp3A = vec_add(sum3A, pp1A);
-        pp3B = vec_add(sum3B, pp1B);
-
-        psumA = vec_sub(pp3A, pp2A);
-        psumB = vec_sub(pp3B, pp2B);
-
-        sumA = vec_sra(psumA, v5us);
-        sumB = vec_sra(psumB, v5us);
-
-        sum = vec_packsu(sumA, sumB);
-
-        ASSERT_ALIGNED(dst);
-
-        OP_U8_ALTIVEC(fsum, sum, vec_ld(0, dst));
-
-        vec_st(fsum, 0, dst);
-
-        dst += dstStride;
-    }
-}
-#endif
-
-/* this code assume stride % 16 == 0 *and* tmp is properly aligned */
-#ifdef PREFIX_h264_qpel16_hv_lowpass_altivec
-static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, uint8_t * src, int dstStride, int tmpStride, int srcStride) {
-    register int i;
-    LOAD_ZERO;
-    const vec_u8 permM2 = vec_lvsl(-2, src);
-    const vec_u8 permM1 = vec_lvsl(-1, src);
-    const vec_u8 permP0 = vec_lvsl(+0, src);
-    const vec_u8 permP1 = vec_lvsl(+1, src);
-    const vec_u8 permP2 = vec_lvsl(+2, src);
-    const vec_u8 permP3 = vec_lvsl(+3, src);
-    const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
-    const vec_u32 v10ui = vec_splat_u32(10);
-    const vec_s16 v5ss = vec_splat_s16(5);
-    const vec_s16 v1ss = vec_splat_s16(1);
-    const vec_s32 v512si = vec_sl(vec_splat_s32(1),vec_splat_u32(9));
-    const vec_u32 v16ui = vec_sl(vec_splat_u32(1),vec_splat_u32(4));
-
-    register int align = ((((unsigned long)src) - 2) % 16);
-
-    vec_s16 srcP0A, srcP0B, srcP1A, srcP1B,
-              srcP2A, srcP2B, srcP3A, srcP3B,
-              srcM1A, srcM1B, srcM2A, srcM2B,
-              sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
-              pp1A, pp1B, pp2A, pp2B, psumA, psumB;
-
-    const vec_u8 mperm = (const vec_u8)
-        {0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
-         0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F};
-    int16_t *tmpbis = tmp;
-
-    vec_s16 tmpM1ssA, tmpM1ssB, tmpM2ssA, tmpM2ssB,
-              tmpP0ssA, tmpP0ssB, tmpP1ssA, tmpP1ssB,
-              tmpP2ssA, tmpP2ssB;
-
-    vec_s32 pp1Ae, pp1Ao, pp1Be, pp1Bo, pp2Ae, pp2Ao, pp2Be, pp2Bo,
-              pp3Ae, pp3Ao, pp3Be, pp3Bo, pp1cAe, pp1cAo, pp1cBe, pp1cBo,
-              pp32Ae, pp32Ao, pp32Be, pp32Bo, sumAe, sumAo, sumBe, sumBo,
-              ssumAe, ssumAo, ssumBe, ssumBo;
-    vec_u8 fsum, sumv, sum;
-    vec_s16 ssume, ssumo;
-
-    src -= (2 * srcStride);
-    for (i = 0 ; i < 21 ; i ++) {
-        vec_u8 srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
-        vec_u8 srcR1 = vec_ld(-2, src);
-        vec_u8 srcR2 = vec_ld(14, src);
-
-        switch (align) {
-        default: {
-            srcM2 = vec_perm(srcR1, srcR2, permM2);
-            srcM1 = vec_perm(srcR1, srcR2, permM1);
-            srcP0 = vec_perm(srcR1, srcR2, permP0);
-            srcP1 = vec_perm(srcR1, srcR2, permP1);
-            srcP2 = vec_perm(srcR1, srcR2, permP2);
-            srcP3 = vec_perm(srcR1, srcR2, permP3);
-        } break;
-        case 11: {
-            srcM2 = vec_perm(srcR1, srcR2, permM2);
-            srcM1 = vec_perm(srcR1, srcR2, permM1);
-            srcP0 = vec_perm(srcR1, srcR2, permP0);
-            srcP1 = vec_perm(srcR1, srcR2, permP1);
-            srcP2 = vec_perm(srcR1, srcR2, permP2);
-            srcP3 = srcR2;
-        } break;
-        case 12: {
-            vec_u8 srcR3 = vec_ld(30, src);
-            srcM2 = vec_perm(srcR1, srcR2, permM2);
-            srcM1 = vec_perm(srcR1, srcR2, permM1);
-            srcP0 = vec_perm(srcR1, srcR2, permP0);
-            srcP1 = vec_perm(srcR1, srcR2, permP1);
-            srcP2 = srcR2;
-            srcP3 = vec_perm(srcR2, srcR3, permP3);
-        } break;
-        case 13: {
-            vec_u8 srcR3 = vec_ld(30, src);
-            srcM2 = vec_perm(srcR1, srcR2, permM2);
-            srcM1 = vec_perm(srcR1, srcR2, permM1);
-            srcP0 = vec_perm(srcR1, srcR2, permP0);
-            srcP1 = srcR2;
-            srcP2 = vec_perm(srcR2, srcR3, permP2);
-            srcP3 = vec_perm(srcR2, srcR3, permP3);
-        } break;
-        case 14: {
-            vec_u8 srcR3 = vec_ld(30, src);
-            srcM2 = vec_perm(srcR1, srcR2, permM2);
-            srcM1 = vec_perm(srcR1, srcR2, permM1);
-            srcP0 = srcR2;
-            srcP1 = vec_perm(srcR2, srcR3, permP1);
-            srcP2 = vec_perm(srcR2, srcR3, permP2);
-            srcP3 = vec_perm(srcR2, srcR3, permP3);
-        } break;
-        case 15: {
-            vec_u8 srcR3 = vec_ld(30, src);
-            srcM2 = vec_perm(srcR1, srcR2, permM2);
-            srcM1 = srcR2;
-            srcP0 = vec_perm(srcR2, srcR3, permP0);
-            srcP1 = vec_perm(srcR2, srcR3, permP1);
-            srcP2 = vec_perm(srcR2, srcR3, permP2);
-            srcP3 = vec_perm(srcR2, srcR3, permP3);
-        } break;
-        }
-
-        srcP0A = (vec_s16) vec_mergeh(zero_u8v, srcP0);
-        srcP0B = (vec_s16) vec_mergel(zero_u8v, srcP0);
-        srcP1A = (vec_s16) vec_mergeh(zero_u8v, srcP1);
-        srcP1B = (vec_s16) vec_mergel(zero_u8v, srcP1);
-
-        srcP2A = (vec_s16) vec_mergeh(zero_u8v, srcP2);
-        srcP2B = (vec_s16) vec_mergel(zero_u8v, srcP2);
-        srcP3A = (vec_s16) vec_mergeh(zero_u8v, srcP3);
-        srcP3B = (vec_s16) vec_mergel(zero_u8v, srcP3);
-
-        srcM1A = (vec_s16) vec_mergeh(zero_u8v, srcM1);
-        srcM1B = (vec_s16) vec_mergel(zero_u8v, srcM1);
-        srcM2A = (vec_s16) vec_mergeh(zero_u8v, srcM2);
-        srcM2B = (vec_s16) vec_mergel(zero_u8v, srcM2);
-
-        sum1A = vec_adds(srcP0A, srcP1A);
-        sum1B = vec_adds(srcP0B, srcP1B);
-        sum2A = vec_adds(srcM1A, srcP2A);
-        sum2B = vec_adds(srcM1B, srcP2B);
-        sum3A = vec_adds(srcM2A, srcP3A);
-        sum3B = vec_adds(srcM2B, srcP3B);
-
-        pp1A = vec_mladd(sum1A, v20ss, sum3A);
-        pp1B = vec_mladd(sum1B, v20ss, sum3B);
-
-        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
-        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);
-
-        psumA = vec_sub(pp1A, pp2A);
-        psumB = vec_sub(pp1B, pp2B);
-
-        vec_st(psumA, 0, tmp);
-        vec_st(psumB, 16, tmp);
-
-        src += srcStride;
-        tmp += tmpStride; /* int16_t*, and stride is 16, so it's OK here */
-    }
-
-    tmpM2ssA = vec_ld(0, tmpbis);
-    tmpM2ssB = vec_ld(16, tmpbis);
-    tmpbis += tmpStride;
-    tmpM1ssA = vec_ld(0, tmpbis);
-    tmpM1ssB = vec_ld(16, tmpbis);
-    tmpbis += tmpStride;
-    tmpP0ssA = vec_ld(0, tmpbis);
-    tmpP0ssB = vec_ld(16, tmpbis);
-    tmpbis += tmpStride;
-    tmpP1ssA = vec_ld(0, tmpbis);
-    tmpP1ssB = vec_ld(16, tmpbis);
-    tmpbis += tmpStride;
-    tmpP2ssA = vec_ld(0, tmpbis);
-    tmpP2ssB = vec_ld(16, tmpbis);
-    tmpbis += tmpStride;
-
-    for (i = 0 ; i < 16 ; i++) {
-        const vec_s16 tmpP3ssA = vec_ld(0, tmpbis);
-        const vec_s16 tmpP3ssB = vec_ld(16, tmpbis);
-
-        const vec_s16 sum1A = vec_adds(tmpP0ssA, tmpP1ssA);
-        const vec_s16 sum1B = vec_adds(tmpP0ssB, tmpP1ssB);
-        const vec_s16 sum2A = vec_adds(tmpM1ssA, tmpP2ssA);
-        const vec_s16 sum2B = vec_adds(tmpM1ssB, tmpP2ssB);
-        const vec_s16 sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
-        const vec_s16 sum3B = vec_adds(tmpM2ssB, tmpP3ssB);
-
-        tmpbis += tmpStride;
-
-        tmpM2ssA = tmpM1ssA;
-        tmpM2ssB = tmpM1ssB;
-        tmpM1ssA = tmpP0ssA;
-        tmpM1ssB = tmpP0ssB;
-        tmpP0ssA = tmpP1ssA;
-        tmpP0ssB = tmpP1ssB;
-        tmpP1ssA = tmpP2ssA;
-        tmpP1ssB = tmpP2ssB;
-        tmpP2ssA = tmpP3ssA;
-        tmpP2ssB = tmpP3ssB;
-
-        pp1Ae = vec_mule(sum1A, v20ss);
-        pp1Ao = vec_mulo(sum1A, v20ss);
-        pp1Be = vec_mule(sum1B, v20ss);
-        pp1Bo = vec_mulo(sum1B, v20ss);
-
-        pp2Ae = vec_mule(sum2A, v5ss);
-        pp2Ao = vec_mulo(sum2A, v5ss);
-        pp2Be = vec_mule(sum2B, v5ss);
-        pp2Bo = vec_mulo(sum2B, v5ss);
-
-        pp3Ae = vec_sra((vec_s32)sum3A, v16ui);
-        pp3Ao = vec_mulo(sum3A, v1ss);
-        pp3Be = vec_sra((vec_s32)sum3B, v16ui);
-        pp3Bo = vec_mulo(sum3B, v1ss);
-
-        pp1cAe = vec_add(pp1Ae, v512si);
-        pp1cAo = vec_add(pp1Ao, v512si);
-        pp1cBe = vec_add(pp1Be, v512si);
-        pp1cBo = vec_add(pp1Bo, v512si);
-
-        pp32Ae = vec_sub(pp3Ae, pp2Ae);
-        pp32Ao = vec_sub(pp3Ao, pp2Ao);
-        pp32Be = vec_sub(pp3Be, pp2Be);
-        pp32Bo = vec_sub(pp3Bo, pp2Bo);
-
-        sumAe = vec_add(pp1cAe, pp32Ae);
-        sumAo = vec_add(pp1cAo, pp32Ao);
-        sumBe = vec_add(pp1cBe, pp32Be);
-        sumBo = vec_add(pp1cBo, pp32Bo);
-
-        ssumAe = vec_sra(sumAe, v10ui);
-        ssumAo = vec_sra(sumAo, v10ui);
-        ssumBe = vec_sra(sumBe, v10ui);
-        ssumBo = vec_sra(sumBo, v10ui);
-
-        ssume = vec_packs(ssumAe, ssumBe);
-        ssumo = vec_packs(ssumAo, ssumBo);
-
-        sumv = vec_packsu(ssume, ssumo);
-        sum = vec_perm(sumv, sumv, mperm);
-
-        ASSERT_ALIGNED(dst);
-
-        OP_U8_ALTIVEC(fsum, sum, vec_ld(0, dst));
-
-        vec_st(fsum, 0, dst);
-
-        dst += dstStride;
-    }
-}
-#endif
diff --git a/libavcodec/ppc/h264_qpel.c b/libavcodec/ppc/h264_qpel.c
new file mode 100644 (file)
index 0000000..c32e07b
--- /dev/null
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libavcodec/h264qpel.h"
+
+#if HAVE_ALTIVEC
+#include "libavutil/cpu.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/ppc/types_altivec.h"
+#include "libavutil/ppc/util_altivec.h"
+#include "dsputil_altivec.h"
+
+#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
+#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)
+
+#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
+#define PREFIX_h264_chroma_mc8_altivec         put_h264_chroma_mc8_altivec
+#define PREFIX_h264_chroma_mc8_num             altivec_put_h264_chroma_mc8_num
+#define PREFIX_h264_qpel16_h_lowpass_altivec   put_h264_qpel16_h_lowpass_altivec
+#define PREFIX_h264_qpel16_h_lowpass_num       altivec_put_h264_qpel16_h_lowpass_num
+#define PREFIX_h264_qpel16_v_lowpass_altivec   put_h264_qpel16_v_lowpass_altivec
+#define PREFIX_h264_qpel16_v_lowpass_num       altivec_put_h264_qpel16_v_lowpass_num
+#define PREFIX_h264_qpel16_hv_lowpass_altivec  put_h264_qpel16_hv_lowpass_altivec
+#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_put_h264_qpel16_hv_lowpass_num
+#include "h264_qpel_template.c"
+#undef OP_U8_ALTIVEC
+#undef PREFIX_h264_chroma_mc8_altivec
+#undef PREFIX_h264_chroma_mc8_num
+#undef PREFIX_h264_qpel16_h_lowpass_altivec
+#undef PREFIX_h264_qpel16_h_lowpass_num
+#undef PREFIX_h264_qpel16_v_lowpass_altivec
+#undef PREFIX_h264_qpel16_v_lowpass_num
+#undef PREFIX_h264_qpel16_hv_lowpass_altivec
+#undef PREFIX_h264_qpel16_hv_lowpass_num
+
+#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
+#define PREFIX_h264_chroma_mc8_altivec         avg_h264_chroma_mc8_altivec
+#define PREFIX_h264_chroma_mc8_num             altivec_avg_h264_chroma_mc8_num
+#define PREFIX_h264_qpel16_h_lowpass_altivec   avg_h264_qpel16_h_lowpass_altivec
+#define PREFIX_h264_qpel16_h_lowpass_num       altivec_avg_h264_qpel16_h_lowpass_num
+#define PREFIX_h264_qpel16_v_lowpass_altivec   avg_h264_qpel16_v_lowpass_altivec
+#define PREFIX_h264_qpel16_v_lowpass_num       altivec_avg_h264_qpel16_v_lowpass_num
+#define PREFIX_h264_qpel16_hv_lowpass_altivec  avg_h264_qpel16_hv_lowpass_altivec
+#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_avg_h264_qpel16_hv_lowpass_num
+#include "h264_qpel_template.c"
+#undef OP_U8_ALTIVEC
+#undef PREFIX_h264_chroma_mc8_altivec
+#undef PREFIX_h264_chroma_mc8_num
+#undef PREFIX_h264_qpel16_h_lowpass_altivec
+#undef PREFIX_h264_qpel16_h_lowpass_num
+#undef PREFIX_h264_qpel16_v_lowpass_altivec
+#undef PREFIX_h264_qpel16_v_lowpass_num
+#undef PREFIX_h264_qpel16_hv_lowpass_altivec
+#undef PREFIX_h264_qpel16_hv_lowpass_num
+
+#define H264_MC(OPNAME, SIZE, CODETYPE) \
+static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\
+    ff_ ## OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
+    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
+    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
+    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
+    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
+    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
+    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
+    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
+    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
+    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
+    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
+    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
+    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
+    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
+    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
+    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
+    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
+    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
+    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
+    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
+    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
+    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
+    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
+}\
+
+static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
+                                    const uint8_t * src2, int dst_stride,
+                                    int src_stride1, int h)
+{
+    int i;
+    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;
+
+    mask_ = vec_lvsl(0, src2);
+
+    for (i = 0; i < h; i++) {
+
+        tmp1 = vec_ld(i * src_stride1, src1);
+        mask = vec_lvsl(i * src_stride1, src1);
+        tmp2 = vec_ld(i * src_stride1 + 15, src1);
+
+        a = vec_perm(tmp1, tmp2, mask);
+
+        tmp1 = vec_ld(i * 16, src2);
+        tmp2 = vec_ld(i * 16 + 15, src2);
+
+        b = vec_perm(tmp1, tmp2, mask_);
+
+        tmp1 = vec_ld(0, dst);
+        mask = vec_lvsl(0, dst);
+        tmp2 = vec_ld(15, dst);
+
+        d = vec_avg(a, b);
+
+        edges = vec_perm(tmp2, tmp1, mask);
+
+        align = vec_lvsr(0, dst);
+
+        tmp2 = vec_perm(d, edges, align);
+        tmp1 = vec_perm(edges, d, align);
+
+        vec_st(tmp2, 15, dst);
+        vec_st(tmp1, 0 , dst);
+
+        dst += dst_stride;
+    }
+}
+
+static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
+                                    const uint8_t * src2, int dst_stride,
+                                    int src_stride1, int h)
+{
+    int i;
+    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;
+
+    mask_ = vec_lvsl(0, src2);
+
+    for (i = 0; i < h; i++) {
+
+        tmp1 = vec_ld(i * src_stride1, src1);
+        mask = vec_lvsl(i * src_stride1, src1);
+        tmp2 = vec_ld(i * src_stride1 + 15, src1);
+
+        a = vec_perm(tmp1, tmp2, mask);
+
+        tmp1 = vec_ld(i * 16, src2);
+        tmp2 = vec_ld(i * 16 + 15, src2);
+
+        b = vec_perm(tmp1, tmp2, mask_);
+
+        tmp1 = vec_ld(0, dst);
+        mask = vec_lvsl(0, dst);
+        tmp2 = vec_ld(15, dst);
+
+        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));
+
+        edges = vec_perm(tmp2, tmp1, mask);
+
+        align = vec_lvsr(0, dst);
+
+        tmp2 = vec_perm(d, edges, align);
+        tmp1 = vec_perm(edges, d, align);
+
+        vec_st(tmp2, 15, dst);
+        vec_st(tmp1, 0 , dst);
+
+        dst += dst_stride;
+    }
+}
+
+/* Implemented but could be faster
+#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h)
+#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
+ */
+
+H264_MC(put_, 16, altivec)
+H264_MC(avg_, 16, altivec)
+
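For reference, the two *_l2 helpers above combine a strided source block with a packed 16-byte-wide intermediate using AltiVec's rounded byte average (vec_avg); the avg_ variant then averages that result into the existing destination. A scalar sketch of the same computation (the _c names are illustrative only, not part of this patch):

#include <stdint.h>

static void put_pixels16_l2_c(uint8_t *dst, const uint8_t *src1,
                              const uint8_t *src2, int dst_stride,
                              int src_stride1, int h)
{
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++)         /* rounded average of the two sources */
            dst[j] = (src1[j] + src2[j] + 1) >> 1;
        dst  += dst_stride;
        src1 += src_stride1;
        src2 += 16;                      /* src2 is a packed 16-wide buffer */
    }
}

static void avg_pixels16_l2_c(uint8_t *dst, const uint8_t *src1,
                              const uint8_t *src2, int dst_stride,
                              int src_stride1, int h)
{
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++) {       /* same average, then merged into dst */
            int p = (src1[j] + src2[j] + 1) >> 1;
            dst[j] = (dst[j] + p + 1) >> 1;
        }
        dst  += dst_stride;
        src1 += src_stride1;
        src2 += 16;
    }
}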
+void ff_dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx)
+{
+    const int high_bit_depth = avctx->bits_per_raw_sample > 8;
+
+    if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
+    if (!high_bit_depth) {
+        c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
+        c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
+    }
+    }
+}
+#endif /* HAVE_ALTIVEC */
+
+void ff_h264qpel_init_ppc(H264QpelContext* c, int bit_depth) {
+#if HAVE_ALTIVEC
+    const int high_bit_depth = bit_depth > 8;
+
+    if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
+    if (!high_bit_depth) {
+#define dspfunc(PFX, IDX, NUM) \
+        c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
+        c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
+        c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
+        c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
+        c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
+        c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
+        c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
+        c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
+        c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
+        c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
+        c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
+        c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
+        c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
+        c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
+        c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
+        c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec
+
+        dspfunc(put_h264_qpel, 0, 16);
+        dspfunc(avg_h264_qpel, 0, 16);
+#undef dspfunc
+    }
+    }
+#endif /* HAVE_ALTIVEC */
+}
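The table layout is the same as in the DSPContext version this replaces: the first index selects the block size (0 = 16x16 here) and the second index is the quarter-pel offset x + 4*y, so entry 0 is mc00 and entry 15 is mc33. A minimal caller-side sketch, assuming a context already filled by ff_h264qpel_init() for 8-bit content and the int-stride prototype used by the AltiVec functions above (the mc_luma16_put helper is illustrative only):

#include "libavcodec/h264qpel.h"

static void mc_luma16_put(H264QpelContext *qpel, uint8_t *dst,
                          uint8_t *src, int stride, int mx, int my)
{
    int dxy = ((my & 3) << 2) | (mx & 3);             /* 0..15: mc00 .. mc33 */
    qpel->put_h264_qpel_pixels_tab[0][dxy](dst, src, stride);
}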
diff --git a/libavcodec/ppc/h264_qpel_template.c b/libavcodec/ppc/h264_qpel_template.c
new file mode 100644 (file)
index 0000000..b445f92
--- /dev/null
@@ -0,0 +1,775 @@
+/*
+ * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/mem.h"
+
+#ifdef DEBUG
+#define ASSERT_ALIGNED(ptr) assert(((unsigned long)ptr&0x0000000F));
+#else
+#define ASSERT_ALIGNED(ptr) ;
+#endif
+
+/* this code assumes that stride % 16 == 0 */
+
+#define CHROMA_MC8_ALTIVEC_CORE(BIAS1, BIAS2) \
+        vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc2uc);\
+        vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc3uc);\
+\
+        psum = vec_mladd(vA, vsrc0ssH, BIAS1);\
+        psum = vec_mladd(vB, vsrc1ssH, psum);\
+        psum = vec_mladd(vC, vsrc2ssH, psum);\
+        psum = vec_mladd(vD, vsrc3ssH, psum);\
+        psum = BIAS2(psum);\
+        psum = vec_sr(psum, v6us);\
+\
+        vdst = vec_ld(0, dst);\
+        ppsum = (vec_u8)vec_pack(psum, psum);\
+        vfdst = vec_perm(vdst, ppsum, fperm);\
+\
+        OP_U8_ALTIVEC(fsum, vfdst, vdst);\
+\
+        vec_st(fsum, 0, dst);\
+\
+        vsrc0ssH = vsrc2ssH;\
+        vsrc1ssH = vsrc3ssH;\
+\
+        dst += stride;\
+        src += stride;
+
+#define CHROMA_MC8_ALTIVEC_CORE_SIMPLE \
+\
+        vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);\
+        vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);\
+\
+        psum = vec_mladd(vA, vsrc0ssH, v32ss);\
+        psum = vec_mladd(vE, vsrc1ssH, psum);\
+        psum = vec_sr(psum, v6us);\
+\
+        vdst = vec_ld(0, dst);\
+        ppsum = (vec_u8)vec_pack(psum, psum);\
+        vfdst = vec_perm(vdst, ppsum, fperm);\
+\
+        OP_U8_ALTIVEC(fsum, vfdst, vdst);\
+\
+        vec_st(fsum, 0, dst);\
+\
+        dst += stride;\
+        src += stride;
+
+#define noop(a) a
+#define add28(a) vec_add(v28ss, a)
+
+#ifdef PREFIX_h264_chroma_mc8_altivec
+static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
+                                    int stride, int h, int x, int y) {
+    DECLARE_ALIGNED(16, signed int, ABCD)[4] =
+                        {((8 - x) * (8 - y)),
+                         ((    x) * (8 - y)),
+                         ((8 - x) * (    y)),
+                         ((    x) * (    y))};
+    register int i;
+    vec_u8 fperm;
+    const vec_s32 vABCD = vec_ld(0, ABCD);
+    const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
+    const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
+    const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
+    const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
+    LOAD_ZERO;
+    const vec_s16 v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
+    const vec_u16 v6us = vec_splat_u16(6);
+    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
+    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
+
+    vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
+    vec_u8 vsrc0uc, vsrc1uc;
+    vec_s16 vsrc0ssH, vsrc1ssH;
+    vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
+    vec_s16 vsrc2ssH, vsrc3ssH, psum;
+    vec_u8 vdst, ppsum, vfdst, fsum;
+
+    if (((unsigned long)dst) % 16 == 0) {
+        fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
+                         0x14, 0x15, 0x16, 0x17,
+                         0x08, 0x09, 0x0A, 0x0B,
+                         0x0C, 0x0D, 0x0E, 0x0F};
+    } else {
+        fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
+                         0x04, 0x05, 0x06, 0x07,
+                         0x18, 0x19, 0x1A, 0x1B,
+                         0x1C, 0x1D, 0x1E, 0x1F};
+    }
+
+    vsrcAuc = vec_ld(0, src);
+
+    if (loadSecond)
+        vsrcBuc = vec_ld(16, src);
+    vsrcperm0 = vec_lvsl(0, src);
+    vsrcperm1 = vec_lvsl(1, src);
+
+    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
+    if (reallyBadAlign)
+        vsrc1uc = vsrcBuc;
+    else
+        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
+
+    vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);
+    vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);
+
+    if (ABCD[3]) {
+        if (!loadSecond) {// -> !reallyBadAlign
+            for (i = 0 ; i < h ; i++) {
+                vsrcCuc = vec_ld(stride + 0, src);
+                vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
+                vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
+
+                CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
+            }
+        } else {
+            vec_u8 vsrcDuc;
+            for (i = 0 ; i < h ; i++) {
+                vsrcCuc = vec_ld(stride + 0, src);
+                vsrcDuc = vec_ld(stride + 16, src);
+                vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
+                if (reallyBadAlign)
+                    vsrc3uc = vsrcDuc;
+                else
+                    vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
+
+                CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
+            }
+        }
+    } else {
+        const vec_s16 vE = vec_add(vB, vC);
+        if (ABCD[2]) { // x == 0 B == 0
+            if (!loadSecond) {// -> !reallyBadAlign
+                for (i = 0 ; i < h ; i++) {
+                    vsrcCuc = vec_ld(stride + 0, src);
+                    vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
+                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE
+
+                    vsrc0uc = vsrc1uc;
+                }
+            } else {
+                vec_u8 vsrcDuc;
+                for (i = 0 ; i < h ; i++) {
+                    vsrcCuc = vec_ld(stride + 0, src);
+                    vsrcDuc = vec_ld(stride + 15, src);
+                    vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
+                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE
+
+                    vsrc0uc = vsrc1uc;
+                }
+            }
+        } else { // y == 0 C == 0
+            if (!loadSecond) {// -> !reallyBadAlign
+                for (i = 0 ; i < h ; i++) {
+                    vsrcCuc = vec_ld(0, src);
+                    vsrc0uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
+                    vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
+
+                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE
+                }
+            } else {
+                vec_u8 vsrcDuc;
+                for (i = 0 ; i < h ; i++) {
+                    vsrcCuc = vec_ld(0, src);
+                    vsrcDuc = vec_ld(15, src);
+                    vsrc0uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
+                    if (reallyBadAlign)
+                        vsrc1uc = vsrcDuc;
+                    else
+                        vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
+
+                    CHROMA_MC8_ALTIVEC_CORE_SIMPLE
+                }
+            }
+        }
+    }
+}
+#endif
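The routine above is the AltiVec form of the H.264 chroma bilinear interpolation: with weights A = (8-x)(8-y), B = x(8-y), C = (8-x)y and D = xy, every output sample is (A*s0 + B*s1 + C*s2 + D*s3 + 32) >> 6, where s0..s3 are the four neighbouring source samples; the branches only specialize the x == 0 / y == 0 cases and the source alignment. A scalar sketch of the put variant (the avg variant additionally averages with the existing dst; the _c name is illustrative only):

#include <stdint.h>

static void h264_chroma_mc8_put_c(uint8_t *dst, const uint8_t *src,
                                  int stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B =      x  * (8 - y);
    const int C = (8 - x) *      y;
    const int D =      x  *      y;
    int i, j;

    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++)
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] + 32) >> 6;
        dst += stride;
        src += stride;
    }
}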
+
+/* this code assumes that stride % 16 == 0 */
+#ifdef PREFIX_no_rnd_vc1_chroma_mc8_altivec
+static void PREFIX_no_rnd_vc1_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
+   DECLARE_ALIGNED(16, signed int, ABCD)[4] =
+                        {((8 - x) * (8 - y)),
+                         ((    x) * (8 - y)),
+                         ((8 - x) * (    y)),
+                         ((    x) * (    y))};
+    register int i;
+    vec_u8 fperm;
+    const vec_s32 vABCD = vec_ld(0, ABCD);
+    const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
+    const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
+    const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
+    const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
+    LOAD_ZERO;
+    const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
+    const vec_u16 v6us  = vec_splat_u16(6);
+    register int loadSecond     = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
+    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
+
+    vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
+    vec_u8 vsrc0uc, vsrc1uc;
+    vec_s16 vsrc0ssH, vsrc1ssH;
+    vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
+    vec_s16 vsrc2ssH, vsrc3ssH, psum;
+    vec_u8 vdst, ppsum, vfdst, fsum;
+
+    if (((unsigned long)dst) % 16 == 0) {
+        fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
+                         0x14, 0x15, 0x16, 0x17,
+                         0x08, 0x09, 0x0A, 0x0B,
+                         0x0C, 0x0D, 0x0E, 0x0F};
+    } else {
+        fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
+                         0x04, 0x05, 0x06, 0x07,
+                         0x18, 0x19, 0x1A, 0x1B,
+                         0x1C, 0x1D, 0x1E, 0x1F};
+    }
+
+    vsrcAuc = vec_ld(0, src);
+
+    if (loadSecond)
+        vsrcBuc = vec_ld(16, src);
+    vsrcperm0 = vec_lvsl(0, src);
+    vsrcperm1 = vec_lvsl(1, src);
+
+    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
+    if (reallyBadAlign)
+        vsrc1uc = vsrcBuc;
+    else
+        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
+
+    vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc0uc);
+    vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc1uc);
+
+    if (!loadSecond) {// -> !reallyBadAlign
+        for (i = 0 ; i < h ; i++) {
+
+
+            vsrcCuc = vec_ld(stride + 0, src);
+
+            vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
+            vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
+
+            CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
+        }
+    } else {
+        vec_u8 vsrcDuc;
+        for (i = 0 ; i < h ; i++) {
+            vsrcCuc = vec_ld(stride + 0, src);
+            vsrcDuc = vec_ld(stride + 16, src);
+
+            vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
+            if (reallyBadAlign)
+                vsrc3uc = vsrcDuc;
+            else
+                vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
+
+            CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
+        }
+    }
+}
+#endif
+
+#undef noop
+#undef add28
+#undef CHROMA_MC8_ALTIVEC_CORE
+
+/* this code assumes stride % 16 == 0 */
+#ifdef PREFIX_h264_qpel16_h_lowpass_altivec
+static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
+    register int i;
+
+    LOAD_ZERO;
+    const vec_u8 permM2 = vec_lvsl(-2, src);
+    const vec_u8 permM1 = vec_lvsl(-1, src);
+    const vec_u8 permP0 = vec_lvsl(+0, src);
+    const vec_u8 permP1 = vec_lvsl(+1, src);
+    const vec_u8 permP2 = vec_lvsl(+2, src);
+    const vec_u8 permP3 = vec_lvsl(+3, src);
+    const vec_s16 v5ss = vec_splat_s16(5);
+    const vec_u16 v5us = vec_splat_u16(5);
+    const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
+    const vec_s16 v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));
+
+    vec_u8 srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
+
+    register int align = ((((unsigned long)src) - 2) % 16);
+
+    vec_s16 srcP0A, srcP0B, srcP1A, srcP1B,
+              srcP2A, srcP2B, srcP3A, srcP3B,
+              srcM1A, srcM1B, srcM2A, srcM2B,
+              sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
+              pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
+              psumA, psumB, sumA, sumB;
+
+    vec_u8 sum, fsum;
+
+    for (i = 0 ; i < 16 ; i ++) {
+        vec_u8 srcR1 = vec_ld(-2, src);
+        vec_u8 srcR2 = vec_ld(14, src);
+
+        switch (align) {
+        default: {
+            srcM2 = vec_perm(srcR1, srcR2, permM2);
+            srcM1 = vec_perm(srcR1, srcR2, permM1);
+            srcP0 = vec_perm(srcR1, srcR2, permP0);
+            srcP1 = vec_perm(srcR1, srcR2, permP1);
+            srcP2 = vec_perm(srcR1, srcR2, permP2);
+            srcP3 = vec_perm(srcR1, srcR2, permP3);
+        } break;
+        case 11: {
+            srcM2 = vec_perm(srcR1, srcR2, permM2);
+            srcM1 = vec_perm(srcR1, srcR2, permM1);
+            srcP0 = vec_perm(srcR1, srcR2, permP0);
+            srcP1 = vec_perm(srcR1, srcR2, permP1);
+            srcP2 = vec_perm(srcR1, srcR2, permP2);
+            srcP3 = srcR2;
+        } break;
+        case 12: {
+            vec_u8 srcR3 = vec_ld(30, src);
+            srcM2 = vec_perm(srcR1, srcR2, permM2);
+            srcM1 = vec_perm(srcR1, srcR2, permM1);
+            srcP0 = vec_perm(srcR1, srcR2, permP0);
+            srcP1 = vec_perm(srcR1, srcR2, permP1);
+            srcP2 = srcR2;
+            srcP3 = vec_perm(srcR2, srcR3, permP3);
+        } break;
+        case 13: {
+            vec_u8 srcR3 = vec_ld(30, src);
+            srcM2 = vec_perm(srcR1, srcR2, permM2);
+            srcM1 = vec_perm(srcR1, srcR2, permM1);
+            srcP0 = vec_perm(srcR1, srcR2, permP0);
+            srcP1 = srcR2;
+            srcP2 = vec_perm(srcR2, srcR3, permP2);
+            srcP3 = vec_perm(srcR2, srcR3, permP3);
+        } break;
+        case 14: {
+            vec_u8 srcR3 = vec_ld(30, src);
+            srcM2 = vec_perm(srcR1, srcR2, permM2);
+            srcM1 = vec_perm(srcR1, srcR2, permM1);
+            srcP0 = srcR2;
+            srcP1 = vec_perm(srcR2, srcR3, permP1);
+            srcP2 = vec_perm(srcR2, srcR3, permP2);
+            srcP3 = vec_perm(srcR2, srcR3, permP3);
+        } break;
+        case 15: {
+            vec_u8 srcR3 = vec_ld(30, src);
+            srcM2 = vec_perm(srcR1, srcR2, permM2);
+            srcM1 = srcR2;
+            srcP0 = vec_perm(srcR2, srcR3, permP0);
+            srcP1 = vec_perm(srcR2, srcR3, permP1);
+            srcP2 = vec_perm(srcR2, srcR3, permP2);
+            srcP3 = vec_perm(srcR2, srcR3, permP3);
+        } break;
+        }
+
+        srcP0A = (vec_s16) vec_mergeh(zero_u8v, srcP0);
+        srcP0B = (vec_s16) vec_mergel(zero_u8v, srcP0);
+        srcP1A = (vec_s16) vec_mergeh(zero_u8v, srcP1);
+        srcP1B = (vec_s16) vec_mergel(zero_u8v, srcP1);
+
+        srcP2A = (vec_s16) vec_mergeh(zero_u8v, srcP2);
+        srcP2B = (vec_s16) vec_mergel(zero_u8v, srcP2);
+        srcP3A = (vec_s16) vec_mergeh(zero_u8v, srcP3);
+        srcP3B = (vec_s16) vec_mergel(zero_u8v, srcP3);
+
+        srcM1A = (vec_s16) vec_mergeh(zero_u8v, srcM1);
+        srcM1B = (vec_s16) vec_mergel(zero_u8v, srcM1);
+        srcM2A = (vec_s16) vec_mergeh(zero_u8v, srcM2);
+        srcM2B = (vec_s16) vec_mergel(zero_u8v, srcM2);
+
+        sum1A = vec_adds(srcP0A, srcP1A);
+        sum1B = vec_adds(srcP0B, srcP1B);
+        sum2A = vec_adds(srcM1A, srcP2A);
+        sum2B = vec_adds(srcM1B, srcP2B);
+        sum3A = vec_adds(srcM2A, srcP3A);
+        sum3B = vec_adds(srcM2B, srcP3B);
+
+        pp1A = vec_mladd(sum1A, v20ss, v16ss);
+        pp1B = vec_mladd(sum1B, v20ss, v16ss);
+
+        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
+        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);
+
+        pp3A = vec_add(sum3A, pp1A);
+        pp3B = vec_add(sum3B, pp1B);
+
+        psumA = vec_sub(pp3A, pp2A);
+        psumB = vec_sub(pp3B, pp2B);
+
+        sumA = vec_sra(psumA, v5us);
+        sumB = vec_sra(psumB, v5us);
+
+        sum = vec_packsu(sumA, sumB);
+
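+        /* OP_U8_ALTIVEC is defined by the file including this template (see the
+         * PUT_OP_U8_ALTIVEC / AVG_OP_U8_ALTIVEC wrappers), so the same body is
+         * compiled once as the put_ variant and once as the avg_ variant. */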
+        ASSERT_ALIGNED(dst);
+
+        OP_U8_ALTIVEC(fsum, sum, vec_ld(0, dst));
+
+        vec_st(fsum, 0, dst);
+
+        src += srcStride;
+        dst += dstStride;
+    }
+}
+#endif
+
+/* this code assumes stride % 16 == 0 */
+#ifdef PREFIX_h264_qpel16_v_lowpass_altivec
+static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
+    register int i;
+
+    LOAD_ZERO;
+    const vec_u8 perm = vec_lvsl(0, src);
+    const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
+    const vec_u16 v5us = vec_splat_u16(5);
+    const vec_s16 v5ss = vec_splat_s16(5);
+    const vec_s16 v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));
+
+    uint8_t *srcbis = src - (srcStride * 2);
+
+    const vec_u8 srcM2a = vec_ld(0, srcbis);
+    const vec_u8 srcM2b = vec_ld(16, srcbis);
+    const vec_u8 srcM2 = vec_perm(srcM2a, srcM2b, perm);
+    //srcbis += srcStride;
+    const vec_u8 srcM1a = vec_ld(0, srcbis += srcStride);
+    const vec_u8 srcM1b = vec_ld(16, srcbis);
+    const vec_u8 srcM1 = vec_perm(srcM1a, srcM1b, perm);
+    //srcbis += srcStride;
+    const vec_u8 srcP0a = vec_ld(0, srcbis += srcStride);
+    const vec_u8 srcP0b = vec_ld(16, srcbis);
+    const vec_u8 srcP0 = vec_perm(srcP0a, srcP0b, perm);
+    //srcbis += srcStride;
+    const vec_u8 srcP1a = vec_ld(0, srcbis += srcStride);
+    const vec_u8 srcP1b = vec_ld(16, srcbis);
+    const vec_u8 srcP1 = vec_perm(srcP1a, srcP1b, perm);
+    //srcbis += srcStride;
+    const vec_u8 srcP2a = vec_ld(0, srcbis += srcStride);
+    const vec_u8 srcP2b = vec_ld(16, srcbis);
+    const vec_u8 srcP2 = vec_perm(srcP2a, srcP2b, perm);
+    //srcbis += srcStride;
+
+    vec_s16 srcM2ssA = (vec_s16) vec_mergeh(zero_u8v, srcM2);
+    vec_s16 srcM2ssB = (vec_s16) vec_mergel(zero_u8v, srcM2);
+    vec_s16 srcM1ssA = (vec_s16) vec_mergeh(zero_u8v, srcM1);
+    vec_s16 srcM1ssB = (vec_s16) vec_mergel(zero_u8v, srcM1);
+    vec_s16 srcP0ssA = (vec_s16) vec_mergeh(zero_u8v, srcP0);
+    vec_s16 srcP0ssB = (vec_s16) vec_mergel(zero_u8v, srcP0);
+    vec_s16 srcP1ssA = (vec_s16) vec_mergeh(zero_u8v, srcP1);
+    vec_s16 srcP1ssB = (vec_s16) vec_mergel(zero_u8v, srcP1);
+    vec_s16 srcP2ssA = (vec_s16) vec_mergeh(zero_u8v, srcP2);
+    vec_s16 srcP2ssB = (vec_s16) vec_mergel(zero_u8v, srcP2);
+
+    vec_s16 pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
+              psumA, psumB, sumA, sumB,
+              srcP3ssA, srcP3ssB,
+              sum1A, sum1B, sum2A, sum2B, sum3A, sum3B;
+
+    vec_u8 sum, fsum, srcP3a, srcP3b, srcP3;
+
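+    /* The vertical filter keeps a six-row window in registers (M2..P2 set up
+     * above, P3 loaded in the loop) and shifts it down by one row after each
+     * iteration, so only one new source row is loaded per output row. */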
+    for (i = 0 ; i < 16 ; i++) {
+        srcP3a = vec_ld(0, srcbis += srcStride);
+        srcP3b = vec_ld(16, srcbis);
+        srcP3 = vec_perm(srcP3a, srcP3b, perm);
+        srcP3ssA = (vec_s16) vec_mergeh(zero_u8v, srcP3);
+        srcP3ssB = (vec_s16) vec_mergel(zero_u8v, srcP3);
+        //srcbis += srcStride;
+
+        sum1A = vec_adds(srcP0ssA, srcP1ssA);
+        sum1B = vec_adds(srcP0ssB, srcP1ssB);
+        sum2A = vec_adds(srcM1ssA, srcP2ssA);
+        sum2B = vec_adds(srcM1ssB, srcP2ssB);
+        sum3A = vec_adds(srcM2ssA, srcP3ssA);
+        sum3B = vec_adds(srcM2ssB, srcP3ssB);
+
+        srcM2ssA = srcM1ssA;
+        srcM2ssB = srcM1ssB;
+        srcM1ssA = srcP0ssA;
+        srcM1ssB = srcP0ssB;
+        srcP0ssA = srcP1ssA;
+        srcP0ssB = srcP1ssB;
+        srcP1ssA = srcP2ssA;
+        srcP1ssB = srcP2ssB;
+        srcP2ssA = srcP3ssA;
+        srcP2ssB = srcP3ssB;
+
+        pp1A = vec_mladd(sum1A, v20ss, v16ss);
+        pp1B = vec_mladd(sum1B, v20ss, v16ss);
+
+        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
+        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);
+
+        pp3A = vec_add(sum3A, pp1A);
+        pp3B = vec_add(sum3B, pp1B);
+
+        psumA = vec_sub(pp3A, pp2A);
+        psumB = vec_sub(pp3B, pp2B);
+
+        sumA = vec_sra(psumA, v5us);
+        sumB = vec_sra(psumB, v5us);
+
+        sum = vec_packsu(sumA, sumB);
+
+        ASSERT_ALIGNED(dst);
+
+        OP_U8_ALTIVEC(fsum, sum, vec_ld(0, dst));
+
+        vec_st(fsum, 0, dst);
+
+        dst += dstStride;
+    }
+}
+#endif
+
+/* this code assumes stride % 16 == 0 *and* that tmp is properly aligned */
+#ifdef PREFIX_h264_qpel16_hv_lowpass_altivec
+static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, uint8_t * src, int dstStride, int tmpStride, int srcStride) {
+    register int i;
+    LOAD_ZERO;
+    const vec_u8 permM2 = vec_lvsl(-2, src);
+    const vec_u8 permM1 = vec_lvsl(-1, src);
+    const vec_u8 permP0 = vec_lvsl(+0, src);
+    const vec_u8 permP1 = vec_lvsl(+1, src);
+    const vec_u8 permP2 = vec_lvsl(+2, src);
+    const vec_u8 permP3 = vec_lvsl(+3, src);
+    const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
+    const vec_u32 v10ui = vec_splat_u32(10);
+    const vec_s16 v5ss = vec_splat_s16(5);
+    const vec_s16 v1ss = vec_splat_s16(1);
+    const vec_s32 v512si = vec_sl(vec_splat_s32(1),vec_splat_u32(9));
+    const vec_u32 v16ui = vec_sl(vec_splat_u32(1),vec_splat_u32(4));
+
+    register int align = ((((unsigned long)src) - 2) % 16);
+
+    vec_s16 srcP0A, srcP0B, srcP1A, srcP1B,
+              srcP2A, srcP2B, srcP3A, srcP3B,
+              srcM1A, srcM1B, srcM2A, srcM2B,
+              sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
+              pp1A, pp1B, pp2A, pp2B, psumA, psumB;
+
+    const vec_u8 mperm = (const vec_u8)
+        {0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
+         0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F};
+    int16_t *tmpbis = tmp;
+
+    vec_s16 tmpM1ssA, tmpM1ssB, tmpM2ssA, tmpM2ssB,
+              tmpP0ssA, tmpP0ssB, tmpP1ssA, tmpP1ssB,
+              tmpP2ssA, tmpP2ssB;
+
+    vec_s32 pp1Ae, pp1Ao, pp1Be, pp1Bo, pp2Ae, pp2Ao, pp2Be, pp2Bo,
+              pp3Ae, pp3Ao, pp3Be, pp3Bo, pp1cAe, pp1cAo, pp1cBe, pp1cBo,
+              pp32Ae, pp32Ao, pp32Be, pp32Bo, sumAe, sumAo, sumBe, sumBo,
+              ssumAe, ssumAo, ssumBe, ssumBo;
+    vec_u8 fsum, sumv, sum;
+    vec_s16 ssume, ssumo;
+
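+    /* Two passes: the first filters h + 5 = 21 rows horizontally and stores the
+     * unrounded 16-bit intermediates in tmp; the second filters tmp vertically
+     * in 32-bit precision and applies the combined (x + 512) >> 10 rounding. */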
+    src -= (2 * srcStride);
+    for (i = 0 ; i < 21 ; i ++) {
+        vec_u8 srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
+        vec_u8 srcR1 = vec_ld(-2, src);
+        vec_u8 srcR2 = vec_ld(14, src);
+
+        switch (align) {
+        default: {
+            srcM2 = vec_perm(srcR1, srcR2, permM2);
+            srcM1 = vec_perm(srcR1, srcR2, permM1);
+            srcP0 = vec_perm(srcR1, srcR2, permP0);
+            srcP1 = vec_perm(srcR1, srcR2, permP1);
+            srcP2 = vec_perm(srcR1, srcR2, permP2);
+            srcP3 = vec_perm(srcR1, srcR2, permP3);
+        } break;
+        case 11: {
+            srcM2 = vec_perm(srcR1, srcR2, permM2);
+            srcM1 = vec_perm(srcR1, srcR2, permM1);
+            srcP0 = vec_perm(srcR1, srcR2, permP0);
+            srcP1 = vec_perm(srcR1, srcR2, permP1);
+            srcP2 = vec_perm(srcR1, srcR2, permP2);
+            srcP3 = srcR2;
+        } break;
+        case 12: {
+            vec_u8 srcR3 = vec_ld(30, src);
+            srcM2 = vec_perm(srcR1, srcR2, permM2);
+            srcM1 = vec_perm(srcR1, srcR2, permM1);
+            srcP0 = vec_perm(srcR1, srcR2, permP0);
+            srcP1 = vec_perm(srcR1, srcR2, permP1);
+            srcP2 = srcR2;
+            srcP3 = vec_perm(srcR2, srcR3, permP3);
+        } break;
+        case 13: {
+            vec_u8 srcR3 = vec_ld(30, src);
+            srcM2 = vec_perm(srcR1, srcR2, permM2);
+            srcM1 = vec_perm(srcR1, srcR2, permM1);
+            srcP0 = vec_perm(srcR1, srcR2, permP0);
+            srcP1 = srcR2;
+            srcP2 = vec_perm(srcR2, srcR3, permP2);
+            srcP3 = vec_perm(srcR2, srcR3, permP3);
+        } break;
+        case 14: {
+            vec_u8 srcR3 = vec_ld(30, src);
+            srcM2 = vec_perm(srcR1, srcR2, permM2);
+            srcM1 = vec_perm(srcR1, srcR2, permM1);
+            srcP0 = srcR2;
+            srcP1 = vec_perm(srcR2, srcR3, permP1);
+            srcP2 = vec_perm(srcR2, srcR3, permP2);
+            srcP3 = vec_perm(srcR2, srcR3, permP3);
+        } break;
+        case 15: {
+            vec_u8 srcR3 = vec_ld(30, src);
+            srcM2 = vec_perm(srcR1, srcR2, permM2);
+            srcM1 = srcR2;
+            srcP0 = vec_perm(srcR2, srcR3, permP0);
+            srcP1 = vec_perm(srcR2, srcR3, permP1);
+            srcP2 = vec_perm(srcR2, srcR3, permP2);
+            srcP3 = vec_perm(srcR2, srcR3, permP3);
+        } break;
+        }
+
+        srcP0A = (vec_s16) vec_mergeh(zero_u8v, srcP0);
+        srcP0B = (vec_s16) vec_mergel(zero_u8v, srcP0);
+        srcP1A = (vec_s16) vec_mergeh(zero_u8v, srcP1);
+        srcP1B = (vec_s16) vec_mergel(zero_u8v, srcP1);
+
+        srcP2A = (vec_s16) vec_mergeh(zero_u8v, srcP2);
+        srcP2B = (vec_s16) vec_mergel(zero_u8v, srcP2);
+        srcP3A = (vec_s16) vec_mergeh(zero_u8v, srcP3);
+        srcP3B = (vec_s16) vec_mergel(zero_u8v, srcP3);
+
+        srcM1A = (vec_s16) vec_mergeh(zero_u8v, srcM1);
+        srcM1B = (vec_s16) vec_mergel(zero_u8v, srcM1);
+        srcM2A = (vec_s16) vec_mergeh(zero_u8v, srcM2);
+        srcM2B = (vec_s16) vec_mergel(zero_u8v, srcM2);
+
+        sum1A = vec_adds(srcP0A, srcP1A);
+        sum1B = vec_adds(srcP0B, srcP1B);
+        sum2A = vec_adds(srcM1A, srcP2A);
+        sum2B = vec_adds(srcM1B, srcP2B);
+        sum3A = vec_adds(srcM2A, srcP3A);
+        sum3B = vec_adds(srcM2B, srcP3B);
+
+        pp1A = vec_mladd(sum1A, v20ss, sum3A);
+        pp1B = vec_mladd(sum1B, v20ss, sum3B);
+
+        pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
+        pp2B = vec_mladd(sum2B, v5ss, zero_s16v);
+
+        psumA = vec_sub(pp1A, pp2A);
+        psumB = vec_sub(pp1B, pp2B);
+
+        vec_st(psumA, 0, tmp);
+        vec_st(psumB, 16, tmp);
+
+        src += srcStride;
+        tmp += tmpStride; /* tmp is an int16_t pointer and the stride is 16 elements, so plain pointer arithmetic is correct here */
+    }
+
+    tmpM2ssA = vec_ld(0, tmpbis);
+    tmpM2ssB = vec_ld(16, tmpbis);
+    tmpbis += tmpStride;
+    tmpM1ssA = vec_ld(0, tmpbis);
+    tmpM1ssB = vec_ld(16, tmpbis);
+    tmpbis += tmpStride;
+    tmpP0ssA = vec_ld(0, tmpbis);
+    tmpP0ssB = vec_ld(16, tmpbis);
+    tmpbis += tmpStride;
+    tmpP1ssA = vec_ld(0, tmpbis);
+    tmpP1ssB = vec_ld(16, tmpbis);
+    tmpbis += tmpStride;
+    tmpP2ssA = vec_ld(0, tmpbis);
+    tmpP2ssB = vec_ld(16, tmpbis);
+    tmpbis += tmpStride;
+
+    for (i = 0 ; i < 16 ; i++) {
+        const vec_s16 tmpP3ssA = vec_ld(0, tmpbis);
+        const vec_s16 tmpP3ssB = vec_ld(16, tmpbis);
+
+        const vec_s16 sum1A = vec_adds(tmpP0ssA, tmpP1ssA);
+        const vec_s16 sum1B = vec_adds(tmpP0ssB, tmpP1ssB);
+        const vec_s16 sum2A = vec_adds(tmpM1ssA, tmpP2ssA);
+        const vec_s16 sum2B = vec_adds(tmpM1ssB, tmpP2ssB);
+        const vec_s16 sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
+        const vec_s16 sum3B = vec_adds(tmpM2ssB, tmpP3ssB);
+
+        tmpbis += tmpStride;
+
+        tmpM2ssA = tmpM1ssA;
+        tmpM2ssB = tmpM1ssB;
+        tmpM1ssA = tmpP0ssA;
+        tmpM1ssB = tmpP0ssB;
+        tmpP0ssA = tmpP1ssA;
+        tmpP0ssB = tmpP1ssB;
+        tmpP1ssA = tmpP2ssA;
+        tmpP1ssB = tmpP2ssB;
+        tmpP2ssA = tmpP3ssA;
+        tmpP2ssB = tmpP3ssB;
+
+        pp1Ae = vec_mule(sum1A, v20ss);
+        pp1Ao = vec_mulo(sum1A, v20ss);
+        pp1Be = vec_mule(sum1B, v20ss);
+        pp1Bo = vec_mulo(sum1B, v20ss);
+
+        pp2Ae = vec_mule(sum2A, v5ss);
+        pp2Ao = vec_mulo(sum2A, v5ss);
+        pp2Be = vec_mule(sum2B, v5ss);
+        pp2Bo = vec_mulo(sum2B, v5ss);
+
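+        /* sum3 only needs weight 1: the even 16-bit lanes are sign-extended to
+         * 32 bits by the arithmetic shift right by 16 (vec_sra with v16ui) and
+         * the odd lanes by vec_mulo with v1ss, matching the even/odd lane split
+         * produced by vec_mule/vec_mulo for the other terms. */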
+        pp3Ae = vec_sra((vec_s32)sum3A, v16ui);
+        pp3Ao = vec_mulo(sum3A, v1ss);
+        pp3Be = vec_sra((vec_s32)sum3B, v16ui);
+        pp3Bo = vec_mulo(sum3B, v1ss);
+
+        pp1cAe = vec_add(pp1Ae, v512si);
+        pp1cAo = vec_add(pp1Ao, v512si);
+        pp1cBe = vec_add(pp1Be, v512si);
+        pp1cBo = vec_add(pp1Bo, v512si);
+
+        pp32Ae = vec_sub(pp3Ae, pp2Ae);
+        pp32Ao = vec_sub(pp3Ao, pp2Ao);
+        pp32Be = vec_sub(pp3Be, pp2Be);
+        pp32Bo = vec_sub(pp3Bo, pp2Bo);
+
+        sumAe = vec_add(pp1cAe, pp32Ae);
+        sumAo = vec_add(pp1cAo, pp32Ao);
+        sumBe = vec_add(pp1cBe, pp32Be);
+        sumBo = vec_add(pp1cBo, pp32Bo);
+
+        ssumAe = vec_sra(sumAe, v10ui);
+        ssumAo = vec_sra(sumAo, v10ui);
+        ssumBe = vec_sra(sumBe, v10ui);
+        ssumBo = vec_sra(sumBo, v10ui);
+
+        ssume = vec_packs(ssumAe, ssumBe);
+        ssumo = vec_packs(ssumAo, ssumBo);
+
+        sumv = vec_packsu(ssume, ssumo);
+        sum = vec_perm(sumv, sumv, mperm);
+
+        ASSERT_ALIGNED(dst);
+
+        OP_U8_ALTIVEC(fsum, sum, vec_ld(0, dst));
+
+        vec_st(fsum, 0, dst);
+
+        dst += dstStride;
+    }
+}
+#endif
index b081989..ad2c63e 100644 (file)
@@ -325,13 +325,13 @@ static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, int16_t *block)
 
 #define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
 #define PREFIX_no_rnd_vc1_chroma_mc8_altivec   put_no_rnd_vc1_chroma_mc8_altivec
-#include "h264_altivec_template.c"
+#include "h264_qpel_template.c"
 #undef OP_U8_ALTIVEC
 #undef PREFIX_no_rnd_vc1_chroma_mc8_altivec
 
 #define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
 #define PREFIX_no_rnd_vc1_chroma_mc8_altivec   avg_no_rnd_vc1_chroma_mc8_altivec
-#include "h264_altivec_template.c"
+#include "h264_qpel_template.c"
 #undef OP_U8_ALTIVEC
 #undef PREFIX_no_rnd_vc1_chroma_mc8_altivec
 
index bcd1a46..ba5ae0f 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "avcodec.h"
 #include "dsputil.h"
+#include "h264qpel.h"
 #include "rv34dsp.h"
 
 #define RV30_LOWPASS(OPNAME, OP) \
@@ -253,10 +254,12 @@ RV30_MC(avg_, 8)
 RV30_MC(avg_, 16)
 
 av_cold void ff_rv30dsp_init(RV34DSPContext *c, DSPContext* dsp) {
+    H264QpelContext qpel;
 
     ff_rv34dsp_init(c, dsp);
+    ff_h264qpel_init(&qpel, 8);
 
-    c->put_pixels_tab[0][ 0] = dsp->put_h264_qpel_pixels_tab[0][0];
+    c->put_pixels_tab[0][ 0] = qpel.put_h264_qpel_pixels_tab[0][0];
     c->put_pixels_tab[0][ 1] = put_rv30_tpel16_mc10_c;
     c->put_pixels_tab[0][ 2] = put_rv30_tpel16_mc20_c;
     c->put_pixels_tab[0][ 4] = put_rv30_tpel16_mc01_c;
@@ -265,7 +268,7 @@ av_cold void ff_rv30dsp_init(RV34DSPContext *c, DSPContext* dsp) {
     c->put_pixels_tab[0][ 8] = put_rv30_tpel16_mc02_c;
     c->put_pixels_tab[0][ 9] = put_rv30_tpel16_mc12_c;
     c->put_pixels_tab[0][10] = put_rv30_tpel16_mc22_c;
-    c->avg_pixels_tab[0][ 0] = dsp->avg_h264_qpel_pixels_tab[0][0];
+    c->avg_pixels_tab[0][ 0] = qpel.avg_h264_qpel_pixels_tab[0][0];
     c->avg_pixels_tab[0][ 1] = avg_rv30_tpel16_mc10_c;
     c->avg_pixels_tab[0][ 2] = avg_rv30_tpel16_mc20_c;
     c->avg_pixels_tab[0][ 4] = avg_rv30_tpel16_mc01_c;
@@ -274,7 +277,7 @@ av_cold void ff_rv30dsp_init(RV34DSPContext *c, DSPContext* dsp) {
     c->avg_pixels_tab[0][ 8] = avg_rv30_tpel16_mc02_c;
     c->avg_pixels_tab[0][ 9] = avg_rv30_tpel16_mc12_c;
     c->avg_pixels_tab[0][10] = avg_rv30_tpel16_mc22_c;
-    c->put_pixels_tab[1][ 0] = dsp->put_h264_qpel_pixels_tab[1][0];
+    c->put_pixels_tab[1][ 0] = qpel.put_h264_qpel_pixels_tab[1][0];
     c->put_pixels_tab[1][ 1] = put_rv30_tpel8_mc10_c;
     c->put_pixels_tab[1][ 2] = put_rv30_tpel8_mc20_c;
     c->put_pixels_tab[1][ 4] = put_rv30_tpel8_mc01_c;
@@ -283,7 +286,7 @@ av_cold void ff_rv30dsp_init(RV34DSPContext *c, DSPContext* dsp) {
     c->put_pixels_tab[1][ 8] = put_rv30_tpel8_mc02_c;
     c->put_pixels_tab[1][ 9] = put_rv30_tpel8_mc12_c;
     c->put_pixels_tab[1][10] = put_rv30_tpel8_mc22_c;
-    c->avg_pixels_tab[1][ 0] = dsp->avg_h264_qpel_pixels_tab[1][0];
+    c->avg_pixels_tab[1][ 0] = qpel.avg_h264_qpel_pixels_tab[1][0];
     c->avg_pixels_tab[1][ 1] = avg_rv30_tpel8_mc10_c;
     c->avg_pixels_tab[1][ 2] = avg_rv30_tpel8_mc20_c;
     c->avg_pixels_tab[1][ 4] = avg_rv30_tpel8_mc01_c;
index c6968d9..ef723b0 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "avcodec.h"
 #include "dsputil.h"
+#include "h264qpel.h"
 #include "rv34dsp.h"
 #include "libavutil/common.h"
 
@@ -518,18 +519,20 @@ static int rv40_v_loop_filter_strength(uint8_t *src, ptrdiff_t stride,
 }
 
 av_cold void ff_rv40dsp_init(RV34DSPContext *c, DSPContext* dsp) {
+    H264QpelContext qpel;
 
     ff_rv34dsp_init(c, dsp);
+    ff_h264qpel_init(&qpel, 8);
 
-    c->put_pixels_tab[0][ 0] = dsp->put_h264_qpel_pixels_tab[0][0];
+    c->put_pixels_tab[0][ 0] = qpel.put_h264_qpel_pixels_tab[0][0];
     c->put_pixels_tab[0][ 1] = put_rv40_qpel16_mc10_c;
-    c->put_pixels_tab[0][ 2] = dsp->put_h264_qpel_pixels_tab[0][2];
+    c->put_pixels_tab[0][ 2] = qpel.put_h264_qpel_pixels_tab[0][2];
     c->put_pixels_tab[0][ 3] = put_rv40_qpel16_mc30_c;
     c->put_pixels_tab[0][ 4] = put_rv40_qpel16_mc01_c;
     c->put_pixels_tab[0][ 5] = put_rv40_qpel16_mc11_c;
     c->put_pixels_tab[0][ 6] = put_rv40_qpel16_mc21_c;
     c->put_pixels_tab[0][ 7] = put_rv40_qpel16_mc31_c;
-    c->put_pixels_tab[0][ 8] = dsp->put_h264_qpel_pixels_tab[0][8];
+    c->put_pixels_tab[0][ 8] = qpel.put_h264_qpel_pixels_tab[0][8];
     c->put_pixels_tab[0][ 9] = put_rv40_qpel16_mc12_c;
     c->put_pixels_tab[0][10] = put_rv40_qpel16_mc22_c;
     c->put_pixels_tab[0][11] = put_rv40_qpel16_mc32_c;
@@ -537,15 +540,15 @@ av_cold void ff_rv40dsp_init(RV34DSPContext *c, DSPContext* dsp) {
     c->put_pixels_tab[0][13] = put_rv40_qpel16_mc13_c;
     c->put_pixels_tab[0][14] = put_rv40_qpel16_mc23_c;
     c->put_pixels_tab[0][15] = ff_put_rv40_qpel16_mc33_c;
-    c->avg_pixels_tab[0][ 0] = dsp->avg_h264_qpel_pixels_tab[0][0];
+    c->avg_pixels_tab[0][ 0] = qpel.avg_h264_qpel_pixels_tab[0][0];
     c->avg_pixels_tab[0][ 1] = avg_rv40_qpel16_mc10_c;
-    c->avg_pixels_tab[0][ 2] = dsp->avg_h264_qpel_pixels_tab[0][2];
+    c->avg_pixels_tab[0][ 2] = qpel.avg_h264_qpel_pixels_tab[0][2];
     c->avg_pixels_tab[0][ 3] = avg_rv40_qpel16_mc30_c;
     c->avg_pixels_tab[0][ 4] = avg_rv40_qpel16_mc01_c;
     c->avg_pixels_tab[0][ 5] = avg_rv40_qpel16_mc11_c;
     c->avg_pixels_tab[0][ 6] = avg_rv40_qpel16_mc21_c;
     c->avg_pixels_tab[0][ 7] = avg_rv40_qpel16_mc31_c;
-    c->avg_pixels_tab[0][ 8] = dsp->avg_h264_qpel_pixels_tab[0][8];
+    c->avg_pixels_tab[0][ 8] = qpel.avg_h264_qpel_pixels_tab[0][8];
     c->avg_pixels_tab[0][ 9] = avg_rv40_qpel16_mc12_c;
     c->avg_pixels_tab[0][10] = avg_rv40_qpel16_mc22_c;
     c->avg_pixels_tab[0][11] = avg_rv40_qpel16_mc32_c;
@@ -553,15 +556,15 @@ av_cold void ff_rv40dsp_init(RV34DSPContext *c, DSPContext* dsp) {
     c->avg_pixels_tab[0][13] = avg_rv40_qpel16_mc13_c;
     c->avg_pixels_tab[0][14] = avg_rv40_qpel16_mc23_c;
     c->avg_pixels_tab[0][15] = ff_avg_rv40_qpel16_mc33_c;
-    c->put_pixels_tab[1][ 0] = dsp->put_h264_qpel_pixels_tab[1][0];
+    c->put_pixels_tab[1][ 0] = qpel.put_h264_qpel_pixels_tab[1][0];
     c->put_pixels_tab[1][ 1] = put_rv40_qpel8_mc10_c;
-    c->put_pixels_tab[1][ 2] = dsp->put_h264_qpel_pixels_tab[1][2];
+    c->put_pixels_tab[1][ 2] = qpel.put_h264_qpel_pixels_tab[1][2];
     c->put_pixels_tab[1][ 3] = put_rv40_qpel8_mc30_c;
     c->put_pixels_tab[1][ 4] = put_rv40_qpel8_mc01_c;
     c->put_pixels_tab[1][ 5] = put_rv40_qpel8_mc11_c;
     c->put_pixels_tab[1][ 6] = put_rv40_qpel8_mc21_c;
     c->put_pixels_tab[1][ 7] = put_rv40_qpel8_mc31_c;
-    c->put_pixels_tab[1][ 8] = dsp->put_h264_qpel_pixels_tab[1][8];
+    c->put_pixels_tab[1][ 8] = qpel.put_h264_qpel_pixels_tab[1][8];
     c->put_pixels_tab[1][ 9] = put_rv40_qpel8_mc12_c;
     c->put_pixels_tab[1][10] = put_rv40_qpel8_mc22_c;
     c->put_pixels_tab[1][11] = put_rv40_qpel8_mc32_c;
@@ -569,15 +572,15 @@ av_cold void ff_rv40dsp_init(RV34DSPContext *c, DSPContext* dsp) {
     c->put_pixels_tab[1][13] = put_rv40_qpel8_mc13_c;
     c->put_pixels_tab[1][14] = put_rv40_qpel8_mc23_c;
     c->put_pixels_tab[1][15] = ff_put_rv40_qpel8_mc33_c;
-    c->avg_pixels_tab[1][ 0] = dsp->avg_h264_qpel_pixels_tab[1][0];
+    c->avg_pixels_tab[1][ 0] = qpel.avg_h264_qpel_pixels_tab[1][0];
     c->avg_pixels_tab[1][ 1] = avg_rv40_qpel8_mc10_c;
-    c->avg_pixels_tab[1][ 2] = dsp->avg_h264_qpel_pixels_tab[1][2];
+    c->avg_pixels_tab[1][ 2] = qpel.avg_h264_qpel_pixels_tab[1][2];
     c->avg_pixels_tab[1][ 3] = avg_rv40_qpel8_mc30_c;
     c->avg_pixels_tab[1][ 4] = avg_rv40_qpel8_mc01_c;
     c->avg_pixels_tab[1][ 5] = avg_rv40_qpel8_mc11_c;
     c->avg_pixels_tab[1][ 6] = avg_rv40_qpel8_mc21_c;
     c->avg_pixels_tab[1][ 7] = avg_rv40_qpel8_mc31_c;
-    c->avg_pixels_tab[1][ 8] = dsp->avg_h264_qpel_pixels_tab[1][8];
+    c->avg_pixels_tab[1][ 8] = qpel.avg_h264_qpel_pixels_tab[1][8];
     c->avg_pixels_tab[1][ 9] = avg_rv40_qpel8_mc12_c;
     c->avg_pixels_tab[1][10] = avg_rv40_qpel8_mc22_c;
     c->avg_pixels_tab[1][11] = avg_rv40_qpel8_mc32_c;
index 9058e4f..f723f76 100644 (file)
@@ -396,15 +396,6 @@ void ff_dsputil_init_align(DSPContext* c, AVCodecContext *avctx)
     dspfunc(avg_qpel, 1, 8);
     /* dspfunc(avg_no_rnd_qpel, 1, 8); */
 
-    if (!high_bit_depth) {
-    dspfunc(put_h264_qpel, 0, 16);
-    dspfunc(put_h264_qpel, 1, 8);
-    dspfunc(put_h264_qpel, 2, 4);
-    dspfunc(avg_h264_qpel, 0, 16);
-    dspfunc(avg_h264_qpel, 1, 8);
-    dspfunc(avg_h264_qpel, 2, 4);
-    }
-
 #undef dspfunc
     if (!high_bit_depth) {
     c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_sh4;
index 20540f7..055d184 100644 (file)
@@ -842,407 +842,6 @@ QPEL_MC(0, avg_       , _       , op_avg)
 #undef op_put
 #undef op_put_no_rnd
 
-#define H264_LOWPASS(OPNAME, OP, OP2) \
-static inline void OPNAME ## h264_qpel_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,int w,int h){\
-    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
-    do {\
-        int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
-        uint8_t *s = src-2;\
-        srcB = *s++;\
-        srcA = *s++;\
-        src0 = *s++;\
-        src1 = *s++;\
-        src2 = *s++;\
-        src3 = *s++;\
-        OP(dst[0], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
-        src4 = *s++;\
-        OP(dst[1], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
-        src5 = *s++;\
-        OP(dst[2], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
-        src6 = *s++;\
-        OP(dst[3], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
-      if (w>4) { /* it optimized */ \
-        int src7,src8,src9,src10; \
-        src7 = *s++;\
-        OP(dst[4], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
-        src8 = *s++;\
-        OP(dst[5], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
-        src9 = *s++;\
-        OP(dst[6], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
-        src10 = *s++;\
-        OP(dst[7], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
-       if (w>8) { \
-        int src11,src12,src13,src14,src15,src16,src17,src18; \
-        src11 = *s++;\
-        OP(dst[8] , (src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));\
-        src12 = *s++;\
-        OP(dst[9] , (src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));\
-        src13 = *s++;\
-        OP(dst[10], (src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));\
-        src14 = *s++;\
-        OP(dst[11], (src11+src12)*20 - (src10+src13)*5 + (src9 +src14));\
-        src15 = *s++;\
-        OP(dst[12], (src12+src13)*20 - (src11+src14)*5 + (src10+src15));\
-        src16 = *s++;\
-        OP(dst[13], (src13+src14)*20 - (src12+src15)*5 + (src11+src16));\
-        src17 = *s++;\
-        OP(dst[14], (src14+src15)*20 - (src13+src16)*5 + (src12+src17));\
-        src18 = *s++;\
-        OP(dst[15], (src15+src16)*20 - (src14+src17)*5 + (src13+src18));\
-       } \
-      } \
-        dst+=dstStride;\
-        src+=srcStride;\
-    }while(--h);\
-}\
-\
-static inline void OPNAME ## h264_qpel_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,int w,int h){\
-    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
-    do{\
-        int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
-        uint8_t *s = src-2*srcStride,*d=dst;\
-        srcB = *s; s+=srcStride;\
-        srcA = *s; s+=srcStride;\
-        src0 = *s; s+=srcStride;\
-        src1 = *s; s+=srcStride;\
-        src2 = *s; s+=srcStride;\
-        src3 = *s; s+=srcStride;\
-        OP(*d, (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));d+=dstStride;\
-        src4 = *s; s+=srcStride;\
-        OP(*d, (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));d+=dstStride;\
-        src5 = *s; s+=srcStride;\
-        OP(*d, (src2+src3)*20 - (src1+src4)*5 + (src0+src5));d+=dstStride;\
-        src6 = *s; s+=srcStride;\
-        OP(*d, (src3+src4)*20 - (src2+src5)*5 + (src1+src6));d+=dstStride;\
-      if (h>4) { \
-        int src7,src8,src9,src10; \
-        src7 = *s; s+=srcStride;\
-        OP(*d, (src4+src5)*20 - (src3+src6)*5 + (src2+src7));d+=dstStride;\
-        src8 = *s; s+=srcStride;\
-        OP(*d, (src5+src6)*20 - (src4+src7)*5 + (src3+src8));d+=dstStride;\
-        src9 = *s; s+=srcStride;\
-        OP(*d, (src6+src7)*20 - (src5+src8)*5 + (src4+src9));d+=dstStride;\
-        src10 = *s; s+=srcStride;\
-        OP(*d, (src7+src8)*20 - (src6+src9)*5 + (src5+src10));d+=dstStride;\
-       if (h>8) { \
-        int src11,src12,src13,src14,src15,src16,src17,src18; \
-        src11 = *s; s+=srcStride;\
-        OP(*d , (src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));d+=dstStride;\
-        src12 = *s; s+=srcStride;\
-        OP(*d , (src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));d+=dstStride;\
-        src13 = *s; s+=srcStride;\
-        OP(*d, (src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));d+=dstStride;\
-        src14 = *s; s+=srcStride;\
-        OP(*d, (src11+src12)*20 - (src10+src13)*5 + (src9 +src14));d+=dstStride;\
-        src15 = *s; s+=srcStride;\
-        OP(*d, (src12+src13)*20 - (src11+src14)*5 + (src10+src15));d+=dstStride;\
-        src16 = *s; s+=srcStride;\
-        OP(*d, (src13+src14)*20 - (src12+src15)*5 + (src11+src16));d+=dstStride;\
-        src17 = *s; s+=srcStride;\
-        OP(*d, (src14+src15)*20 - (src13+src16)*5 + (src12+src17));d+=dstStride;\
-        src18 = *s; s+=srcStride;\
-        OP(*d, (src15+src16)*20 - (src14+src17)*5 + (src13+src18));d+=dstStride;\
-       } \
-      } \
-        dst++;\
-        src++;\
-    }while(--w);\
-}\
-\
-static inline void OPNAME ## h264_qpel_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride,int w,int h){\
-    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
-    int i;\
-    src -= 2*srcStride;\
-    i= h+5; \
-    do {\
-        int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
-        uint8_t *s = src-2;\
-        srcB = *s++;\
-        srcA = *s++;\
-        src0 = *s++;\
-        src1 = *s++;\
-        src2 = *s++;\
-        src3 = *s++;\
-        tmp[0] = ((src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
-        src4 = *s++;\
-        tmp[1] = ((src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
-        src5 = *s++;\
-        tmp[2] = ((src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
-        src6 = *s++;\
-        tmp[3] = ((src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
-      if (w>4) { /* it optimized */ \
-        int src7,src8,src9,src10; \
-        src7 = *s++;\
-        tmp[4] = ((src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
-        src8 = *s++;\
-        tmp[5] = ((src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
-        src9 = *s++;\
-        tmp[6] = ((src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
-        src10 = *s++;\
-        tmp[7] = ((src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
-       if (w>8) { \
-        int src11,src12,src13,src14,src15,src16,src17,src18; \
-        src11 = *s++;\
-        tmp[8] = ((src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));\
-        src12 = *s++;\
-        tmp[9] = ((src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));\
-        src13 = *s++;\
-        tmp[10] = ((src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));\
-        src14 = *s++;\
-        tmp[11] = ((src11+src12)*20 - (src10+src13)*5 + (src9 +src14));\
-        src15 = *s++;\
-        tmp[12] = ((src12+src13)*20 - (src11+src14)*5 + (src10+src15));\
-        src16 = *s++;\
-        tmp[13] = ((src13+src14)*20 - (src12+src15)*5 + (src11+src16));\
-        src17 = *s++;\
-        tmp[14] = ((src14+src15)*20 - (src13+src16)*5 + (src12+src17));\
-        src18 = *s++;\
-        tmp[15] = ((src15+src16)*20 - (src14+src17)*5 + (src13+src18));\
-       } \
-      } \
-        tmp+=tmpStride;\
-        src+=srcStride;\
-    }while(--i);\
-    tmp -= tmpStride*(h+5-2);\
-    i = w; \
-    do {\
-        int tmpB,tmpA,tmp0,tmp1,tmp2,tmp3,tmp4,tmp5,tmp6;\
-        int16_t *s = tmp-2*tmpStride; \
-        uint8_t *d=dst;\
-        tmpB = *s; s+=tmpStride;\
-        tmpA = *s; s+=tmpStride;\
-        tmp0 = *s; s+=tmpStride;\
-        tmp1 = *s; s+=tmpStride;\
-        tmp2 = *s; s+=tmpStride;\
-        tmp3 = *s; s+=tmpStride;\
-        OP2(*d, (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));d+=dstStride;\
-        tmp4 = *s; s+=tmpStride;\
-        OP2(*d, (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));d+=dstStride;\
-        tmp5 = *s; s+=tmpStride;\
-        OP2(*d, (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));d+=dstStride;\
-        tmp6 = *s; s+=tmpStride;\
-        OP2(*d, (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));d+=dstStride;\
-      if (h>4) { \
-        int tmp7,tmp8,tmp9,tmp10; \
-        tmp7 = *s; s+=tmpStride;\
-        OP2(*d, (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));d+=dstStride;\
-        tmp8 = *s; s+=tmpStride;\
-        OP2(*d, (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));d+=dstStride;\
-        tmp9 = *s; s+=tmpStride;\
-        OP2(*d, (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));d+=dstStride;\
-        tmp10 = *s; s+=tmpStride;\
-        OP2(*d, (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));d+=dstStride;\
-       if (h>8) { \
-        int tmp11,tmp12,tmp13,tmp14,tmp15,tmp16,tmp17,tmp18; \
-        tmp11 = *s; s+=tmpStride;\
-        OP2(*d , (tmp8 +tmp9 )*20 - (tmp7 +tmp10)*5 + (tmp6 +tmp11));d+=dstStride;\
-        tmp12 = *s; s+=tmpStride;\
-        OP2(*d , (tmp9 +tmp10)*20 - (tmp8 +tmp11)*5 + (tmp7 +tmp12));d+=dstStride;\
-        tmp13 = *s; s+=tmpStride;\
-        OP2(*d, (tmp10+tmp11)*20 - (tmp9 +tmp12)*5 + (tmp8 +tmp13));d+=dstStride;\
-        tmp14 = *s; s+=tmpStride;\
-        OP2(*d, (tmp11+tmp12)*20 - (tmp10+tmp13)*5 + (tmp9 +tmp14));d+=dstStride;\
-        tmp15 = *s; s+=tmpStride;\
-        OP2(*d, (tmp12+tmp13)*20 - (tmp11+tmp14)*5 + (tmp10+tmp15));d+=dstStride;\
-        tmp16 = *s; s+=tmpStride;\
-        OP2(*d, (tmp13+tmp14)*20 - (tmp12+tmp15)*5 + (tmp11+tmp16));d+=dstStride;\
-        tmp17 = *s; s+=tmpStride;\
-        OP2(*d, (tmp14+tmp15)*20 - (tmp13+tmp16)*5 + (tmp12+tmp17));d+=dstStride;\
-        tmp18 = *s; s+=tmpStride;\
-        OP2(*d, (tmp15+tmp16)*20 - (tmp14+tmp17)*5 + (tmp13+tmp18));d+=dstStride;\
-       } \
-      } \
-        dst++;\
-        tmp++;\
-    }while(--i);\
-}\
-\
-static void OPNAME ## h264_qpel4_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
-    OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,4,4); \
-}\
-static void OPNAME ## h264_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
-   OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,8,8); \
-}\
-static void OPNAME ## h264_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
-   OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,16,16); \
-}\
-\
-static void OPNAME ## h264_qpel4_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
-   OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,4,4); \
-}\
-static void OPNAME ## h264_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
-   OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,8,8); \
-}\
-static void OPNAME ## h264_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
-   OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,16,16); \
-}\
-static void OPNAME ## h264_qpel4_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
-   OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,4,4); \
-}\
-static void OPNAME ## h264_qpel8_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
-   OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,8,8); \
-}\
-static void OPNAME ## h264_qpel16_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
-   OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,16,16); \
-}\
-
-#define H264_MC(OPNAME, SIZE) \
-static void OPNAME ## h264_qpel ## SIZE ## _mc00_sh4 (uint8_t *dst, uint8_t *src, int stride){\
-    OPNAME ## pixels ## SIZE ## _c(dst, src, stride, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc10_sh4(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t half[SIZE*SIZE];\
-    put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_aligned2(dst, src, half, stride, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc20_sh4(uint8_t *dst, uint8_t *src, int stride){\
-    OPNAME ## h264_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc30_sh4(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t half[SIZE*SIZE];\
-    put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_aligned2(dst, src+1, half, stride, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc01_sh4(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t full[SIZE*(SIZE+5)];\
-    uint8_t * const full_mid= full + SIZE*2;\
-    uint8_t half[SIZE*SIZE];\
-    copy_block ## SIZE (full, src - stride*2, SIZE,  stride, SIZE + 5);\
-    put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
-    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, full_mid, half, stride, SIZE, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc02_sh4(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t full[SIZE*(SIZE+5)];\
-    uint8_t * const full_mid= full + SIZE*2;\
-    copy_block ## SIZE (full, src - stride*2, SIZE,  stride, SIZE + 5);\
-    OPNAME ## h264_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc03_sh4(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t full[SIZE*(SIZE+5)];\
-    uint8_t * const full_mid= full + SIZE*2;\
-    uint8_t half[SIZE*SIZE];\
-    copy_block ## SIZE (full, src - stride*2, SIZE,  stride, SIZE + 5);\
-    put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
-    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, full_mid+SIZE, half, stride, SIZE, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc11_sh4(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t full[SIZE*(SIZE+5)];\
-    uint8_t * const full_mid= full + SIZE*2;\
-    uint8_t halfH[SIZE*SIZE];\
-    uint8_t halfV[SIZE*SIZE];\
-    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
-    copy_block ## SIZE (full, src - stride*2, SIZE,  stride, SIZE + 5);\
-    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
-    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc31_sh4(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t full[SIZE*(SIZE+5)];\
-    uint8_t * const full_mid= full + SIZE*2;\
-    uint8_t halfH[SIZE*SIZE];\
-    uint8_t halfV[SIZE*SIZE];\
-    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
-    copy_block ## SIZE (full, src - stride*2 + 1, SIZE,  stride, SIZE + 5);\
-    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
-    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc13_sh4(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t full[SIZE*(SIZE+5)];\
-    uint8_t * const full_mid= full + SIZE*2;\
-    uint8_t halfH[SIZE*SIZE];\
-    uint8_t halfV[SIZE*SIZE];\
-    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
-    copy_block ## SIZE (full, src - stride*2, SIZE,  stride, SIZE + 5);\
-    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
-    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc33_sh4(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t full[SIZE*(SIZE+5)];\
-    uint8_t * const full_mid= full + SIZE*2;\
-    uint8_t halfH[SIZE*SIZE];\
-    uint8_t halfV[SIZE*SIZE];\
-    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
-    copy_block ## SIZE (full, src - stride*2 + 1, SIZE,  stride, SIZE + 5);\
-    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
-    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc22_sh4(uint8_t *dst, uint8_t *src, int stride){\
-    int16_t tmp[SIZE*(SIZE+5)];\
-    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass(dst, tmp, src, stride, SIZE, stride);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc21_sh4(uint8_t *dst, uint8_t *src, int stride){\
-    int16_t tmp[SIZE*(SIZE+5)];\
-    uint8_t halfH[SIZE*SIZE];\
-    uint8_t halfHV[SIZE*SIZE];\
-    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
-    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc23_sh4(uint8_t *dst, uint8_t *src, int stride){\
-    int16_t tmp[SIZE*(SIZE+5)];\
-    uint8_t halfH[SIZE*SIZE];\
-    uint8_t halfHV[SIZE*SIZE];\
-    put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
-    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc12_sh4(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t full[SIZE*(SIZE+5)];\
-    uint8_t * const full_mid= full + SIZE*2;\
-    int16_t tmp[SIZE*(SIZE+5)];\
-    uint8_t halfV[SIZE*SIZE];\
-    uint8_t halfHV[SIZE*SIZE];\
-    copy_block ## SIZE (full, src - stride*2, SIZE,  stride, SIZE + 5);\
-    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
-    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc32_sh4(uint8_t *dst, uint8_t *src, int stride){\
-    uint8_t full[SIZE*(SIZE+5)];\
-    uint8_t * const full_mid= full + SIZE*2;\
-    int16_t tmp[SIZE*(SIZE+5)];\
-    uint8_t halfV[SIZE*SIZE];\
-    uint8_t halfHV[SIZE*SIZE];\
-    copy_block ## SIZE (full, src - stride*2 + 1, SIZE,  stride, SIZE + 5);\
-    put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
-    put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
-}\
-
-#define op_avg(a, b)  a = (((a)+cm[((b) + 16)>>5]+1)>>1)
-//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
-#define op_put(a, b)  a = cm[((b) + 16)>>5]
-#define op2_avg(a, b)  a = (((a)+cm[((b) + 512)>>10]+1)>>1)
-#define op2_put(a, b)  a = cm[((b) + 512)>>10]
-
-H264_LOWPASS(put_       , op_put, op2_put)
-H264_LOWPASS(avg_       , op_avg, op2_avg)
-H264_MC(put_, 4)
-H264_MC(put_, 8)
-H264_MC(put_, 16)
-H264_MC(avg_, 4)
-H264_MC(avg_, 8)
-H264_MC(avg_, 16)
-
-#undef op_avg
-#undef op_put
-#undef op2_avg
-#undef op2_put
-
 static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
     uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
 
index 0bade86..9b8b653 100644 (file)
@@ -7,6 +7,7 @@ OBJS-$(CONFIG_DNXHD_ENCODER)           += x86/dnxhdenc.o
 OBJS-$(CONFIG_FFT)                     += x86/fft_init.o
 OBJS-$(CONFIG_H264DSP)                 += x86/h264dsp_init.o
 OBJS-$(CONFIG_H264PRED)                += x86/h264_intrapred_init.o
+OBJS-$(CONFIG_H264QPEL)                += x86/h264_qpel.o
 OBJS-$(CONFIG_LPC)                     += x86/lpc.o
 OBJS-$(CONFIG_MLP_DECODER)             += x86/mlpdsp.o
 OBJS-$(CONFIG_MPEGAUDIODSP)            += x86/mpegaudiodec.o
index 2521b9f..f9da04f 100644 (file)
@@ -1690,7 +1690,10 @@ static void gmc_mmx(uint8_t *dst, uint8_t *src,
 }
 #endif /* HAVE_INLINE_ASM */
 
-#include "h264_qpel.c"
+void ff_put_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
+                          int line_size, int h);
+void ff_avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
+                          int line_size, int h);
 
 void ff_put_h264_chroma_mc8_rnd_mmx  (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
@@ -1882,22 +1885,6 @@ void ff_vector_clip_int32_sse4    (int32_t *dst, const int32_t *src,
         c->PFX ## _pixels_tab IDX [3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU; \
     } while (0)
 
-#define H264_QPEL_FUNCS(x, y, CPU)                                                            \
-    do {                                                                                      \
-        c->put_h264_qpel_pixels_tab[0][x + y * 4] = put_h264_qpel16_mc ## x ## y ## _ ## CPU; \
-        c->put_h264_qpel_pixels_tab[1][x + y * 4] = put_h264_qpel8_mc  ## x ## y ## _ ## CPU; \
-        c->avg_h264_qpel_pixels_tab[0][x + y * 4] = avg_h264_qpel16_mc ## x ## y ## _ ## CPU; \
-        c->avg_h264_qpel_pixels_tab[1][x + y * 4] = avg_h264_qpel8_mc  ## x ## y ## _ ## CPU; \
-    } while (0)
-
-#define H264_QPEL_FUNCS_10(x, y, CPU)                                                               \
-    do {                                                                                            \
-        c->put_h264_qpel_pixels_tab[0][x + y * 4] = ff_put_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \
-        c->put_h264_qpel_pixels_tab[1][x + y * 4] = ff_put_h264_qpel8_mc  ## x ## y ## _10_ ## CPU; \
-        c->avg_h264_qpel_pixels_tab[0][x + y * 4] = ff_avg_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \
-        c->avg_h264_qpel_pixels_tab[1][x + y * 4] = ff_avg_h264_qpel8_mc  ## x ## y ## _10_ ## CPU; \
-    } while (0)
-
 static void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
 {
     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
@@ -2014,26 +2001,6 @@ static void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx,
 #endif /* HAVE_INLINE_ASM */
 
 #if HAVE_MMXEXT_EXTERNAL
-    if (CONFIG_H264QPEL) {
-        if (!high_bit_depth) {
-            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmxext, );
-            SET_QPEL_FUNCS(put_h264_qpel, 1,  8, mmxext, );
-            SET_QPEL_FUNCS(put_h264_qpel, 2,  4, mmxext, );
-            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmxext, );
-            SET_QPEL_FUNCS(avg_h264_qpel, 1,  8, mmxext, );
-            SET_QPEL_FUNCS(avg_h264_qpel, 2,  4, mmxext, );
-        } else if (bit_depth == 10) {
-#if !ARCH_X86_64
-            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_mmxext, ff_);
-            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_mmxext, ff_);
-            SET_QPEL_FUNCS(put_h264_qpel, 1,  8, 10_mmxext, ff_);
-            SET_QPEL_FUNCS(avg_h264_qpel, 1,  8, 10_mmxext, ff_);
-#endif
-            SET_QPEL_FUNCS(put_h264_qpel, 2, 4,  10_mmxext, ff_);
-            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4,  10_mmxext, ff_);
-        }
-    }
-
     if (!high_bit_depth && CONFIG_H264CHROMA) {
         c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_mmxext;
         c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_mmxext;
@@ -2148,36 +2115,10 @@ static void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
             c->put_pixels_tab[0][0]        = ff_put_pixels16_sse2;
             c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_sse2;
             c->avg_pixels_tab[0][0]        = ff_avg_pixels16_sse2;
-            if (CONFIG_H264QPEL)
-                H264_QPEL_FUNCS(0, 0, sse2);
         }
     }
 
-    if (!high_bit_depth && CONFIG_H264QPEL) {
-        H264_QPEL_FUNCS(0, 1, sse2);
-        H264_QPEL_FUNCS(0, 2, sse2);
-        H264_QPEL_FUNCS(0, 3, sse2);
-        H264_QPEL_FUNCS(1, 1, sse2);
-        H264_QPEL_FUNCS(1, 2, sse2);
-        H264_QPEL_FUNCS(1, 3, sse2);
-        H264_QPEL_FUNCS(2, 1, sse2);
-        H264_QPEL_FUNCS(2, 2, sse2);
-        H264_QPEL_FUNCS(2, 3, sse2);
-        H264_QPEL_FUNCS(3, 1, sse2);
-        H264_QPEL_FUNCS(3, 2, sse2);
-        H264_QPEL_FUNCS(3, 3, sse2);
-    }
-
     if (bit_depth == 10) {
-        if (CONFIG_H264QPEL) {
-            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_sse2, ff_);
-            SET_QPEL_FUNCS(put_h264_qpel, 1,  8, 10_sse2, ff_);
-            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_sse2, ff_);
-            SET_QPEL_FUNCS(avg_h264_qpel, 1,  8, 10_sse2, ff_);
-            H264_QPEL_FUNCS_10(1, 0, sse2_cache64);
-            H264_QPEL_FUNCS_10(2, 0, sse2_cache64);
-            H264_QPEL_FUNCS_10(3, 0, sse2_cache64);
-        }
         if (CONFIG_H264CHROMA) {
             c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_sse2;
             c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_sse2;
@@ -2205,27 +2146,7 @@ static void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
 {
 #if HAVE_SSSE3_EXTERNAL
     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
-    const int bit_depth      = avctx->bits_per_raw_sample;
 
-    if (!high_bit_depth && CONFIG_H264QPEL) {
-        H264_QPEL_FUNCS(1, 0, ssse3);
-        H264_QPEL_FUNCS(1, 1, ssse3);
-        H264_QPEL_FUNCS(1, 2, ssse3);
-        H264_QPEL_FUNCS(1, 3, ssse3);
-        H264_QPEL_FUNCS(2, 0, ssse3);
-        H264_QPEL_FUNCS(2, 1, ssse3);
-        H264_QPEL_FUNCS(2, 2, ssse3);
-        H264_QPEL_FUNCS(2, 3, ssse3);
-        H264_QPEL_FUNCS(3, 0, ssse3);
-        H264_QPEL_FUNCS(3, 1, ssse3);
-        H264_QPEL_FUNCS(3, 2, ssse3);
-        H264_QPEL_FUNCS(3, 3, ssse3);
-    }
-    if (bit_depth == 10 && CONFIG_H264QPEL) {
-        H264_QPEL_FUNCS_10(1, 0, ssse3_cache64);
-        H264_QPEL_FUNCS_10(2, 0, ssse3_cache64);
-        H264_QPEL_FUNCS_10(3, 0, ssse3_cache64);
-    }
     if (!high_bit_depth && CONFIG_H264CHROMA) {
         c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_rnd_ssse3;
         c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_ssse3;
@@ -2262,12 +2183,6 @@ static void dsputil_init_avx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
     if (bit_depth == 10) {
         // AVX implies !cache64.
         // TODO: Port cache(32|64) detection from x264.
-        if (CONFIG_H264QPEL) {
-            H264_QPEL_FUNCS_10(1, 0, sse2);
-            H264_QPEL_FUNCS_10(2, 0, sse2);
-            H264_QPEL_FUNCS_10(3, 0, sse2);
-        }
-
         if (CONFIG_H264CHROMA) {
             c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_avx;
             c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_avx;
index bc56d09..bebf5a5 100644 (file)
@@ -22,6 +22,7 @@
 #include "libavutil/cpu.h"
 #include "libavutil/x86/asm.h"
 #include "libavcodec/dsputil.h"
+#include "libavcodec/h264qpel.h"
 #include "libavcodec/mpegvideo.h"
 #include "dsputil_mmx.h"
 
@@ -490,3 +491,128 @@ QPEL16(mmxext)
 #endif
 
 #endif /* HAVE_YASM */
+
+#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX)                          \
+    do {                                                                     \
+    c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
+    c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
+    c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
+    c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
+    c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
+    c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
+    c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
+    c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
+    c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
+    c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
+    c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
+    c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
+    c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
+    c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
+    c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
+    c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
+    } while (0)
+
+#define H264_QPEL_FUNCS(x, y, CPU)                                                            \
+    do {                                                                                      \
+        c->put_h264_qpel_pixels_tab[0][x + y * 4] = put_h264_qpel16_mc ## x ## y ## _ ## CPU; \
+        c->put_h264_qpel_pixels_tab[1][x + y * 4] = put_h264_qpel8_mc  ## x ## y ## _ ## CPU; \
+        c->avg_h264_qpel_pixels_tab[0][x + y * 4] = avg_h264_qpel16_mc ## x ## y ## _ ## CPU; \
+        c->avg_h264_qpel_pixels_tab[1][x + y * 4] = avg_h264_qpel8_mc  ## x ## y ## _ ## CPU; \
+    } while (0)
+
+#define H264_QPEL_FUNCS_10(x, y, CPU)                                                               \
+    do {                                                                                            \
+        c->put_h264_qpel_pixels_tab[0][x + y * 4] = ff_put_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \
+        c->put_h264_qpel_pixels_tab[1][x + y * 4] = ff_put_h264_qpel8_mc  ## x ## y ## _10_ ## CPU; \
+        c->avg_h264_qpel_pixels_tab[0][x + y * 4] = ff_avg_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \
+        c->avg_h264_qpel_pixels_tab[1][x + y * 4] = ff_avg_h264_qpel8_mc  ## x ## y ## _10_ ## CPU; \
+    } while (0)
+
+void ff_h264qpel_init_x86(H264QpelContext *c, int bit_depth)
+{
+    int high_bit_depth = bit_depth > 8;
+    int mm_flags = av_get_cpu_flags();
+
+#if HAVE_MMXEXT_EXTERNAL
+    if (!high_bit_depth) {
+        SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmxext, );
+        SET_QPEL_FUNCS(put_h264_qpel, 1,  8, mmxext, );
+        SET_QPEL_FUNCS(put_h264_qpel, 2,  4, mmxext, );
+        SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmxext, );
+        SET_QPEL_FUNCS(avg_h264_qpel, 1,  8, mmxext, );
+        SET_QPEL_FUNCS(avg_h264_qpel, 2,  4, mmxext, );
+    } else if (bit_depth == 10) {
+#if !ARCH_X86_64
+        SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_mmxext, ff_);
+        SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_mmxext, ff_);
+        SET_QPEL_FUNCS(put_h264_qpel, 1,  8, 10_mmxext, ff_);
+        SET_QPEL_FUNCS(avg_h264_qpel, 1,  8, 10_mmxext, ff_);
+#endif
+        SET_QPEL_FUNCS(put_h264_qpel, 2, 4,  10_mmxext, ff_);
+        SET_QPEL_FUNCS(avg_h264_qpel, 2, 4,  10_mmxext, ff_);
+    }
+#endif
+
+#if HAVE_SSE2_EXTERNAL
+    if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW) && !high_bit_depth) {
+        // these functions are slower than mmx on AMD, but faster on Intel
+        H264_QPEL_FUNCS(0, 0, sse2);
+    }
+
+    if (!high_bit_depth) {
+        H264_QPEL_FUNCS(0, 1, sse2);
+        H264_QPEL_FUNCS(0, 2, sse2);
+        H264_QPEL_FUNCS(0, 3, sse2);
+        H264_QPEL_FUNCS(1, 1, sse2);
+        H264_QPEL_FUNCS(1, 2, sse2);
+        H264_QPEL_FUNCS(1, 3, sse2);
+        H264_QPEL_FUNCS(2, 1, sse2);
+        H264_QPEL_FUNCS(2, 2, sse2);
+        H264_QPEL_FUNCS(2, 3, sse2);
+        H264_QPEL_FUNCS(3, 1, sse2);
+        H264_QPEL_FUNCS(3, 2, sse2);
+        H264_QPEL_FUNCS(3, 3, sse2);
+    }
+
+    if (bit_depth == 10) {
+        SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_sse2, ff_);
+        SET_QPEL_FUNCS(put_h264_qpel, 1,  8, 10_sse2, ff_);
+        SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_sse2, ff_);
+        SET_QPEL_FUNCS(avg_h264_qpel, 1,  8, 10_sse2, ff_);
+        H264_QPEL_FUNCS_10(1, 0, sse2_cache64);
+        H264_QPEL_FUNCS_10(2, 0, sse2_cache64);
+        H264_QPEL_FUNCS_10(3, 0, sse2_cache64);
+    }
+#endif
+
+#if HAVE_SSSE3_EXTERNAL
+    if (!high_bit_depth) {
+        H264_QPEL_FUNCS(1, 0, ssse3);
+        H264_QPEL_FUNCS(1, 1, ssse3);
+        H264_QPEL_FUNCS(1, 2, ssse3);
+        H264_QPEL_FUNCS(1, 3, ssse3);
+        H264_QPEL_FUNCS(2, 0, ssse3);
+        H264_QPEL_FUNCS(2, 1, ssse3);
+        H264_QPEL_FUNCS(2, 2, ssse3);
+        H264_QPEL_FUNCS(2, 3, ssse3);
+        H264_QPEL_FUNCS(3, 0, ssse3);
+        H264_QPEL_FUNCS(3, 1, ssse3);
+        H264_QPEL_FUNCS(3, 2, ssse3);
+        H264_QPEL_FUNCS(3, 3, ssse3);
+    }
+
+    if (bit_depth == 10) {
+        H264_QPEL_FUNCS_10(1, 0, ssse3_cache64);
+        H264_QPEL_FUNCS_10(2, 0, ssse3_cache64);
+        H264_QPEL_FUNCS_10(3, 0, ssse3_cache64);
+    }
+#endif
+
+#if HAVE_AVX_EXTERNAL
+    if (bit_depth == 10) {
+        H264_QPEL_FUNCS_10(1, 0, sse2);
+        H264_QPEL_FUNCS_10(2, 0, sse2);
+        H264_QPEL_FUNCS_10(3, 0, sse2);
+    }
+#endif
+}