WRAPPER8_16_SQ(rd8x8_c, rd16_c)
WRAPPER8_16_SQ(bit8x8_c, bit16_c)
-static void vector_fmul_reverse_c(float *dst, const float *src0, const float *src1, int len){
-    int i;
-    src1 += len-1;
-    for(i=0; i<len; i++)
-        dst[i] = src0[i] * src1[-i];
-}
-
-static void butterflies_float_c(float *restrict v1, float *restrict v2,
+static void butterflies_float_c(float *av_restrict v1, float *av_restrict v2,
int len)
{
int i;
#if HAVE_ALTIVEC
if(CONFIG_H264_DECODER) ff_dsputil_h264_init_ppc(c, avctx);
-    if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
+    if (mm_flags & AV_CPU_FLAG_ALTIVEC) {
ff_dsputil_init_altivec(c, avctx);
-        ff_float_init_altivec(c, avctx);
ff_int_init_altivec(c, avctx);
c->gmc1 = ff_gmc1_altivec;
INIT_XMM sse
VECTOR_FMUL_ADD
+%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL_ADD
+%endif
+
+;-----------------------------------------------------------------------------
+; void vector_fmul_reverse(float *dst, const float *src0, const float *src1,
+;                          int len)
+;-----------------------------------------------------------------------------
+%macro VECTOR_FMUL_REVERSE 0
+cglobal vector_fmul_reverse, 4,4,2, dst, src0, src1, len
+    lea       lenq, [lend*4 - 2*mmsize]
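+    ; lenq now holds a byte offset to the last 2*mmsize bytes of src0/dst;
+    ; the loop walks it down to 0 while src1q advances forward.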
+ALIGN 16
+.loop:
+%if cpuflag(avx)
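+    ; AVX: load the two 128-bit halves of each src1 block swapped (high half
+    ; into the low lane, low half into the high lane via vinsertf128), then
+    ; reverse each lane with q0123 so all 8 floats end up in reverse order.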
+    vmovaps     xmm0, [src1q + 16]
+    vinsertf128 m0, m0, [src1q], 1
+    vshufps     m0, m0, m0, q0123
+    vmovaps     xmm1, [src1q + mmsize + 16]
+    vinsertf128 m1, m1, [src1q + mmsize], 1
+    vshufps     m1, m1, m1, q0123
+%else
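+    ; SSE: a single shufps with q0123 reverses the four floats in each vector.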
+    mova        m0, [src1q]
+    mova        m1, [src1q + mmsize]
+    shufps      m0, m0, q0123
+    shufps      m1, m1, q0123
+%endif
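+    ; src1 is read forward (reversed per block) while src0/dst are addressed
+    ; from the end via lenq, giving dst[i] = src0[i] * src1[len-1-i].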
+    mulps       m0, m0, [src0q + lenq + mmsize]
+    mulps       m1, m1, [src0q + lenq]
+    mova        [dstq + lenq + mmsize], m0
+    mova        [dstq + lenq], m1
+    add         src1q, 2*mmsize
+    sub         lenq,  2*mmsize
+    jge         .loop
+    REP_RET
+%endmacro
+
+INIT_XMM sse
+VECTOR_FMUL_REVERSE
+%if HAVE_AVX_EXTERNAL
+INIT_YMM avx
+VECTOR_FMUL_REVERSE
+%endif