/*
 * MMX optimized MP3 decoding functions
 * Copyright (c) 2010 Vitor Sessak
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegaudiodsp.h"

void ff_imdct36_float_sse(float *out, float *buf, float *in, float *win);
void ff_imdct36_float_sse2(float *out, float *buf, float *in, float *win);
void ff_imdct36_float_sse3(float *out, float *buf, float *in, float *win);
void ff_imdct36_float_ssse3(float *out, float *buf, float *in, float *win);
void ff_imdct36_float_avx(float *out, float *buf, float *in, float *win);
void ff_four_imdct36_float_sse(float *out, float *buf, float *in, float *win,
                               float *tmpbuf);
void ff_four_imdct36_float_avx(float *out, float *buf, float *in, float *win,
                               float *tmpbuf);

DECLARE_ALIGNED(16, static float, mdct_win_sse)[2][4][4*40];

#if HAVE_INLINE_ASM

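/* Scalar multiply-accumulate (MACS) and multiply-subtract (MLSS) helpers;
 * SUM8 expands one of them over the 8 filterbank taps, which sit 64 floats
 * apart in both the window and the sample buffer. */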
#define MACS(rt, ra, rb) rt+=(ra)*(rb)
#define MLSS(rt, ra, rb) rt-=(ra)*(rb)

#define SUM8(op, sum, w, p)               \
{                                         \
    op(sum, (w)[0 * 64], (p)[0 * 64]);    \
    op(sum, (w)[1 * 64], (p)[1 * 64]);    \
    op(sum, (w)[2 * 64], (p)[2 * 64]);    \
    op(sum, (w)[3 * 64], (p)[3 * 64]);    \
    op(sum, (w)[4 * 64], (p)[4 * 64]);    \
    op(sum, (w)[5 * 64], (p)[5 * 64]);    \
    op(sum, (w)[6 * 64], (p)[6 * 64]);    \
    op(sum, (w)[7 * 64], (p)[7 * 64]);    \
}

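/* Core SSE kernel for the synthesis window: for four outputs at a time it
 * accumulates the negated 8-tap products of buf against win1 (64-float
 * stride) and win2 (16-float stride), storing the partial sums into sum1
 * and sum2.  The byte offset `count` runs from -4*len up to 0, so the
 * pointers below are pre-advanced by len elements. */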
static void apply_window(const float *buf, const float *win1,
                         const float *win2, float *sum1, float *sum2, int len)
{
    x86_reg count = - 4*len;
    const float *win1a = win1+len;
    const float *win2a = win2+len;
    const float *bufa  = buf+len;
    float *sum1a = sum1+len;
    float *sum2a = sum2+len;


#define MULT(a, b)                                 \
    "movaps " #a "(%1,%0), %%xmm1           \n\t"  \
    "movaps " #a "(%3,%0), %%xmm2           \n\t"  \
    "mulps         %%xmm2, %%xmm1           \n\t"  \
    "subps         %%xmm1, %%xmm0           \n\t"  \
    "mulps  " #b "(%2,%0), %%xmm2           \n\t"  \
    "subps         %%xmm2, %%xmm4           \n\t"  \

    __asm__ volatile(
            "1:                                   \n\t"
            "xorps       %%xmm0, %%xmm0           \n\t"
            "xorps       %%xmm4, %%xmm4           \n\t"

            MULT(   0,   0)
            MULT( 256,  64)
            MULT( 512, 128)
            MULT( 768, 192)
            MULT(1024, 256)
            MULT(1280, 320)
            MULT(1536, 384)
            MULT(1792, 448)

            "movaps      %%xmm0, (%4,%0)          \n\t"
            "movaps      %%xmm4, (%5,%0)          \n\t"
            "add            $16,  %0              \n\t"
            "jl              1b                   \n\t"
            :"+&r"(count)
            :"r"(win1a), "r"(win2a), "r"(bufa), "r"(sum1a), "r"(sum2a)
            );

#undef MULT
}

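/* SSE implementation of MPADSPContext.apply_window_float.  It first copies
 * the first 32 floats of `in` to in + 512 so the windowing reads never have
 * to wrap, then lets apply_window() build four partial-sum arrays
 * (suma..sumd) that are combined into the 32 output samples below. */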
static void apply_window_mp3(float *in, float *win, int *unused, float *out,
                             int incr)
{
    LOCAL_ALIGNED_16(float, suma, [17]);
    LOCAL_ALIGNED_16(float, sumb, [17]);
    LOCAL_ALIGNED_16(float, sumc, [17]);
    LOCAL_ALIGNED_16(float, sumd, [17]);

    float sum;

    /* copy to avoid wrap */
    __asm__ volatile(
            "movaps    0(%0), %%xmm0   \n\t" \
            "movaps   16(%0), %%xmm1   \n\t" \
            "movaps   32(%0), %%xmm2   \n\t" \
            "movaps   48(%0), %%xmm3   \n\t" \
            "movaps   %%xmm0,   0(%1) \n\t" \
            "movaps   %%xmm1,  16(%1) \n\t" \
            "movaps   %%xmm2,  32(%1) \n\t" \
            "movaps   %%xmm3,  48(%1) \n\t" \
            "movaps   64(%0), %%xmm0   \n\t" \
            "movaps   80(%0), %%xmm1   \n\t" \
            "movaps   96(%0), %%xmm2   \n\t" \
            "movaps  112(%0), %%xmm3   \n\t" \
            "movaps   %%xmm0,  64(%1) \n\t" \
            "movaps   %%xmm1,  80(%1) \n\t" \
            "movaps   %%xmm2,  96(%1) \n\t" \
            "movaps   %%xmm3, 112(%1) \n\t"
            ::"r"(in), "r"(in+512)
            :"memory"
            );

    apply_window(in + 16, win     , win + 512, suma, sumc, 16);
    apply_window(in + 32, win + 48, win + 640, sumb, sumd, 16);

    SUM8(MACS, suma[0], win + 32, in + 48);

    sumc[ 0] = 0;
    sumb[16] = 0;
    sumd[16] = 0;

#define SUMS(suma, sumb, sumc, sumd, out1, out2)               \
            "movups " #sumd "(%4),       %%xmm0          \n\t" \
            "shufps         $0x1b,       %%xmm0, %%xmm0  \n\t" \
            "subps  " #suma "(%1),       %%xmm0          \n\t" \
            "movaps        %%xmm0," #out1 "(%0)          \n\t" \
\
            "movups " #sumc "(%3),       %%xmm0          \n\t" \
            "shufps         $0x1b,       %%xmm0, %%xmm0  \n\t" \
            "addps  " #sumb "(%2),       %%xmm0          \n\t" \
            "movaps        %%xmm0," #out2 "(%0)          \n\t"

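    /* For the common incr == 1 case the combination step is done with SSE:
     * each SUMS() expansion reverses sumd/sumc with shufps $0x1b, subtracts
     * suma from the reversed sumd and adds sumb to the reversed sumc, and
     * stores two groups of 4 outputs.  Other strides fall back to the scalar
     * loop, which computes the same values one sample at a time. */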
    if (incr == 1) {
        __asm__ volatile(
            SUMS( 0, 48,  4, 52,  0, 112)
            SUMS(16, 32, 20, 36, 16,  96)
            SUMS(32, 16, 36, 20, 32,  80)
            SUMS(48,  0, 52,  4, 48,  64)

            :"+&r"(out)
            :"r"(&suma[0]), "r"(&sumb[0]), "r"(&sumc[0]), "r"(&sumd[0])
            :"memory"
            );
        out += 16*incr;
    } else {
        int j;
        float *out2 = out + 32 * incr;
        out[0  ]  = -suma[   0];
        out += incr;
        out2 -= incr;
        for(j=1;j<16;j++) {
            *out  = -suma[   j] + sumd[16-j];
            *out2 =  sumb[16-j] + sumc[   j];
            out  += incr;
            out2 -= incr;
        }
    }

    sum = 0;
    SUM8(MLSS, sum, win + 16 + 32, in + 32);
    *out = sum;
}

#endif /* HAVE_INLINE_ASM */

#if HAVE_YASM
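/* Instantiates imdct36_blocks_<CPU1>(): groups of four 18-sample blocks go
 * through the 4-way ff_four_imdct36_float_<CPU2>() kernel with the
 * interleaved mdct_win_sse tables, and any remaining blocks fall back to the
 * single-block ff_imdct36_float_<CPU1>() kernel with the scalar
 * ff_mdct_win_float windows (odd blocks use the +4 window variant). */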
#define DECL_IMDCT_BLOCKS(CPU1, CPU2)                                       \
static void imdct36_blocks_ ## CPU1(float *out, float *buf, float *in,      \
                               int count, int switch_point, int block_type) \
{                                                                           \
    int align_end = count - (count & 3);                                \
    int j;                                                              \
    for (j = 0; j < align_end; j+= 4) {                                 \
        LOCAL_ALIGNED_16(float, tmpbuf, [1024]);                        \
        float *win = mdct_win_sse[switch_point && j < 4][block_type];   \
        /* apply window & overlap with previous buffer */               \
                                                                        \
        /* select window */                                             \
        ff_four_imdct36_float_ ## CPU2(out, buf, in, win, tmpbuf);      \
        in      += 4*18;                                                \
        buf     += 4*18;                                                \
        out     += 4;                                                   \
    }                                                                   \
    for (; j < count; j++) {                                            \
        /* apply window & overlap with previous buffer */               \
                                                                        \
        /* select window */                                             \
        int win_idx = (switch_point && j < 2) ? 0 : block_type;         \
        float *win = ff_mdct_win_float[win_idx + (4 & -(j & 1))];       \
                                                                        \
        ff_imdct36_float_ ## CPU1(out, buf, in, win);                   \
                                                                        \
        in  += 18;                                                      \
        buf++;                                                          \
        out++;                                                          \
    }                                                                   \
}

DECL_IMDCT_BLOCKS(sse,sse)
DECL_IMDCT_BLOCKS(sse2,sse)
DECL_IMDCT_BLOCKS(sse3,sse)
DECL_IMDCT_BLOCKS(ssse3,sse)
DECL_IMDCT_BLOCKS(avx,avx)
#endif /* HAVE_YASM */

void ff_mpadsp_init_mmx(MPADSPContext *s)
{
    int mm_flags = av_get_cpu_flags();

    int i, j;
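    /* Interleave the scalar MDCT windows: for each coefficient, the window
     * values for the even and odd blocks of a group of four are stored side
     * by side, so the 4-way IMDCT kernel can fetch them with aligned loads.
     * The [1] set substitutes windows 0/4 for the first two blocks, matching
     * the switch_point handling in the scalar path above. */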
    for (j = 0; j < 4; j++) {
        for (i = 0; i < 40; i ++) {
            mdct_win_sse[0][j][4*i    ] = ff_mdct_win_float[j    ][i];
            mdct_win_sse[0][j][4*i + 1] = ff_mdct_win_float[j + 4][i];
            mdct_win_sse[0][j][4*i + 2] = ff_mdct_win_float[j    ][i];
            mdct_win_sse[0][j][4*i + 3] = ff_mdct_win_float[j + 4][i];
            mdct_win_sse[1][j][4*i    ] = ff_mdct_win_float[0    ][i];
            mdct_win_sse[1][j][4*i + 1] = ff_mdct_win_float[4    ][i];
            mdct_win_sse[1][j][4*i + 2] = ff_mdct_win_float[j    ][i];
            mdct_win_sse[1][j][4*i + 3] = ff_mdct_win_float[j + 4][i];
        }
    }

#if HAVE_INLINE_ASM
    if (mm_flags & AV_CPU_FLAG_SSE2) {
        s->apply_window_float = apply_window_mp3;
    }
#endif /* HAVE_INLINE_ASM */
#if HAVE_YASM
    if (mm_flags & AV_CPU_FLAG_AVX && HAVE_AVX) {
        s->imdct36_blocks_float = imdct36_blocks_avx;
#if HAVE_SSE
    } else if (mm_flags & AV_CPU_FLAG_SSSE3) {
        s->imdct36_blocks_float = imdct36_blocks_ssse3;
    } else if (mm_flags & AV_CPU_FLAG_SSE3) {
        s->imdct36_blocks_float = imdct36_blocks_sse3;
    } else if (mm_flags & AV_CPU_FLAG_SSE2) {
        s->imdct36_blocks_float = imdct36_blocks_sse2;
    } else if (mm_flags & AV_CPU_FLAG_SSE) {
        s->imdct36_blocks_float = imdct36_blocks_sse;
#endif /* HAVE_SSE */
    }
#endif /* HAVE_YASM */
}