/*
 * x86-optimized AC-3 DSP functions
 * Copyright (c) 2011 Justin Ruggles
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/ac3.h"
#include "libavcodec/ac3dsp.h"

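/* Prototypes for the external x86 assembly implementations. */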
void ff_ac3_exponent_min_mmx   (uint8_t *exp, int num_reuse_blocks, int nb_coefs);
void ff_ac3_exponent_min_mmxext(uint8_t *exp, int num_reuse_blocks, int nb_coefs);
void ff_ac3_exponent_min_sse2  (uint8_t *exp, int num_reuse_blocks, int nb_coefs);

int ff_ac3_max_msb_abs_int16_mmx   (const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_mmxext(const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_sse2  (const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_ssse3 (const int16_t *src, int len);

void ff_ac3_lshift_int16_mmx (int16_t *src, unsigned int len, unsigned int shift);
void ff_ac3_lshift_int16_sse2(int16_t *src, unsigned int len, unsigned int shift);

void ff_ac3_rshift_int32_mmx (int32_t *src, unsigned int len, unsigned int shift);
void ff_ac3_rshift_int32_sse2(int32_t *src, unsigned int len, unsigned int shift);

void ff_float_to_fixed24_3dnow(int32_t *dst, const float *src, unsigned int len);
void ff_float_to_fixed24_sse  (int32_t *dst, const float *src, unsigned int len);
void ff_float_to_fixed24_sse2 (int32_t *dst, const float *src, unsigned int len);

int ff_ac3_compute_mantissa_size_sse2(uint16_t mant_cnt[6][16]);

void ff_ac3_extract_exponents_sse2 (uint8_t *exp, int32_t *coef, int nb_coefs);
void ff_ac3_extract_exponents_ssse3(uint8_t *exp, int32_t *coef, int nb_coefs);

void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
                                        const int16_t *window, unsigned int len);
void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);
void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
                                  const int16_t *window, unsigned int len);
void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
                                const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
                                 const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);

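/* On 32-bit x86, icc cannot satisfy the seven-register constraints used by
 * the inline asm below, so disable those code paths for that compiler. */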
#if ARCH_X86_32 && defined(__INTEL_COMPILER)
#       undef HAVE_7REGS
#       define HAVE_7REGS 0
#endif

#if HAVE_SSE_INLINE && HAVE_7REGS

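/* IF1(x) expands to x and IF0(x) to nothing; the MIX* macros below use them
 * to emit instructions only in the mono or only in the stereo variant. */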
#define IF1(x) x
#define IF0(x)

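/* Mix five input channels down to mono or stereo. xmm5-xmm7 hold the
 * broadcast matrix coefficients; the loop walks the sample buffers with a
 * negative offset that counts up toward zero. */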
#define MIX5(mono, stereo)                                      \
    __asm__ volatile (                                          \
        "movss           0(%1), %%xmm5          \n"             \
        "movss           4(%1), %%xmm6          \n"             \
        "movss          12(%1), %%xmm7          \n"             \
        "shufps     $0, %%xmm5, %%xmm5          \n"             \
        "shufps     $0, %%xmm6, %%xmm6          \n"             \
        "shufps     $0, %%xmm7, %%xmm7          \n"             \
        "1:                                     \n"             \
        "movaps       (%0, %2), %%xmm0          \n"             \
        "movaps       (%0, %3), %%xmm1          \n"             \
        "movaps       (%0, %4), %%xmm2          \n"             \
        "movaps       (%0, %5), %%xmm3          \n"             \
        "movaps       (%0, %6), %%xmm4          \n"             \
        "mulps          %%xmm5, %%xmm0          \n"             \
        "mulps          %%xmm6, %%xmm1          \n"             \
        "mulps          %%xmm5, %%xmm2          \n"             \
        "mulps          %%xmm7, %%xmm3          \n"             \
        "mulps          %%xmm7, %%xmm4          \n"             \
 stereo("addps          %%xmm1, %%xmm0          \n")            \
        "addps          %%xmm1, %%xmm2          \n"             \
        "addps          %%xmm3, %%xmm0          \n"             \
        "addps          %%xmm4, %%xmm2          \n"             \
   mono("addps          %%xmm2, %%xmm0          \n")            \
        "movaps         %%xmm0, (%0, %2)        \n"             \
 stereo("movaps         %%xmm2, (%0, %3)        \n")            \
        "add               $16, %0              \n"             \
        "jl                 1b                  \n"             \
        : "+&r"(i)                                              \
        : "r"(matrix[0]),                                       \
          "r"(samples[0] + len),                                \
          "r"(samples[1] + len),                                \
          "r"(samples[2] + len),                                \
          "r"(samples[3] + len),                                \
          "r"(samples[4] + len)                                 \
        : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3",      \
                       "%xmm4", "%xmm5", "%xmm6", "%xmm7",)     \
          "memory"                                              \
    );

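/* Mix an arbitrary number of input channels into one (mono) or two (stereo)
 * output channels, using the pointer table and broadcast coefficients set up
 * by ac3_downmix_sse() below. */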
#define MIX_MISC(stereo)                                        \
    __asm__ volatile (                                          \
        "mov              %5, %2            \n"                 \
        "1:                                 \n"                 \
        "mov -%c7(%6, %2, %c8), %3          \n"                 \
        "movaps     (%3, %0), %%xmm0        \n"                 \
 stereo("movaps       %%xmm0, %%xmm1        \n")                \
        "mulps        %%xmm4, %%xmm0        \n"                 \
 stereo("mulps        %%xmm5, %%xmm1        \n")                \
        "2:                                 \n"                 \
        "mov   (%6, %2, %c8), %1            \n"                 \
        "movaps     (%1, %0), %%xmm2        \n"                 \
 stereo("movaps       %%xmm2, %%xmm3        \n")                \
        "mulps   (%4, %2, 8), %%xmm2        \n"                 \
 stereo("mulps 16(%4, %2, 8), %%xmm3        \n")                \
        "addps        %%xmm2, %%xmm0        \n"                 \
 stereo("addps        %%xmm3, %%xmm1        \n")                \
        "add              $4, %2            \n"                 \
        "jl               2b                \n"                 \
        "mov              %5, %2            \n"                 \
 stereo("mov   (%6, %2, %c8), %1            \n")                \
        "movaps       %%xmm0, (%3, %0)      \n"                 \
 stereo("movaps       %%xmm1, (%1, %0)      \n")                \
        "add             $16, %0            \n"                 \
        "jl               1b                \n"                 \
        : "+&r"(i), "=&r"(j), "=&r"(k), "=&r"(m)                \
        : "r"(matrix_simd + in_ch),                             \
          "g"((intptr_t) - 4 * (in_ch - 1)),                    \
          "r"(samp + in_ch),                                    \
          "i"(sizeof(float *)), "i"(sizeof(float *)/4)          \
        : "memory"                                              \
    );

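/* Downmix AC-3 audio with SSE inline assembly. samples[] holds in_ch channel
 * buffers of len floats each; matrix[out][in] gives the mixing coefficients. */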
static void ac3_downmix_sse(float **samples, float **matrix,
                            int out_ch, int in_ch, int len)
{
    int **matrix_cmp = (int **)matrix;
    intptr_t i, j, k, m;

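    /* i indexes the sample buffers from -len * sizeof(float) up to 0 so the
     * loops can terminate on the sign flag. The float coefficients are
     * compared as raw bits (matrix_cmp) to detect the symmetric 5-channel
     * stereo and mono layouts that the fast MIX5 path can handle. */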
    i = -len * sizeof(float);
    if (in_ch == 5 && out_ch == 2 &&
        !(matrix_cmp[1][0] | matrix_cmp[0][2]   |
          matrix_cmp[1][3] | matrix_cmp[0][4]   |
          (matrix_cmp[0][1] ^ matrix_cmp[1][1]) |
          (matrix_cmp[0][0] ^ matrix_cmp[1][2]))) {
        MIX5(IF0, IF1);
    } else if (in_ch == 5 && out_ch == 1 &&
               matrix_cmp[0][0] == matrix_cmp[0][2] &&
               matrix_cmp[0][3] == matrix_cmp[0][4]) {
        MIX5(IF1, IF0);
    } else {
        LOCAL_ALIGNED(16, float, matrix_simd, [AC3_MAX_CHANNELS], [2][4]);
        float *samp[AC3_MAX_CHANNELS];

        for (j = 0; j < in_ch; j++)
            samp[j] = samples[j] + len;

        j = 2 * in_ch * sizeof(float);
        k =     in_ch * sizeof(float);
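        /* Broadcast each mixing coefficient across a full XMM register and
         * store the result in matrix_simd for MIX_MISC to consume. */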
        __asm__ volatile (
            "1:                                 \n"
            "sub             $4, %1             \n"
            "sub             $8, %0             \n"
            "movss     (%3, %1), %%xmm4         \n"
            "movss     (%4, %1), %%xmm5         \n"
            "shufps          $0, %%xmm4, %%xmm4 \n"
            "shufps          $0, %%xmm5, %%xmm5 \n"
            "movaps      %%xmm4,   (%2, %0, 4)  \n"
            "movaps      %%xmm5, 16(%2, %0, 4)  \n"
            "jg              1b                 \n"
            : "+&r"(j), "+&r"(k)
            : "r"(matrix_simd), "r"(matrix[0]), "r"(matrix[1])
            : "memory"
        );
        if (out_ch == 2) {
            MIX_MISC(IF1);
        } else {
            MIX_MISC(IF0);
        }
    }
}

#endif /* HAVE_SSE_INLINE && HAVE_7REGS */

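/* Set the AC3DSPContext function pointers to the fastest versions supported
 * by the host CPU. When bit_exact is set, only functions whose output matches
 * the C reference implementation bit for bit are selected. */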
av_cold void ff_ac3dsp_init_x86(AC3DSPContext *c, int bit_exact)
{
    int cpu_flags = av_get_cpu_flags();

    if (EXTERNAL_MMX(cpu_flags)) {
        c->ac3_exponent_min = ff_ac3_exponent_min_mmx;
        c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmx;
        c->ac3_lshift_int16 = ff_ac3_lshift_int16_mmx;
        c->ac3_rshift_int32 = ff_ac3_rshift_int32_mmx;
    }
    if (EXTERNAL_AMD3DNOW(cpu_flags)) {
        if (!bit_exact) {
            c->float_to_fixed24 = ff_float_to_fixed24_3dnow;
        }
    }
    if (EXTERNAL_MMXEXT(cpu_flags)) {
        c->ac3_exponent_min = ff_ac3_exponent_min_mmxext;
        c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmxext;
        if (bit_exact) {
            c->apply_window_int16 = ff_apply_window_int16_mmxext;
        } else {
            c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
        }
    }
    if (EXTERNAL_SSE(cpu_flags)) {
        c->float_to_fixed24 = ff_float_to_fixed24_sse;
    }
    if (EXTERNAL_SSE2(cpu_flags)) {
        c->ac3_exponent_min = ff_ac3_exponent_min_sse2;
        c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_sse2;
        c->float_to_fixed24 = ff_float_to_fixed24_sse2;
        c->compute_mantissa_size = ff_ac3_compute_mantissa_size_sse2;
        c->extract_exponents = ff_ac3_extract_exponents_sse2;
        if (bit_exact) {
            c->apply_window_int16 = ff_apply_window_int16_sse2;
        }
    }

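    /* Presumably the SSE2 shift functions only beat the MMX versions on
     * CPUs where SSE2 is not flagged as slow, hence the SSE2_FAST guard. */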
    if (EXTERNAL_SSE2_FAST(cpu_flags)) {
        c->ac3_lshift_int16 = ff_ac3_lshift_int16_sse2;
        c->ac3_rshift_int32 = ff_ac3_rshift_int32_sse2;
        if (!bit_exact) {
            c->apply_window_int16 = ff_apply_window_int16_round_sse2;
        }
    }

    if (EXTERNAL_SSSE3(cpu_flags)) {
        c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_ssse3;
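        /* Some SSSE3 instructions are slow on the Atom, so use the
         * Atom-tuned window function there and keep the SSE2
         * extract_exponents. */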
        if (cpu_flags & AV_CPU_FLAG_ATOM) {
            c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
        } else {
            c->extract_exponents = ff_ac3_extract_exponents_ssse3;
            c->apply_window_int16 = ff_apply_window_int16_ssse3;
        }
    }

#if HAVE_SSE_INLINE && HAVE_7REGS
    if (INLINE_SSE(cpu_flags)) {
        c->downmix = ac3_downmix_sse;
    }
#endif
}