/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

/* pixel operations */
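/* Packed constants replicated across every 16-bit (pw) or 8-bit (pb) lane of
 * an MMX (uint64_t) or SSE (xmm_reg) register; the MMX/SSE assembly loads
 * them as memory operands for rounding biases, scale factors and masks. */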
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15)   =   0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_17)   = { 0x0011001100110011ULL, 0x0011001100110011ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20)   =   0x0014001400140014ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42)   =   0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53)   =   0x0035003500350035ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96)   =   0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128)  =   0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255)  =   0x00ff00ff00ff00ffULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_512)  = { 0x0200020002000200ULL, 0x0200020002000200ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL };

DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F)   =   0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC)   =   0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };


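/* The *_l2 averaging and MPEG-4 qpel lowpass helpers declared below are
 * implemented in external assembly; the QPEL_OP macro further down combines
 * them into the full set of quarter-pel motion-compensation functions. */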
void ff_put_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1,
                                     uint8_t *src2, int dstStride,
                                     int src1Stride, int h);
void ff_avg_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_avg_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                                      int dstStride, int src1Stride, int h);
void ff_put_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride,
                                                 int h);
void ff_put_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                        int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                        int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride,
                                                int h);
void ff_put_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_avg_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride);
void ff_put_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                        int dstStride, int srcStride);
void ff_avg_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                        int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride);
#define ff_put_no_rnd_pixels16_mmxext ff_put_pixels16_mmxext
#define ff_put_no_rnd_pixels8_mmxext ff_put_pixels8_mmxext


#if HAVE_INLINE_ASM

/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)
#define OP_AVG(a, b, c, e)              PAVGB_MMX(a, b, c, e)

#include "rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

/***********************************/
/* standard MMX */

void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;

    /* read the pixels */
    p   = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile (
        "movq      (%3), %%mm0          \n\t"
        "movq     8(%3), %%mm1          \n\t"
        "movq    16(%3), %%mm2          \n\t"
        "movq    24(%3), %%mm3          \n\t"
        "movq    32(%3), %%mm4          \n\t"
        "movq    40(%3), %%mm5          \n\t"
        "movq    48(%3), %%mm6          \n\t"
        "movq    56(%3), %%mm7          \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "packuswb %%mm3, %%mm2          \n\t"
        "packuswb %%mm5, %%mm4          \n\t"
        "packuswb %%mm7, %%mm6          \n\t"
        "movq     %%mm0, (%0)           \n\t"
        "movq     %%mm2, (%0, %1)       \n\t"
        "movq     %%mm4, (%0, %1, 2)    \n\t"
        "movq     %%mm6, (%0, %2)       \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
           "r"(p)
        : "memory");
    pix += line_size * 4;
    p   += 32;

    // If this were an exact copy of the code above, the compiler would
    // generate some very strange code, hence the "r" constraints.
    __asm__ volatile (
        "movq       (%3), %%mm0         \n\t"
        "movq      8(%3), %%mm1         \n\t"
        "movq     16(%3), %%mm2         \n\t"
        "movq     24(%3), %%mm3         \n\t"
        "movq     32(%3), %%mm4         \n\t"
        "movq     40(%3), %%mm5         \n\t"
        "movq     48(%3), %%mm6         \n\t"
        "movq     56(%3), %%mm7         \n\t"
        "packuswb  %%mm1, %%mm0         \n\t"
        "packuswb  %%mm3, %%mm2         \n\t"
        "packuswb  %%mm5, %%mm4         \n\t"
        "packuswb  %%mm7, %%mm6         \n\t"
        "movq      %%mm0, (%0)          \n\t"
        "movq      %%mm2, (%0, %1)      \n\t"
        "movq      %%mm4, (%0, %1, 2)   \n\t"
        "movq      %%mm6, (%0, %2)      \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
        : "memory");
}

#define put_signed_pixels_clamped_mmx_half(off)             \
    "movq          "#off"(%2), %%mm1        \n\t"           \
    "movq     16 + "#off"(%2), %%mm2        \n\t"           \
    "movq     32 + "#off"(%2), %%mm3        \n\t"           \
    "movq     48 + "#off"(%2), %%mm4        \n\t"           \
    "packsswb  8 + "#off"(%2), %%mm1        \n\t"           \
    "packsswb 24 + "#off"(%2), %%mm2        \n\t"           \
    "packsswb 40 + "#off"(%2), %%mm3        \n\t"           \
    "packsswb 56 + "#off"(%2), %%mm4        \n\t"           \
    "paddb              %%mm0, %%mm1        \n\t"           \
    "paddb              %%mm0, %%mm2        \n\t"           \
    "paddb              %%mm0, %%mm3        \n\t"           \
    "paddb              %%mm0, %%mm4        \n\t"           \
    "movq               %%mm1, (%0)         \n\t"           \
    "movq               %%mm2, (%0, %3)     \n\t"           \
    "movq               %%mm3, (%0, %3, 2)  \n\t"           \
    "movq               %%mm4, (%0, %1)     \n\t"

void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                                      int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_pb_80)", %%mm0     \n\t"
        "lea         (%3, %3, 2), %1        \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea         (%0, %3, 4), %0        \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        : "+&r"(pixels), "=&r"(line_skip3)
        : "r"(block), "r"(line_skip)
        : "memory");
}

void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p   = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile (
            "movq        (%2), %%mm0    \n\t"
            "movq       8(%2), %%mm1    \n\t"
            "movq      16(%2), %%mm2    \n\t"
            "movq      24(%2), %%mm3    \n\t"
            "movq          %0, %%mm4    \n\t"
            "movq          %1, %%mm6    \n\t"
            "movq       %%mm4, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm4    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm4, %%mm0    \n\t"
            "paddsw     %%mm5, %%mm1    \n\t"
            "movq       %%mm6, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm6    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm6, %%mm2    \n\t"
            "paddsw     %%mm5, %%mm3    \n\t"
            "packuswb   %%mm1, %%mm0    \n\t"
            "packuswb   %%mm3, %%mm2    \n\t"
            "movq       %%mm0, %0       \n\t"
            "movq       %%mm2, %1       \n\t"
            : "+m"(*pix), "+m"(*(pix + line_size))
            : "r"(p)
            : "memory");
        pix += line_size * 2;
        p   += 16;
    } while (--i);
}

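/* Copy an 8-pixel-wide block. The loop handles four rows per iteration
 * (two rows, advance, two more rows), so h is expected to be a multiple
 * of 4. */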
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
                            ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea   (%3, %3), %%"REG_a"      \n\t"
        ".p2align     3                 \n\t"
        "1:                             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq     %%mm0, (%2)           \n\t"
        "movq     %%mm1, (%2, %3)       \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq     %%mm0, (%2)           \n\t"
        "movq     %%mm1, (%2, %3)       \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "subl        $4, %0             \n\t"
        "jnz         1b                 \n\t"
        : "+g"(h), "+r"(pixels),  "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

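/* Same as put_pixels8_mmx, but 16 pixels wide: two movq loads/stores per
 * row. */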
static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea   (%3, %3), %%"REG_a"      \n\t"
        ".p2align     3                 \n\t"
        "1:                             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq 8(%1    ), %%mm4          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq 8(%1, %3), %%mm5          \n\t"
        "movq     %%mm0,  (%2)          \n\t"
        "movq     %%mm4, 8(%2)          \n\t"
        "movq     %%mm1,  (%2, %3)      \n\t"
        "movq     %%mm5, 8(%2, %3)      \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq 8(%1    ), %%mm4          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq 8(%1, %3), %%mm5          \n\t"
        "movq     %%mm0,  (%2)          \n\t"
        "movq     %%mm4, 8(%2)          \n\t"
        "movq     %%mm1,  (%2, %3)      \n\t"
        "movq     %%mm5, 8(%2, %3)      \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "subl        $4, %0             \n\t"
        "jnz         1b                 \n\t"
        : "+g"(h), "+r"(pixels),  "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

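/* Zero 'n' consecutive 64-coefficient int16_t blocks (128 bytes each).
 * The index register starts at -128 * n and counts up towards zero, so
 * the 'js' branch doubles as the loop condition. */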
#define CLEAR_BLOCKS(name, n)                           \
static void name(int16_t *blocks)                       \
{                                                       \
    __asm__ volatile (                                  \
        "pxor %%mm7, %%mm7              \n\t"           \
        "mov     %1,        %%"REG_a"   \n\t"           \
        "1:                             \n\t"           \
        "movq %%mm7,   (%0, %%"REG_a")  \n\t"           \
        "movq %%mm7,  8(%0, %%"REG_a")  \n\t"           \
        "movq %%mm7, 16(%0, %%"REG_a")  \n\t"           \
        "movq %%mm7, 24(%0, %%"REG_a")  \n\t"           \
        "add    $32, %%"REG_a"          \n\t"           \
        "js      1b                     \n\t"           \
        :: "r"(((uint8_t *)blocks) + 128 * n),          \
           "i"(-128 * n)                                \
        : "%"REG_a                                      \
        );                                              \
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)

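/* SSE versions: clear 16 bytes per movaps store; the block pointer is
 * assumed to be 16-byte aligned, as movaps requires. */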
static void clear_block_sse(int16_t *block)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0          \n"
        "movaps %%xmm0,    (%0)         \n"
        "movaps %%xmm0,  16(%0)         \n"
        "movaps %%xmm0,  32(%0)         \n"
        "movaps %%xmm0,  48(%0)         \n"
        "movaps %%xmm0,  64(%0)         \n"
        "movaps %%xmm0,  80(%0)         \n"
        "movaps %%xmm0,  96(%0)         \n"
        "movaps %%xmm0, 112(%0)         \n"
        :: "r"(block)
        : "memory"
    );
}

static void clear_blocks_sse(int16_t *blocks)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0              \n"
        "mov        %1,         %%"REG_a"   \n"
        "1:                                 \n"
        "movaps %%xmm0,    (%0, %%"REG_a")  \n"
        "movaps %%xmm0,  16(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  32(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  48(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  64(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  80(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  96(%0, %%"REG_a")  \n"
        "movaps %%xmm0, 112(%0, %%"REG_a")  \n"
        "add      $128,         %%"REG_a"   \n"
        "js         1b                      \n"
        :: "r"(((uint8_t *)blocks) + 128 * 6),
           "i"(-128 * 6)
        : "%"REG_a
    );
}

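/* dst[i] += src[i] for i < w: 16 bytes per MMX iteration, then a scalar
 * loop for the remaining tail bytes. */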
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
{
    x86_reg i = 0;
    __asm__ volatile (
        "jmp          2f                \n\t"
        "1:                             \n\t"
        "movq   (%1, %0), %%mm0         \n\t"
        "movq   (%2, %0), %%mm1         \n\t"
        "paddb     %%mm0, %%mm1         \n\t"
        "movq      %%mm1, (%2, %0)      \n\t"
        "movq  8(%1, %0), %%mm0         \n\t"
        "movq  8(%2, %0), %%mm1         \n\t"
        "paddb     %%mm0, %%mm1         \n\t"
        "movq      %%mm1, 8(%2, %0)     \n\t"
        "add         $16, %0            \n\t"
        "2:                             \n\t"
        "cmp          %3, %0            \n\t"
        "js           1b                \n\t"
        : "+r"(i)
        : "r"(src), "r"(dst), "r"((x86_reg)w - 15)
    );
    for ( ; i < w; i++)
        dst[i + 0] += src[i + 0];
}

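/* HuffYUV median prediction using cmov: for each byte,
 * dst = diff + median(left, top, left + top - left_top); *left and
 * *left_top carry the running prediction state across calls. */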
#if HAVE_7REGS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
                                            const uint8_t *diff, int w,
                                            int *left, int *left_top)
{
    x86_reg w2 = -w;
    x86_reg x;
    int l  = *left     & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile (
        "mov          %7, %3            \n"
        "1:                             \n"
        "movzbl (%3, %4), %2            \n"
        "mov          %2, %k3           \n"
        "sub         %b1, %b3           \n"
        "add         %b0, %b3           \n"
        "mov          %2, %1            \n"
        "cmp          %0, %2            \n"
        "cmovg        %0, %2            \n"
        "cmovg        %1, %0            \n"
        "cmp         %k3, %0            \n"
        "cmovg       %k3, %0            \n"
        "mov          %7, %3            \n"
        "cmp          %2, %0            \n"
        "cmovl        %2, %0            \n"
        "add    (%6, %4), %b0           \n"
        "mov         %b0, (%5, %4)      \n"
        "inc          %4                \n"
        "jl           1b                \n"
        : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        : "r"(dst + w), "r"(diff + w), "rm"(top + w)
    );
    *left     = l;
    *left_top = tl;
}
#endif

/* Draw the edges of width 'w' of an image of size width x height.
 * This MMX version can only handle w == 8 or w == 16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
                           int w, int h, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if (w == 8) {
        __asm__ volatile (
            "1:                             \n\t"
            "movd            (%0), %%mm0    \n\t"
            "punpcklbw      %%mm0, %%mm0    \n\t"
            "punpcklwd      %%mm0, %%mm0    \n\t"
            "punpckldq      %%mm0, %%mm0    \n\t"
            "movq           %%mm0, -8(%0)   \n\t"
            "movq      -8(%0, %2), %%mm1    \n\t"
            "punpckhbw      %%mm1, %%mm1    \n\t"
            "punpckhwd      %%mm1, %%mm1    \n\t"
            "punpckhdq      %%mm1, %%mm1    \n\t"
            "movq           %%mm1, (%0, %2) \n\t"
            "add               %1, %0       \n\t"
            "cmp               %3, %0       \n\t"
            "jb                1b           \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    } else {
        __asm__ volatile (
            "1:                                 \n\t"
            "movd            (%0), %%mm0        \n\t"
            "punpcklbw      %%mm0, %%mm0        \n\t"
            "punpcklwd      %%mm0, %%mm0        \n\t"
            "punpckldq      %%mm0, %%mm0        \n\t"
            "movq           %%mm0, -8(%0)       \n\t"
            "movq           %%mm0, -16(%0)      \n\t"
            "movq      -8(%0, %2), %%mm1        \n\t"
            "punpckhbw      %%mm1, %%mm1        \n\t"
            "punpckhwd      %%mm1, %%mm1        \n\t"
            "punpckhdq      %%mm1, %%mm1        \n\t"
            "movq           %%mm1,  (%0, %2)    \n\t"
            "movq           %%mm1, 8(%0, %2)    \n\t"
            "add               %1, %0           \n\t"
            "cmp               %3, %0           \n\t"
            "jb                1b               \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    }

    /* top and bottom (and hopefully also the corners) */
    if (sides & EDGE_TOP) {
        for (i = 0; i < h; i += 4) {
            ptr = buf - (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
                  "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w)
                );
        }
    }

    if (sides & EDGE_BOTTOM) {
        for (i = 0; i < h; i += 4) {
            ptr = last_line + (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)last_line - (x86_reg)ptr - w),
                  "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
                  "r"(ptr + width + 2 * w)
                );
        }
    }
}
#endif /* HAVE_INLINE_ASM */


#if HAVE_YASM
static void ff_avg_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
                                   int line_size, int h)
{
    ff_avg_pixels8_mmxext(block,     pixels,     line_size, h);
    ff_avg_pixels8_mmxext(block + 8, pixels + 8, line_size, h);
}

static void ff_put_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
                                   ptrdiff_t line_size, int h)
{
    ff_put_pixels8_mmxext(block,     pixels,     line_size, h);
    ff_put_pixels8_mmxext(block + 8, pixels + 8, line_size, h);
}

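/* QPEL_OP generates the 16 quarter-pel positions (mc00 .. mc33) for 8x8 and
 * 16x16 blocks. Apart from mc00 (plain copy) and the pure lowpass cases,
 * each position combines the external h/v lowpass filters with an l2
 * average against the source or an intermediate half-pel buffer. */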
#define QPEL_OP(OPNAME, ROUNDER, RND, MMX)                              \
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);              \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
                                                   stride, 8);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
                                        stride, stride, 8);             \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride,    \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
                                                   stride, 8);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride,     \
                                        stride, 8);                     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
                                                   8, stride);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
                                        stride, stride, 8);             \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src,            \
                                                   stride, stride);     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
                                                   8, stride);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride,\
                                        stride, 8);                     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH,              \
                                        8, stride, 9);                  \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[9];                                                   \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src,  \
                                           ptrdiff_t stride)            \
{                                                                       \
    ff_ ## OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);            \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
                                                    stride, 16);        \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
                                         stride, 16);                   \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src,           \
                                                    stride, stride, 16);\
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
                                                    stride, 16);        \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half,            \
                                         stride, stride, 16);           \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
                                                    stride);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
                                         stride, 16);                   \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src,           \
                                                    stride, stride);    \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
                                                    stride);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half,         \
                                         stride, stride, 16);           \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}

QPEL_OP(put_,          ff_pw_16, _,        mmxext)
QPEL_OP(avg_,          ff_pw_16, _,        mmxext)
QPEL_OP(put_no_rnd_,   ff_pw_15, _no_rnd_, mmxext)
#endif /* HAVE_YASM */


#if HAVE_INLINE_ASM
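/* RV40 (3,3) subpel position: it reduces to the plain half-pel average in
 * both directions, so these wrappers simply forward to the generic xy2
 * helpers. */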
void ff_put_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels8_xy2_mmx(dst, src, stride, 8);
}
void ff_put_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels16_xy2_mmx(dst, src, stride, 16);
}
void ff_avg_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels8_xy2_mmx(dst, src, stride, 8);
}
void ff_avg_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels16_xy2_mmx(dst, src, stride, 16);
}

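/* Global motion compensation: bilinear interpolation with per-pixel offsets
 * derived from the affine parameters (ox, oy, dxx, dxy, dyx, dyy). Cases the
 * MMX path cannot handle (non-constant fullpel offset, more than 16 bits of
 * subpel precision, or out-of-picture reads) fall back to ff_gmc_c(). */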
959 static void gmc_mmx(uint8_t *dst, uint8_t *src,
960                     int stride, int h, int ox, int oy,
961                     int dxx, int dxy, int dyx, int dyy,
962                     int shift, int r, int width, int height)
963 {
964     const int w    = 8;
965     const int ix   = ox  >> (16 + shift);
966     const int iy   = oy  >> (16 + shift);
967     const int oxs  = ox  >> 4;
968     const int oys  = oy  >> 4;
969     const int dxxs = dxx >> 4;
970     const int dxys = dxy >> 4;
971     const int dyxs = dyx >> 4;
972     const int dyys = dyy >> 4;
973     const uint16_t r4[4]   = { r, r, r, r };
974     const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
975     const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
976     const uint64_t shift2 = 2 * shift;
977     int x, y;
978
979     const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
980     const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
981     const int dxh = dxy * (h - 1);
982     const int dyw = dyx * (w - 1);
983     if ( // non-constant fullpel offset (3% of blocks)
984         ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
985          (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
986         // uses more than 16 bits of subpel mv (only at huge resolution)
987         || (dxx | dxy | dyx | dyy) & 15 ||
988         (unsigned)ix >= width  - w ||
989         (unsigned)iy >= height - h) {
990         // FIXME could still use mmx for some of the rows
991         ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
992                  shift, r, width, height);
993         return;
994     }
995
996     src += ix + iy * stride;
997
998     __asm__ volatile (
999         "movd         %0, %%mm6         \n\t" // mm6 = 1 << shift
1000         "pxor      %%mm7, %%mm7         \n\t" // mm7 = 0, used to unpack bytes to words
1001         "punpcklwd %%mm6, %%mm6         \n\t"
1002         "punpcklwd %%mm6, %%mm6         \n\t" // broadcast s = 1 << shift to all four words
1003         :: "r"(1<<shift)
1004     );
1005
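    /* Process the block four output pixels at a time: dx4/dy4 hold 16-bit
     * sub-pel position accumulators for four horizontally adjacent pixels.
     * For every row, the first asm block advances them by the per-row steps
     * (dxy4/dyy4) and extracts the bilinear weights; the second block blends
     * the 2x2 source neighbourhood and stores four destination bytes. */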
1006     for (x = 0; x < w; x += 4) {
1007         uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
1008                             oxs - dxys + dxxs * (x + 1),
1009                             oxs - dxys + dxxs * (x + 2),
1010                             oxs - dxys + dxxs * (x + 3) };
1011         uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
1012                             oys - dyys + dyxs * (x + 1),
1013                             oys - dyys + dyxs * (x + 2),
1014                             oys - dyys + dyxs * (x + 3) };
1015
1016         for (y = 0; y < h; y++) {
1017             __asm__ volatile (
1018                 "movq      %0, %%mm4    \n\t" // mm4 = dx4: x sub-pel accumulators
1019                 "movq      %1, %%mm5    \n\t" // mm5 = dy4: y sub-pel accumulators
1020                 "paddw     %2, %%mm4    \n\t" // advance by dxy4 (per-row x step)
1021                 "paddw     %3, %%mm5    \n\t" // advance by dyy4 (per-row y step)
1022                 "movq   %%mm4, %0       \n\t" // store the accumulators for the next row
1023                 "movq   %%mm5, %1       \n\t"
1024                 "psrlw    $12, %%mm4    \n\t" // keep the sub-pel fractions (bilinear weights)
1025                 "psrlw    $12, %%mm5    \n\t"
1026                 : "+m"(*dx4), "+m"(*dy4)
1027                 : "m"(*dxy4), "m"(*dyy4)
1028             );
1029
1030             __asm__ volatile (
1031                 "movq      %%mm6, %%mm2 \n\t"
1032                 "movq      %%mm6, %%mm1 \n\t"
1033                 "psubw     %%mm4, %%mm2 \n\t"
1034                 "psubw     %%mm5, %%mm1 \n\t"
1035                 "movq      %%mm2, %%mm0 \n\t"
1036                 "movq      %%mm4, %%mm3 \n\t"
1037                 "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
1038                 "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
1039                 "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
1040                 "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)
1041
1042                 "movd         %4, %%mm5 \n\t"
1043                 "movd         %3, %%mm4 \n\t"
1044                 "punpcklbw %%mm7, %%mm5 \n\t"
1045                 "punpcklbw %%mm7, %%mm4 \n\t"
1046                 "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
1047                 "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
1048
1049                 "movd         %2, %%mm5 \n\t"
1050                 "movd         %1, %%mm4 \n\t"
1051                 "punpcklbw %%mm7, %%mm5 \n\t"
1052                 "punpcklbw %%mm7, %%mm4 \n\t"
1053                 "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
1054                 "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
1055                 "paddw        %5, %%mm1 \n\t" // + rounding constant r
1056                 "paddw     %%mm3, %%mm2 \n\t"
1057                 "paddw     %%mm1, %%mm0 \n\t"
1058                 "paddw     %%mm2, %%mm0 \n\t" // sum of the four weighted taps + r
1059
1060                 "psrlw        %6, %%mm0 \n\t" // >> (2 * shift)
1061                 "packuswb  %%mm0, %%mm0 \n\t" // saturate words to unsigned bytes
1062                 "movd      %%mm0, %0    \n\t" // store four destination pixels
1063
1064                 : "=m"(dst[x + y * stride])
1065                 : "m"(src[0]), "m"(src[1]),
1066                   "m"(src[stride]), "m"(src[stride + 1]),
1067                   "m"(*r4), "m"(shift2)
1068             );
1069             src += stride;
1070         }
1071         src += 4 - h * stride;
1072     }
1073 }
1074
1075 /* CAVS-specific: the fullpel (mc00) cases reduce to plain 8x8 / 16x16 copy and average */
1076 void ff_put_cavs_qpel8_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
1077 {
1078     put_pixels8_mmx(dst, src, stride, 8);
1079 }
1080
1081 void ff_avg_cavs_qpel8_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
1082 {
1083     avg_pixels8_mmx(dst, src, stride, 8);
1084 }
1085
1086 void ff_put_cavs_qpel16_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
1087 {
1088     put_pixels16_mmx(dst, src, stride, 16);
1089 }
1090
1091 void ff_avg_cavs_qpel16_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
1092 {
1093     avg_pixels16_mmx(dst, src, stride, 16);
1094 }
1095
1096 /* VC-1-specific: the fullpel mspel case is a plain 8x8 copy (rnd is unused here) */
1097 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src,
1098                                ptrdiff_t stride, int rnd)
1099 {
1100     put_pixels8_mmx(dst, src, stride, 8);
1101 }
1102
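/* Clamp every float in src to [min, max] and write the result to dst.
 * Processes 16 floats (four XMM registers) per iteration, walking backwards
 * from the end of the buffers; movaps requires 16-byte aligned pointers and
 * len is assumed to be a multiple of 16. */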
1103 static void vector_clipf_sse(float *dst, const float *src,
1104                              float min, float max, int len)
1105 {
1106     x86_reg i = (len - 16) * 4;
1107     __asm__ volatile (
1108         "movss          %3, %%xmm4      \n\t"
1109         "movss          %4, %%xmm5      \n\t"
1110         "shufps $0, %%xmm4, %%xmm4      \n\t"
1111         "shufps $0, %%xmm5, %%xmm5      \n\t"
1112         "1:                             \n\t"
1113         "movaps   (%2, %0), %%xmm0      \n\t" // 3/1 on intel
1114         "movaps 16(%2, %0), %%xmm1      \n\t"
1115         "movaps 32(%2, %0), %%xmm2      \n\t"
1116         "movaps 48(%2, %0), %%xmm3      \n\t"
1117         "maxps      %%xmm4, %%xmm0      \n\t"
1118         "maxps      %%xmm4, %%xmm1      \n\t"
1119         "maxps      %%xmm4, %%xmm2      \n\t"
1120         "maxps      %%xmm4, %%xmm3      \n\t"
1121         "minps      %%xmm5, %%xmm0      \n\t"
1122         "minps      %%xmm5, %%xmm1      \n\t"
1123         "minps      %%xmm5, %%xmm2      \n\t"
1124         "minps      %%xmm5, %%xmm3      \n\t"
1125         "movaps     %%xmm0,   (%1, %0)  \n\t"
1126         "movaps     %%xmm1, 16(%1, %0)  \n\t"
1127         "movaps     %%xmm2, 32(%1, %0)  \n\t"
1128         "movaps     %%xmm3, 48(%1, %0)  \n\t"
1129         "sub           $64, %0          \n\t"
1130         "jge            1b              \n\t"
1131         : "+&r"(i)
1132         : "r"(dst), "r"(src), "m"(min), "m"(max)
1133         : "memory"
1134     );
1135 }
1136
1137 #endif /* HAVE_INLINE_ASM */
1138
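/* Prototypes for routines implemented in external (yasm) assembly; they are
 * wired into the DSPContext by the init functions below when the matching
 * HAVE_*_EXTERNAL / HAVE_YASM configuration and CPU flags are present. */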
1139 void ff_h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale);
1140 void ff_h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale);
1141
1142 int32_t ff_scalarproduct_int16_mmxext(const int16_t *v1, const int16_t *v2,
1143                                       int order);
1144 int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2,
1145                                     int order);
1146 int32_t ff_scalarproduct_and_madd_int16_mmxext(int16_t *v1, const int16_t *v2,
1147                                                const int16_t *v3,
1148                                                int order, int mul);
1149 int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2,
1150                                              const int16_t *v3,
1151                                              int order, int mul);
1152 int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
1153                                               const int16_t *v3,
1154                                               int order, int mul);
1155
1156 void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
1157                                         const int16_t *window, unsigned int len);
1158 void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
1159                                       const int16_t *window, unsigned int len);
1160 void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
1161                                   const int16_t *window, unsigned int len);
1162 void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
1163                                 const int16_t *window, unsigned int len);
1164 void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
1165                                  const int16_t *window, unsigned int len);
1166 void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
1167                                       const int16_t *window, unsigned int len);
1168
1169 void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w);
1170 void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w);
1171
1172 void ff_add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top,
1173                                           const uint8_t *diff, int w,
1174                                           int *left, int *left_top);
1175 int  ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src,
1176                                        int w, int left);
1177 int  ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src,
1178                                       int w, int left);
1179
1180 void ff_vector_clip_int32_mmx     (int32_t *dst, const int32_t *src,
1181                                    int32_t min, int32_t max, unsigned int len);
1182 void ff_vector_clip_int32_sse2    (int32_t *dst, const int32_t *src,
1183                                    int32_t min, int32_t max, unsigned int len);
1184 void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src,
1185                                    int32_t min, int32_t max, unsigned int len);
1186 void ff_vector_clip_int32_sse4    (int32_t *dst, const int32_t *src,
1187                                    int32_t min, int32_t max, unsigned int len);
1188
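/* Fill all 16 quarter-pel entries of a motion-compensation function table.
 * Entry x + 4*y holds the function for sub-pel position (x, y), matching the
 * mcXY suffix.  For example, SET_QPEL_FUNCS(put_qpel, 0, 16, mmxext, )
 * expands (in part) to
 *     c->put_qpel_pixels_tab[0][ 0] = put_qpel16_mc00_mmxext;
 *     c->put_qpel_pixels_tab[0][ 1] = put_qpel16_mc10_mmxext;
 *     ...
 *     c->put_qpel_pixels_tab[0][15] = put_qpel16_mc33_mmxext; */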
1189 #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX)                          \
1190     do {                                                                     \
1191     c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
1192     c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
1193     c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
1194     c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
1195     c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
1196     c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
1197     c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
1198     c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
1199     c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
1200     c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
1201     c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
1202     c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
1203     c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
1204     c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
1205     c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
1206     c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
1207     } while (0)
1208
1209 static av_cold void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx,
1210                                      int mm_flags)
1211 {
1212 #if HAVE_INLINE_ASM
1213     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
1214
1215     c->put_pixels_clamped        = ff_put_pixels_clamped_mmx;
1216     c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
1217     c->add_pixels_clamped        = ff_add_pixels_clamped_mmx;
1218
1219     if (!high_bit_depth) {
1220         c->clear_block  = clear_block_mmx;
1221         c->clear_blocks = clear_blocks_mmx;
1222         c->draw_edges   = draw_edges_mmx;
1223
1224         switch (avctx->idct_algo) {
1225         case FF_IDCT_AUTO:
1226         case FF_IDCT_SIMPLEMMX:
1227             c->idct_put              = ff_simple_idct_put_mmx;
1228             c->idct_add              = ff_simple_idct_add_mmx;
1229             c->idct                  = ff_simple_idct_mmx;
1230             c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
1231             break;
1232         case FF_IDCT_XVIDMMX:
1233             c->idct_put              = ff_idct_xvid_mmx_put;
1234             c->idct_add              = ff_idct_xvid_mmx_add;
1235             c->idct                  = ff_idct_xvid_mmx;
1236             break;
1237         }
1238     }
1239
1240     c->gmc = gmc_mmx;
1241
1242     c->add_bytes = add_bytes_mmx;
1243 #endif /* HAVE_INLINE_ASM */
1244
1245 #if HAVE_YASM
1246     if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
1247         c->h263_v_loop_filter = ff_h263_v_loop_filter_mmx;
1248         c->h263_h_loop_filter = ff_h263_h_loop_filter_mmx;
1249     }
1250
1251     c->vector_clip_int32 = ff_vector_clip_int32_mmx;
1252 #endif /* HAVE_YASM */
1253 }
1254
1255 static av_cold void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx,
1256                                         int mm_flags)
1257 {
1258 #if HAVE_INLINE_ASM
1259     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
1260
1261     if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
1262         c->idct_put = ff_idct_xvid_mmxext_put;
1263         c->idct_add = ff_idct_xvid_mmxext_add;
1264         c->idct     = ff_idct_xvid_mmxext;
1265     }
1266 #endif /* HAVE_INLINE_ASM */
1267
1268 #if HAVE_MMXEXT_EXTERNAL
1269     SET_QPEL_FUNCS(avg_qpel,        0, 16, mmxext, );
1270     SET_QPEL_FUNCS(avg_qpel,        1,  8, mmxext, );
1271
1272     SET_QPEL_FUNCS(put_qpel,        0, 16, mmxext, );
1273     SET_QPEL_FUNCS(put_qpel,        1,  8, mmxext, );
1274     SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmxext, );
1275     SET_QPEL_FUNCS(put_no_rnd_qpel, 1,  8, mmxext, );
1276
1277     /* slower than cmov version on AMD */
1278     if (!(mm_flags & AV_CPU_FLAG_3DNOW))
1279         c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmxext;
1280
1281     c->scalarproduct_int16          = ff_scalarproduct_int16_mmxext;
1282     c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmxext;
1283
1284     if (avctx->flags & CODEC_FLAG_BITEXACT) {
1285         c->apply_window_int16 = ff_apply_window_int16_mmxext;
1286     } else {
1287         c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
1288     }
1289 #endif /* HAVE_MMXEXT_EXTERNAL */
1290 }
1291
1292 static av_cold void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx,
1293                                      int mm_flags)
1294 {
1295 #if HAVE_INLINE_ASM
1296     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
1297
1298     if (!high_bit_depth) {
1299         if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)) {
1300             /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
1301             c->clear_block  = clear_block_sse;
1302             c->clear_blocks = clear_blocks_sse;
1303         }
1304     }
1305
1306     c->vector_clipf = vector_clipf_sse;
1307 #endif /* HAVE_INLINE_ASM */
1308 }
1309
1310 static av_cold void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
1311                                       int mm_flags)
1312 {
1313 #if HAVE_SSE2_INLINE
1314     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
1315
1316     if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
1317         c->idct_put              = ff_idct_xvid_sse2_put;
1318         c->idct_add              = ff_idct_xvid_sse2_add;
1319         c->idct                  = ff_idct_xvid_sse2;
1320         c->idct_permutation_type = FF_SSE2_IDCT_PERM;
1321     }
1322 #endif /* HAVE_SSE2_INLINE */
1323
1324 #if HAVE_SSE2_EXTERNAL
1325     c->scalarproduct_int16          = ff_scalarproduct_int16_sse2;
1326     c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
1327     if (mm_flags & AV_CPU_FLAG_ATOM) {
1328         c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
1329     } else {
1330         c->vector_clip_int32 = ff_vector_clip_int32_sse2;
1331     }
1332     if (avctx->flags & CODEC_FLAG_BITEXACT) {
1333         c->apply_window_int16 = ff_apply_window_int16_sse2;
1334     } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
1335         c->apply_window_int16 = ff_apply_window_int16_round_sse2;
1336     }
1337     c->bswap_buf = ff_bswap32_buf_sse2;
1338 #endif /* HAVE_SSE2_EXTERNAL */
1339 }
1340
1341 static av_cold void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
1342                                        int mm_flags)
1343 {
1344 #if HAVE_SSSE3_EXTERNAL
1345     c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
1346     if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
1347         c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
1348
1349     if (mm_flags & AV_CPU_FLAG_ATOM)
1350         c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
1351     else
1352         c->apply_window_int16 = ff_apply_window_int16_ssse3;
1353     if (!(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW))) // cachesplit
1354         c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
1355     c->bswap_buf = ff_bswap32_buf_ssse3;
1356 #endif /* HAVE_SSSE3_EXTERNAL */
1357 }
1358
1359 static av_cold void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
1360                                       int mm_flags)
1361 {
1362 #if HAVE_SSE4_EXTERNAL
1363     c->vector_clip_int32 = ff_vector_clip_int32_sse4;
1364 #endif /* HAVE_SSE4_EXTERNAL */
1365 }
1366
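/* Entry point: query the CPU flags once and run the per-level init functions
 * in increasing order of capability.  Each level may overwrite pointers set
 * by the previous one, so the fastest implementation supported by the host
 * CPU ends up in the DSPContext. */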
1367 av_cold void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
1368 {
1369     int mm_flags = av_get_cpu_flags();
1370
1371 #if HAVE_7REGS && HAVE_INLINE_ASM
1372     if (mm_flags & AV_CPU_FLAG_CMOV)
1373         c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
1374 #endif
1375
1376     if (mm_flags & AV_CPU_FLAG_MMX)
1377         dsputil_init_mmx(c, avctx, mm_flags);
1378
1379     if (mm_flags & AV_CPU_FLAG_MMXEXT)
1380         dsputil_init_mmxext(c, avctx, mm_flags);
1381
1382     if (mm_flags & AV_CPU_FLAG_SSE)
1383         dsputil_init_sse(c, avctx, mm_flags);
1384
1385     if (mm_flags & AV_CPU_FLAG_SSE2)
1386         dsputil_init_sse2(c, avctx, mm_flags);
1387
1388     if (mm_flags & AV_CPU_FLAG_SSSE3)
1389         dsputil_init_ssse3(c, avctx, mm_flags);
1390
1391     if (mm_flags & AV_CPU_FLAG_SSE4)
1392         dsputil_init_sse4(c, avctx, mm_flags);
1393
1394     if (CONFIG_ENCODERS)
1395         ff_dsputilenc_init_mmx(c, avctx);
1396 }