/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "libavcodec/videodsp.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"
#include "diracdsp_mmx.h"

//#undef NDEBUG
//#include <assert.h>

/* pixel operations */
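/* Each ff_pw_* constant replicates a 16-bit value across the four lanes of
 * an MMX register (eight lanes for the xmm_reg variants); ff_pb_* replicate
 * a byte and ff_pd_* a double.  They are splat constants for the packed
 * arithmetic in this file and in the assembly sources. */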
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15)   =   0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_17)   = { 0x0011001100110011ULL, 0x0011001100110011ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20)   =   0x0014001400140014ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42)   =   0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53)   =   0x0035003500350035ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96)   =   0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128)  =   0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255)  =   0x00ff00ff00ff00ffULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_512)  = { 0x0200020002000200ULL, 0x0200020002000200ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL };

DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F)   =   0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC)   =   0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };


#if HAVE_YASM
void ff_put_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1,
                                     uint8_t *src2, int dstStride,
                                     int src1Stride, int h);
void ff_avg_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_avg_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                                      int dstStride, int src1Stride, int h);

static void ff_put_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
                                   ptrdiff_t line_size, int h)
{
    ff_put_pixels8_mmxext(block,     pixels,     line_size, h);
    ff_put_pixels8_mmxext(block + 8, pixels + 8, line_size, h);
}

void ff_put_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride,
                                                 int h);
void ff_put_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride,
                                                int h);
void ff_put_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride);
void ff_avg_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride);
void ff_put_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_avg_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride);
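/* A full-pel copy performs no interpolation, so there is nothing to round;
 * the no-rnd full-pel names can simply alias the plain put versions. */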
#define ff_put_no_rnd_pixels16_mmxext ff_put_pixels16_mmxext
#define ff_put_no_rnd_pixels8_mmxext ff_put_pixels8_mmxext
#endif /* HAVE_YASM */


#if HAVE_INLINE_ASM

/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)
#define OP_AVG(a, b, c, e)              PAVGB_MMX(a, b, c, e)

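/* Including rnd_template.c with the macros above instantiates the rounding
 * half-pel averaging functions (put/avg_pixels8_xy2_mmx and the 16-pixel
 * variants used by the RV40 wrappers below); MOVQ_WTWO supplies the +2
 * rounding term of the per-pixel average (a + b + c + d + 2) >> 2. */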
#include "rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

#endif /* HAVE_INLINE_ASM */


#if HAVE_YASM

/***********************************/
/* MMXEXT specific */

//FIXME the following could be optimized too ...
static void ff_avg_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
                                   int line_size, int h)
{
    ff_avg_pixels8_mmxext(block,     pixels,     line_size, h);
    ff_avg_pixels8_mmxext(block + 8, pixels + 8, line_size, h);
}

#endif /* HAVE_YASM */


#if HAVE_INLINE_ASM
/***********************************/
/* standard MMX */

void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;

    /* read the pixels */
    p   = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile (
        "movq      (%3), %%mm0          \n\t"
        "movq     8(%3), %%mm1          \n\t"
        "movq    16(%3), %%mm2          \n\t"
        "movq    24(%3), %%mm3          \n\t"
        "movq    32(%3), %%mm4          \n\t"
        "movq    40(%3), %%mm5          \n\t"
        "movq    48(%3), %%mm6          \n\t"
        "movq    56(%3), %%mm7          \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "packuswb %%mm3, %%mm2          \n\t"
        "packuswb %%mm5, %%mm4          \n\t"
        "packuswb %%mm7, %%mm6          \n\t"
        "movq     %%mm0, (%0)           \n\t"
        "movq     %%mm2, (%0, %1)       \n\t"
        "movq     %%mm4, (%0, %1, 2)    \n\t"
        "movq     %%mm6, (%0, %2)       \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
           "r"(p)
        : "memory");
    pix += line_size * 4;
    p   += 32;

    // An exact copy of the code above would make the compiler generate
    // some very strange code, hence the "r" constraint on p.
    __asm__ volatile (
        "movq       (%3), %%mm0         \n\t"
        "movq      8(%3), %%mm1         \n\t"
        "movq     16(%3), %%mm2         \n\t"
        "movq     24(%3), %%mm3         \n\t"
        "movq     32(%3), %%mm4         \n\t"
        "movq     40(%3), %%mm5         \n\t"
        "movq     48(%3), %%mm6         \n\t"
        "movq     56(%3), %%mm7         \n\t"
        "packuswb  %%mm1, %%mm0         \n\t"
        "packuswb  %%mm3, %%mm2         \n\t"
        "packuswb  %%mm5, %%mm4         \n\t"
        "packuswb  %%mm7, %%mm6         \n\t"
        "movq      %%mm0, (%0)          \n\t"
        "movq      %%mm2, (%0, %1)      \n\t"
        "movq      %%mm4, (%0, %1, 2)   \n\t"
        "movq      %%mm6, (%0, %2)      \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
        : "memory");
}
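
/* For reference, the whole function computes, in scalar form, roughly
 *
 *     for (i = 0; i < 8; i++)
 *         for (j = 0; j < 8; j++)
 *             pixels[i * line_size + j] = av_clip_uint8(block[i * 8 + j]);
 *
 * packuswb supplies the unsigned-byte saturation that av_clip_uint8()
 * expresses in C. */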

#define put_signed_pixels_clamped_mmx_half(off)             \
    "movq          "#off"(%2), %%mm1        \n\t"           \
    "movq     16 + "#off"(%2), %%mm2        \n\t"           \
    "movq     32 + "#off"(%2), %%mm3        \n\t"           \
    "movq     48 + "#off"(%2), %%mm4        \n\t"           \
    "packsswb  8 + "#off"(%2), %%mm1        \n\t"           \
    "packsswb 24 + "#off"(%2), %%mm2        \n\t"           \
    "packsswb 40 + "#off"(%2), %%mm3        \n\t"           \
    "packsswb 56 + "#off"(%2), %%mm4        \n\t"           \
    "paddb              %%mm0, %%mm1        \n\t"           \
    "paddb              %%mm0, %%mm2        \n\t"           \
    "paddb              %%mm0, %%mm3        \n\t"           \
    "paddb              %%mm0, %%mm4        \n\t"           \
    "movq               %%mm1, (%0)         \n\t"           \
    "movq               %%mm2, (%0, %3)     \n\t"           \
    "movq               %%mm3, (%0, %3, 2)  \n\t"           \
    "movq               %%mm4, (%0, %1)     \n\t"

void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                                      int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_pb_80)", %%mm0     \n\t"
        "lea         (%3, %3, 2), %1        \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea         (%0, %3, 4), %0        \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        : "+&r"(pixels), "=&r"(line_skip3)
        : "r"(block), "r"(line_skip)
        : "memory");
}
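
/* Scalar reference: each output byte is av_clip_uint8(block[...] + 128).
 * packsswb saturates the words to signed bytes in [-128, 127]; the paddb
 * with ff_pb_80 (0x80 in every byte) then wraps that range to [0, 255],
 * which is equivalent to clipping block + 128. */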

void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p   = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile (
            "movq        (%2), %%mm0    \n\t"
            "movq       8(%2), %%mm1    \n\t"
            "movq      16(%2), %%mm2    \n\t"
            "movq      24(%2), %%mm3    \n\t"
            "movq          %0, %%mm4    \n\t"
            "movq          %1, %%mm6    \n\t"
            "movq       %%mm4, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm4    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm4, %%mm0    \n\t"
            "paddsw     %%mm5, %%mm1    \n\t"
            "movq       %%mm6, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm6    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm6, %%mm2    \n\t"
            "paddsw     %%mm5, %%mm3    \n\t"
            "packuswb   %%mm1, %%mm0    \n\t"
            "packuswb   %%mm3, %%mm2    \n\t"
            "movq       %%mm0, %0       \n\t"
            "movq       %%mm2, %1       \n\t"
            : "+m"(*pix), "+m"(*(pix + line_size))
            : "r"(p)
            : "memory");
        pix += line_size * 2;
        p   += 16;
    } while (--i);
}
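
/* Scalar reference: pixels[j] = av_clip_uint8(pixels[j] + block[j]) for each
 * of the 64 coefficients; the bytes are unpacked to words against the zeroed
 * mm7, summed with signed saturation (paddsw) and repacked with unsigned
 * saturation (packuswb). */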

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
                            ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea   (%3, %3), %%"REG_a"      \n\t"
        ".p2align     3                 \n\t"
        "1:                             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq     %%mm0, (%2)           \n\t"
        "movq     %%mm1, (%2, %3)       \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq     %%mm0, (%2)           \n\t"
        "movq     %%mm1, (%2, %3)       \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "subl        $4, %0             \n\t"
        "jnz         1b                 \n\t"
        : "+g"(h), "+r"(pixels),  "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}
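
/* The copy loops of put_pixels8_mmx() above and put_pixels16_mmx() below are
 * unrolled four rows per iteration ("subl $4, %0"), so h must be a multiple
 * of 4. */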

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea   (%3, %3), %%"REG_a"      \n\t"
        ".p2align     3                 \n\t"
        "1:                             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq 8(%1    ), %%mm4          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq 8(%1, %3), %%mm5          \n\t"
        "movq     %%mm0,  (%2)          \n\t"
        "movq     %%mm4, 8(%2)          \n\t"
        "movq     %%mm1,  (%2, %3)      \n\t"
        "movq     %%mm5, 8(%2, %3)      \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq 8(%1    ), %%mm4          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq 8(%1, %3), %%mm5          \n\t"
        "movq     %%mm0,  (%2)          \n\t"
        "movq     %%mm4, 8(%2)          \n\t"
        "movq     %%mm1,  (%2, %3)      \n\t"
        "movq     %%mm5, 8(%2, %3)      \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "subl        $4, %0             \n\t"
        "jnz         1b                 \n\t"
        : "+g"(h), "+r"(pixels),  "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

#define CLEAR_BLOCKS(name, n)                           \
static void name(int16_t *blocks)                       \
{                                                       \
    __asm__ volatile (                                  \
        "pxor %%mm7, %%mm7              \n\t"           \
        "mov     %1,        %%"REG_a"   \n\t"           \
        "1:                             \n\t"           \
        "movq %%mm7,   (%0, %%"REG_a")  \n\t"           \
        "movq %%mm7,  8(%0, %%"REG_a")  \n\t"           \
        "movq %%mm7, 16(%0, %%"REG_a")  \n\t"           \
        "movq %%mm7, 24(%0, %%"REG_a")  \n\t"           \
        "add    $32, %%"REG_a"          \n\t"           \
        "js      1b                     \n\t"           \
        :: "r"(((uint8_t *)blocks) + 128 * n),          \
           "i"(-128 * n)                                \
        : "%"REG_a                                      \
        );                                              \
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)
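
/* Each block is 64 int16_t coefficients, i.e. 128 bytes; the generated loop
 * runs REG_a from -128 * n up to 0, zeroing the n consecutive blocks 32
 * bytes per iteration. */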

static void clear_block_sse(int16_t *block)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0          \n"
        "movaps %%xmm0,    (%0)         \n"
        "movaps %%xmm0,  16(%0)         \n"
        "movaps %%xmm0,  32(%0)         \n"
        "movaps %%xmm0,  48(%0)         \n"
        "movaps %%xmm0,  64(%0)         \n"
        "movaps %%xmm0,  80(%0)         \n"
        "movaps %%xmm0,  96(%0)         \n"
        "movaps %%xmm0, 112(%0)         \n"
        :: "r"(block)
        : "memory"
    );
}

static void clear_blocks_sse(int16_t *blocks)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0              \n"
        "mov        %1,         %%"REG_a"   \n"
        "1:                                 \n"
        "movaps %%xmm0,    (%0, %%"REG_a")  \n"
        "movaps %%xmm0,  16(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  32(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  48(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  64(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  80(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  96(%0, %%"REG_a")  \n"
        "movaps %%xmm0, 112(%0, %%"REG_a")  \n"
        "add      $128,         %%"REG_a"   \n"
        "js         1b                      \n"
        :: "r"(((uint8_t *)blocks) + 128 * 6),
           "i"(-128 * 6)
        : "%"REG_a
    );
}
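
/* The SSE variants depend on the 16-byte alignment of the coefficient
 * blocks; movaps faults on unaligned addresses. */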

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
{
    x86_reg i = 0;
    __asm__ volatile (
        "jmp          2f                \n\t"
        "1:                             \n\t"
        "movq   (%1, %0), %%mm0         \n\t"
        "movq   (%2, %0), %%mm1         \n\t"
        "paddb     %%mm0, %%mm1         \n\t"
        "movq      %%mm1, (%2, %0)      \n\t"
        "movq  8(%1, %0), %%mm0         \n\t"
        "movq  8(%2, %0), %%mm1         \n\t"
        "paddb     %%mm0, %%mm1         \n\t"
        "movq      %%mm1, 8(%2, %0)     \n\t"
        "add         $16, %0            \n\t"
        "2:                             \n\t"
        "cmp          %3, %0            \n\t"
        "js           1b                \n\t"
        : "+r"(i)
        : "r"(src), "r"(dst), "r"((x86_reg)w - 15)
    );
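    /* scalar tail: the asm loop adds 16 bytes per iteration, finish the
     * remaining (up to 15) bytes here */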
    for ( ; i < w; i++)
        dst[i + 0] += src[i + 0];
}

#if HAVE_7REGS
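/* HuffYUV median prediction, a scalar sketch of what the cmov loop computes:
 *     pred     = mid_pred(left, top[i], left + top[i] - top_left);
 *     dst[i]   = pred + diff[i];
 *     left     = dst[i];
 *     top_left = top[i];
 * done branch-free with cmov, hence the HAVE_7REGS requirement. */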
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
                                            const uint8_t *diff, int w,
                                            int *left, int *left_top)
{
    x86_reg w2 = -w;
    x86_reg x;
    int l  = *left     & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile (
        "mov          %7, %3            \n"
        "1:                             \n"
        "movzbl (%3, %4), %2            \n"
        "mov          %2, %k3           \n"
        "sub         %b1, %b3           \n"
        "add         %b0, %b3           \n"
        "mov          %2, %1            \n"
        "cmp          %0, %2            \n"
        "cmovg        %0, %2            \n"
        "cmovg        %1, %0            \n"
        "cmp         %k3, %0            \n"
        "cmovg       %k3, %0            \n"
        "mov          %7, %3            \n"
        "cmp          %2, %0            \n"
        "cmovl        %2, %0            \n"
        "add    (%6, %4), %b0           \n"
        "mov         %b0, (%5, %4)      \n"
        "inc          %4                \n"
        "jl           1b                \n"
        : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        : "r"(dst + w), "r"(diff + w), "rm"(top + w)
    );
    *left     = l;
    *left_top = tl;
}
#endif

/* Draw the edges of width 'w' of an image of size width, height.
 * This MMX version can only handle w == 4, w == 8 or w == 16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
                           int w, int h, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if (w == 8) {
        __asm__ volatile (
            "1:                             \n\t"
            "movd            (%0), %%mm0    \n\t"
            "punpcklbw      %%mm0, %%mm0    \n\t"
            "punpcklwd      %%mm0, %%mm0    \n\t"
            "punpckldq      %%mm0, %%mm0    \n\t"
            "movq           %%mm0, -8(%0)   \n\t"
            "movq      -8(%0, %2), %%mm1    \n\t"
            "punpckhbw      %%mm1, %%mm1    \n\t"
            "punpckhwd      %%mm1, %%mm1    \n\t"
            "punpckhdq      %%mm1, %%mm1    \n\t"
            "movq           %%mm1, (%0, %2) \n\t"
            "add               %1, %0       \n\t"
            "cmp               %3, %0       \n\t"
            "jb                1b           \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    } else if (w == 16) {
        __asm__ volatile (
            "1:                                 \n\t"
            "movd            (%0), %%mm0        \n\t"
            "punpcklbw      %%mm0, %%mm0        \n\t"
            "punpcklwd      %%mm0, %%mm0        \n\t"
            "punpckldq      %%mm0, %%mm0        \n\t"
            "movq           %%mm0, -8(%0)       \n\t"
            "movq           %%mm0, -16(%0)      \n\t"
            "movq      -8(%0, %2), %%mm1        \n\t"
            "punpckhbw      %%mm1, %%mm1        \n\t"
            "punpckhwd      %%mm1, %%mm1        \n\t"
            "punpckhdq      %%mm1, %%mm1        \n\t"
            "movq           %%mm1,  (%0, %2)    \n\t"
            "movq           %%mm1, 8(%0, %2)    \n\t"
            "add               %1, %0           \n\t"
            "cmp               %3, %0           \n\t"
            "jb                1b               \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    } else {
        av_assert1(w == 4);
        __asm__ volatile (
            "1:                             \n\t"
            "movd            (%0), %%mm0    \n\t"
            "punpcklbw      %%mm0, %%mm0    \n\t"
            "punpcklwd      %%mm0, %%mm0    \n\t"
            "movd           %%mm0, -4(%0)   \n\t"
            "movd      -4(%0, %2), %%mm1    \n\t"
            "punpcklbw      %%mm1, %%mm1    \n\t"
            "punpckhwd      %%mm1, %%mm1    \n\t"
            "punpckhdq      %%mm1, %%mm1    \n\t"
            "movd           %%mm1, (%0, %2) \n\t"
            "add               %1, %0       \n\t"
            "cmp               %3, %0       \n\t"
            "jb                1b           \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    }

    /* top and bottom (and hopefully also the corners) */
    if (sides & EDGE_TOP) {
        for (i = 0; i < h; i += 4) {
            ptr = buf - (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
                  "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w)
                );
        }
    }

    if (sides & EDGE_BOTTOM) {
        for (i = 0; i < h; i += 4) {
            ptr = last_line + (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)last_line - (x86_reg)ptr - w),
                  "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
                  "r"(ptr + width + 2 * w)
                );
        }
    }
}
#endif /* HAVE_INLINE_ASM */


#if HAVE_YASM
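/* MPEG-4 qpel motion compensation: OPNAME ## qpelN_mcXY computes the NxN
 * block at quarter-pel phase (X/4, Y/4) horizontally/vertically; mc00 is
 * the full-pel copy.  The half-pel intermediates come from the lowpass
 * filters declared above and are blended with the pixelsN_l2 averaging
 * helpers. */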
#define QPEL_OP(OPNAME, ROUNDER, RND, MMX)                              \
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);              \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
                                                   stride, 8);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
                                        stride, stride, 8);             \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride,    \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
                                                   stride, 8);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride,     \
                                        stride, 8);                     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
                                                   8, stride);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
                                        stride, stride, 8);             \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src,            \
                                                   stride, stride);     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
                                                   8, stride);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride,\
                                        stride, 8);                     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH,              \
                                        8, stride, 9);                  \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[9];                                                   \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src,  \
                                           ptrdiff_t stride)            \
{                                                                       \
    ff_ ## OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);            \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
                                                    stride, 16);        \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
                                         stride, 16);                   \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src,           \
                                                    stride, stride, 16);\
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
                                                    stride, 16);        \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half,            \
                                         stride, stride, 16);           \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
                                                    stride);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
                                         stride, 16);                   \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src,           \
                                                    stride, stride);    \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
                                                    stride);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + stride, half,       \
                                         stride, stride, 16);           \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}

QPEL_OP(put_,          ff_pw_16, _,        mmxext)
QPEL_OP(avg_,          ff_pw_16, _,        mmxext)
QPEL_OP(put_no_rnd_,   ff_pw_15, _no_rnd_, mmxext)
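/* Note that the ROUNDER argument is never expanded by the macro body above;
 * only OPNAME, RND and MMX are used, the rounding behaviour being selected
 * through the RND name component. */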
#endif /* HAVE_YASM */


#if HAVE_INLINE_ASM
void ff_put_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels8_xy2_mmx(dst, src, stride, 8);
}
void ff_put_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels16_xy2_mmx(dst, src, stride, 16);
}
void ff_avg_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels8_xy2_mmx(dst, src, stride, 8);
}
void ff_avg_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels16_xy2_mmx(dst, src, stride, 16);
}
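
/* RV40 special-cases the (3/4, 3/4) luma subpel position as the plain
 * average of the four surrounding full-pel samples, so the xy2 half-pel
 * averagers instantiated from rnd_template.c can be reused directly. */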

typedef void emulated_edge_mc_func(uint8_t *dst, const uint8_t *src,
                                   ptrdiff_t linesize, int block_w, int block_h,
                                   int src_x, int src_y, int w, int h);

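/* Global motion compensation with an affine transform; for destination pixel
 * (x, y) the source position is, roughly,
 *     sx = (ox + dxx * x + dxy * y) >> (16 + shift)
 *     sy = (oy + dyx * x + dyy * y) >> (16 + shift)
 * followed by bilinear interpolation of the four neighbouring samples with
 * rounder r; ff_gmc_c() is the scalar reference and fallback. */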
static av_always_inline void gmc(uint8_t *dst, uint8_t *src,
                                 int stride, int h, int ox, int oy,
                                 int dxx, int dxy, int dyx, int dyy,
                                 int shift, int r, int width, int height,
                                 emulated_edge_mc_func *emu_edge_fn)
{
    const int w    = 8;
    const int ix   = ox  >> (16 + shift);
    const int iy   = oy  >> (16 + shift);
    const int oxs  = ox  >> 4;
    const int oys  = oy  >> 4;
    const int dxxs = dxx >> 4;
    const int dxys = dxy >> 4;
    const int dyxs = dyx >> 4;
    const int dyys = dyy >> 4;
    const uint16_t r4[4]   = { r, r, r, r };
    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
    const uint64_t shift2 = 2 * shift;
#define MAX_STRIDE 4096U
#define MAX_H 8U
    uint8_t edge_buf[(MAX_H + 1) * MAX_STRIDE];
    int x, y;

    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
    const int dxh = dxy * (h - 1);
    const int dyw = dyx * (w - 1);
    int need_emu =  (unsigned)ix >= width  - w ||
                    (unsigned)iy >= height - h;

    if ( // non-constant fullpel offset (3% of blocks)
        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx | dxy | dyx | dyy) & 15
        || (need_emu && (h > MAX_H || stride > MAX_STRIDE))) {
        // FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
                 shift, r, width, height);
        return;
    }

    src += ix + iy * stride;
    if (need_emu) {
        emu_edge_fn(edge_buf, src, stride, w + 1, h + 1, ix, iy, width, height);
        src = edge_buf;
    }

    __asm__ volatile (
        "movd         %0, %%mm6         \n\t"
        "pxor      %%mm7, %%mm7         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        :: "r"(1 << shift)
    );

    for (x = 0; x < w; x += 4) {
        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
                            oxs - dxys + dxxs * (x + 1),
                            oxs - dxys + dxxs * (x + 2),
                            oxs - dxys + dxxs * (x + 3) };
        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
                            oys - dyys + dyxs * (x + 1),
                            oys - dyys + dyxs * (x + 2),
                            oys - dyys + dyxs * (x + 3) };

        for (y = 0; y < h; y++) {
            __asm__ volatile (
                "movq      %0, %%mm4    \n\t"
                "movq      %1, %%mm5    \n\t"
                "paddw     %2, %%mm4    \n\t"
                "paddw     %3, %%mm5    \n\t"
                "movq   %%mm4, %0       \n\t"
                "movq   %%mm5, %1       \n\t"
                "psrlw    $12, %%mm4    \n\t"
                "psrlw    $12, %%mm5    \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            __asm__ volatile (
                "movq      %%mm6, %%mm2 \n\t"
1083                 "movq      %%mm6, %%mm1 \n\t"
1084                 "psubw     %%mm4, %%mm2 \n\t"
1085                 "psubw     %%mm5, %%mm1 \n\t"
1086                 "movq      %%mm2, %%mm0 \n\t"
1087                 "movq      %%mm4, %%mm3 \n\t"
1088                 "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
1089                 "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
1090                 "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
1091                 "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)
1092
1093                 "movd         %4, %%mm5 \n\t"
1094                 "movd         %3, %%mm4 \n\t"
1095                 "punpcklbw %%mm7, %%mm5 \n\t"
1096                 "punpcklbw %%mm7, %%mm4 \n\t"
1097                 "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
1098                 "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
1099
1100                 "movd         %2, %%mm5 \n\t"
1101                 "movd         %1, %%mm4 \n\t"
1102                 "punpcklbw %%mm7, %%mm5 \n\t"
1103                 "punpcklbw %%mm7, %%mm4 \n\t"
1104                 "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
1105                 "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
1106                 "paddw        %5, %%mm1 \n\t"
1107                 "paddw     %%mm3, %%mm2 \n\t"
1108                 "paddw     %%mm1, %%mm0 \n\t"
1109                 "paddw     %%mm2, %%mm0 \n\t"
1110
1111                 "psrlw        %6, %%mm0 \n\t"
1112                 "packuswb  %%mm0, %%mm0 \n\t"
1113                 "movd      %%mm0, %0    \n\t"
1114
1115                 : "=m"(dst[x + y * stride])
1116                 : "m"(src[0]), "m"(src[1]),
1117                   "m"(src[stride]), "m"(src[stride + 1]),
1118                   "m"(*r4), "m"(shift2)
1119             );
1120             src += stride;
1121         }
1122         src += 4 - h * stride;
1123     }
1124 }
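
/* For reference, a scalar sketch of the per-pixel bilinear blend that the
 * MMX inner loop above computes, with s = 1 << shift and dx, dy the subpel
 * offsets in [0, s). gmc_blend_pixel_ref() is a hypothetical helper for
 * illustration only; it is not part of FFmpeg and is never called. */
static av_unused uint8_t gmc_blend_pixel_ref(const uint8_t *src, int stride,
                                             int dx, int dy, int s, int r,
                                             int shift)
{
    /* weighted sum of the four neighbouring pixels, plus rounding term r,
     * scaled back down by 2 * shift as in the psrlw above */
    int v = src[0]          * (s - dx) * (s - dy) +
            src[1]          *  dx      * (s - dy) +
            src[stride]     * (s - dx) *  dy      +
            src[stride + 1] *  dx      *  dy      + r;
    return av_clip_uint8(v >> (2 * shift));
}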


#if CONFIG_VIDEODSP
#if HAVE_YASM
#if ARCH_X86_32
static void gmc_mmx(uint8_t *dst, uint8_t *src,
                    int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy,
                    int shift, int r, int width, int height)
{
    gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
        width, height, &ff_emulated_edge_mc_8);
}
#endif /* ARCH_X86_32 */
static void gmc_sse(uint8_t *dst, uint8_t *src,
                    int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy,
                    int shift, int r, int width, int height)
{
    gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
        width, height, &ff_emulated_edge_mc_8);
}
#else
static void gmc_mmx(uint8_t *dst, uint8_t *src,
                    int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy,
                    int shift, int r, int width, int height)
{
    gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
        width, height, &ff_emulated_edge_mc_8);
}
#endif /* HAVE_YASM */
#endif /* CONFIG_VIDEODSP */

/* CAVS-specific */
void ff_put_cavs_qpel8_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels8_mmx(dst, src, stride, 8);
}

void ff_avg_cavs_qpel8_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels8_mmx(dst, src, stride, 8);
}

void ff_put_cavs_qpel16_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels16_mmx(dst, src, stride, 16);
}

void ff_avg_cavs_qpel16_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels16_mmx(dst, src, stride, 16);
}

/* VC-1-specific */
void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src,
                               ptrdiff_t stride, int rnd)
{
    put_pixels8_mmx(dst, src, stride, 8);
}

#if CONFIG_DIRAC_DECODER
#define DIRAC_PIXOP(OPNAME2, OPNAME, EXT)\
void ff_ ## OPNAME2 ## _dirac_pixels8_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    if (h & 3)\
        ff_ ## OPNAME2 ## _dirac_pixels8_c(dst, src, stride, h);\
    else\
        OPNAME ## _pixels8_ ## EXT(dst, src[0], stride, h);\
}\
void ff_ ## OPNAME2 ## _dirac_pixels16_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    if (h & 3)\
        ff_ ## OPNAME2 ## _dirac_pixels16_c(dst, src, stride, h);\
    else\
        OPNAME ## _pixels16_ ## EXT(dst, src[0], stride, h);\
}\
void ff_ ## OPNAME2 ## _dirac_pixels32_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    if (h & 3) {\
        ff_ ## OPNAME2 ## _dirac_pixels32_c(dst, src, stride, h);\
    } else {\
        OPNAME ## _pixels16_ ## EXT(dst,      src[0],      stride, h);\
        OPNAME ## _pixels16_ ## EXT(dst + 16, src[0] + 16, stride, h);\
    }\
}
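/* For example, DIRAC_PIXOP(put, put, mmx) expands (for the 8-pixel case) to:
 *
 * void ff_put_dirac_pixels8_mmx(uint8_t *dst, const uint8_t *src[5],
 *                               int stride, int h)
 * {
 *     if (h & 3)
 *         ff_put_dirac_pixels8_c(dst, src, stride, h);
 *     else
 *         put_pixels8_mmx(dst, src[0], stride, h);
 * }
 *
 * i.e. heights that are not a multiple of 4 fall back to the C version. */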

#if HAVE_MMX_INLINE
DIRAC_PIXOP(put, put, mmx)
DIRAC_PIXOP(avg, avg, mmx)
#endif

#if HAVE_YASM
DIRAC_PIXOP(avg, ff_avg, mmxext)

void ff_put_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3)
        ff_put_dirac_pixels16_c(dst, src, stride, h);
    else
        ff_put_pixels16_sse2(dst, src[0], stride, h);
}

void ff_avg_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3)
        ff_avg_dirac_pixels16_c(dst, src, stride, h);
    else
        ff_avg_pixels16_sse2(dst, src[0], stride, h);
}

void ff_put_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3) {
        ff_put_dirac_pixels32_c(dst, src, stride, h);
    } else {
        ff_put_pixels16_sse2(dst,      src[0],      stride, h);
        ff_put_pixels16_sse2(dst + 16, src[0] + 16, stride, h);
    }
}

void ff_avg_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3) {
        ff_avg_dirac_pixels32_c(dst, src, stride, h);
    } else {
        ff_avg_pixels16_sse2(dst,      src[0],      stride, h);
        ff_avg_pixels16_sse2(dst + 16, src[0] + 16, stride, h);
    }
}
#endif /* HAVE_YASM */
#endif /* CONFIG_DIRAC_DECODER */

static void vector_clipf_sse(float *dst, const float *src,
                             float min, float max, int len)
{
    x86_reg i = (len - 16) * 4;
    __asm__ volatile (
        "movss          %3, %%xmm4      \n\t"
        "movss          %4, %%xmm5      \n\t"
        "shufps $0, %%xmm4, %%xmm4      \n\t"
        "shufps $0, %%xmm5, %%xmm5      \n\t"
        "1:                             \n\t"
        "movaps   (%2, %0), %%xmm0      \n\t" // 3/1 on intel
        "movaps 16(%2, %0), %%xmm1      \n\t"
        "movaps 32(%2, %0), %%xmm2      \n\t"
        "movaps 48(%2, %0), %%xmm3      \n\t"
        "maxps      %%xmm4, %%xmm0      \n\t"
        "maxps      %%xmm4, %%xmm1      \n\t"
        "maxps      %%xmm4, %%xmm2      \n\t"
        "maxps      %%xmm4, %%xmm3      \n\t"
        "minps      %%xmm5, %%xmm0      \n\t"
        "minps      %%xmm5, %%xmm1      \n\t"
        "minps      %%xmm5, %%xmm2      \n\t"
        "minps      %%xmm5, %%xmm3      \n\t"
        "movaps     %%xmm0,   (%1, %0)  \n\t"
        "movaps     %%xmm1, 16(%1, %0)  \n\t"
        "movaps     %%xmm2, 32(%1, %0)  \n\t"
        "movaps     %%xmm3, 48(%1, %0)  \n\t"
        "sub           $64, %0          \n\t"
        "jge            1b              \n\t"
        : "+&r"(i)
        : "r"(dst), "r"(src), "m"(min), "m"(max)
        : "memory"
    );
}
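
/* A scalar sketch of what the SSE loop above computes, assuming len is a
 * multiple of 16 and src/dst are 16-byte aligned as movaps requires.
 * vector_clipf_ref() is illustrative only and never called. */
static av_unused void vector_clipf_ref(float *dst, const float *src,
                                       float min, float max, int len)
{
    int i;
    /* clamp each element into [min, max]; the asm does 16 floats per pass */
    for (i = 0; i < len; i++)
        dst[i] = FFMIN(FFMAX(src[i], min), max);
}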

#endif /* HAVE_INLINE_ASM */

void ff_h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale);
void ff_h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale);

int32_t ff_scalarproduct_int16_mmxext(const int16_t *v1, const int16_t *v2,
                                      int order);
int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2,
                                    int order);
int32_t ff_scalarproduct_and_madd_int16_mmxext(int16_t *v1, const int16_t *v2,
                                               const int16_t *v3,
                                               int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2,
                                             const int16_t *v3,
                                             int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
                                              const int16_t *v3,
                                              int order, int mul);

void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
                                        const int16_t *window, unsigned int len);
void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);
void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
                                  const int16_t *window, unsigned int len);
void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
                                const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
                                 const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);

void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w);
void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w);

void ff_add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top,
                                          const uint8_t *diff, int w,
                                          int *left, int *left_top);
int  ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src,
                                       int w, int left);
int  ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src,
                                      int w, int left);

void ff_vector_clip_int32_mmx     (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse2    (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse4    (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);

#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX)                          \
    do {                                                                     \
    c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
    } while (0)
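/* For example, SET_QPEL_FUNCS(put_qpel, 0, 16, mmxext, ) fills
 * c->put_qpel_pixels_tab[0][0..15] with put_qpel16_mc00_mmxext through
 * put_qpel16_mc33_mmxext; in the mcXY suffix, X is the horizontal and Y
 * the vertical quarter-pel position, and the table index is X + 4 * Y. */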

static av_cold void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx,
                                     int mm_flags)
{
#if HAVE_INLINE_ASM
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    c->put_pixels_clamped        = ff_put_pixels_clamped_mmx;
    c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
    c->add_pixels_clamped        = ff_add_pixels_clamped_mmx;

    if (!high_bit_depth) {
        c->clear_block  = clear_block_mmx;
        c->clear_blocks = clear_blocks_mmx;
        c->draw_edges   = draw_edges_mmx;
    }

#if CONFIG_VIDEODSP && (ARCH_X86_32 || !HAVE_YASM)
    c->gmc = gmc_mmx;
#endif

    c->add_bytes = add_bytes_mmx;
#endif /* HAVE_INLINE_ASM */

#if HAVE_YASM
    if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
        c->h263_v_loop_filter = ff_h263_v_loop_filter_mmx;
        c->h263_h_loop_filter = ff_h263_h_loop_filter_mmx;
    }

    c->vector_clip_int32 = ff_vector_clip_int32_mmx;
#endif /* HAVE_YASM */
}

static av_cold void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx,
                                        int mm_flags)
{
#if HAVE_MMXEXT_EXTERNAL
    SET_QPEL_FUNCS(avg_qpel,        0, 16, mmxext, );
    SET_QPEL_FUNCS(avg_qpel,        1,  8, mmxext, );

    SET_QPEL_FUNCS(put_qpel,        0, 16, mmxext, );
    SET_QPEL_FUNCS(put_qpel,        1,  8, mmxext, );
    SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmxext, );
    SET_QPEL_FUNCS(put_no_rnd_qpel, 1,  8, mmxext, );

    /* slower than cmov version on AMD */
    if (!(mm_flags & AV_CPU_FLAG_3DNOW))
        c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmxext;

    c->scalarproduct_int16          = ff_scalarproduct_int16_mmxext;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmxext;

    if (avctx->flags & CODEC_FLAG_BITEXACT) {
        c->apply_window_int16 = ff_apply_window_int16_mmxext;
    } else {
        c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
    }
#endif /* HAVE_MMXEXT_EXTERNAL */
}

static av_cold void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx,
                                     int mm_flags)
{
#if HAVE_INLINE_ASM
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (!high_bit_depth) {
        if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)) {
            /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
            c->clear_block  = clear_block_sse;
            c->clear_blocks = clear_blocks_sse;
        }
    }

    c->vector_clipf = vector_clipf_sse;
#endif /* HAVE_INLINE_ASM */

#if HAVE_YASM
#if HAVE_INLINE_ASM && CONFIG_VIDEODSP
    c->gmc = gmc_sse;
#endif
#endif /* HAVE_YASM */
}

static av_cold void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
                                      int mm_flags)
{
#if HAVE_SSE2_INLINE
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
        c->idct_put              = ff_idct_xvid_sse2_put;
        c->idct_add              = ff_idct_xvid_sse2_add;
        c->idct                  = ff_idct_xvid_sse2;
        c->idct_permutation_type = FF_SSE2_IDCT_PERM;
    }
#endif /* HAVE_SSE2_INLINE */

#if HAVE_SSE2_EXTERNAL
    c->scalarproduct_int16          = ff_scalarproduct_int16_sse2;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
    if (mm_flags & AV_CPU_FLAG_ATOM) {
        c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
    } else {
        c->vector_clip_int32 = ff_vector_clip_int32_sse2;
    }
    if (avctx->flags & CODEC_FLAG_BITEXACT) {
        c->apply_window_int16 = ff_apply_window_int16_sse2;
    } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
        c->apply_window_int16 = ff_apply_window_int16_round_sse2;
    }
    c->bswap_buf = ff_bswap32_buf_sse2;
#endif /* HAVE_SSE2_EXTERNAL */
}

static av_cold void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
                                       int mm_flags)
{
#if HAVE_SSSE3_EXTERNAL
    c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
    if (mm_flags & AV_CPU_FLAG_SSE4) // not really SSE4, just slow on Conroe
        c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;

    if (mm_flags & AV_CPU_FLAG_ATOM)
        c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
    else
        c->apply_window_int16 = ff_apply_window_int16_ssse3;
    if (!(mm_flags & (AV_CPU_FLAG_SSE42 | AV_CPU_FLAG_3DNOW))) // cachesplit
        c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
    c->bswap_buf = ff_bswap32_buf_ssse3;
#endif /* HAVE_SSSE3_EXTERNAL */
}

static av_cold void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
                                      int mm_flags)
{
#if HAVE_SSE4_EXTERNAL
    c->vector_clip_int32 = ff_vector_clip_int32_sse4;
#endif /* HAVE_SSE4_EXTERNAL */
}

av_cold void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();

#if HAVE_7REGS && HAVE_INLINE_ASM
    if (mm_flags & AV_CPU_FLAG_CMOV)
        c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif

    if (mm_flags & AV_CPU_FLAG_MMX) {
#if HAVE_INLINE_ASM
        const int idct_algo = avctx->idct_algo;

        if (avctx->lowres == 0 && avctx->bits_per_raw_sample <= 8) {
            if (idct_algo == FF_IDCT_AUTO || idct_algo == FF_IDCT_SIMPLEMMX) {
                c->idct_put              = ff_simple_idct_put_mmx;
                c->idct_add              = ff_simple_idct_add_mmx;
                c->idct                  = ff_simple_idct_mmx;
                c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
            } else if (idct_algo == FF_IDCT_XVIDMMX) {
                if (mm_flags & AV_CPU_FLAG_SSE2) {
                    c->idct_put              = ff_idct_xvid_sse2_put;
                    c->idct_add              = ff_idct_xvid_sse2_add;
                    c->idct                  = ff_idct_xvid_sse2;
                    c->idct_permutation_type = FF_SSE2_IDCT_PERM;
                } else if (mm_flags & AV_CPU_FLAG_MMXEXT) {
                    c->idct_put              = ff_idct_xvid_mmxext_put;
                    c->idct_add              = ff_idct_xvid_mmxext_add;
                    c->idct                  = ff_idct_xvid_mmxext;
                } else {
                    c->idct_put              = ff_idct_xvid_mmx_put;
                    c->idct_add              = ff_idct_xvid_mmx_add;
                    c->idct                  = ff_idct_xvid_mmx;
                }
            }
        }
#endif /* HAVE_INLINE_ASM */

        dsputil_init_mmx(c, avctx, mm_flags);
    }

    if (mm_flags & AV_CPU_FLAG_MMXEXT)
        dsputil_init_mmxext(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE)
        dsputil_init_sse(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE2)
        dsputil_init_sse2(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSSE3)
        dsputil_init_ssse3(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE4)
        dsputil_init_sse4(c, avctx, mm_flags);

    if (CONFIG_ENCODERS)
        ff_dsputilenc_init_mmx(c, avctx);
}