/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "libavcodec/videodsp.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"
#include "diracdsp_mmx.h"

//#undef NDEBUG
//#include <assert.h>

/* pixel operations */
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15)   =   0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_17)   = { 0x0011001100110011ULL, 0x0011001100110011ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20)   =   0x0014001400140014ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42)   =   0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53)   =   0x0035003500350035ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96)   =   0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128)  =   0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255)  =   0x00ff00ff00ff00ffULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_512)  = { 0x0200020002000200ULL, 0x0200020002000200ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL };

DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F)   =   0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC)   =   0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };


#if HAVE_YASM
void ff_put_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1,
                                     uint8_t *src2, int dstStride,
                                     int src1Stride, int h);
void ff_avg_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_avg_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                                      int dstStride, int src1Stride, int h);

static void ff_put_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
                                   ptrdiff_t line_size, int h)
{
    ff_put_pixels8_mmxext(block,     pixels,     line_size, h);
    ff_put_pixels8_mmxext(block + 8, pixels + 8, line_size, h);
}

void ff_put_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride,
                                                 int h);
void ff_put_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                        int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                        int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride,
                                                int h);
void ff_put_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_avg_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride);
void ff_put_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                        int dstStride, int srcStride);
void ff_avg_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                        int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride);
#define ff_put_no_rnd_pixels16_mmxext ff_put_pixels16_mmxext
#define ff_put_no_rnd_pixels8_mmxext ff_put_pixels8_mmxext
#endif /* HAVE_YASM */


#if HAVE_INLINE_ASM

#define JUMPALIGN()     __asm__ volatile (".p2align 3"::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%"#regd", %%"#regd ::)

#define MOVQ_BFE(regd)                                  \
    __asm__ volatile (                                  \
        "pcmpeqd %%"#regd", %%"#regd"   \n\t"           \
        "paddb   %%"#regd", %%"#regd"   \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_wtwo))
#else
// for shared libraries it is better to synthesize these constants in
// registers than to load them through memory
#define MOVQ_BONE(regd)                                 \
    __asm__ volatile (                                  \
        "pcmpeqd  %%"#regd", %%"#regd"  \n\t"           \
        "psrlw          $15, %%"#regd"  \n\t"           \
        "packuswb %%"#regd", %%"#regd"  \n\t" ::)

#define MOVQ_WTWO(regd)                                 \
    __asm__ volatile (                                  \
        "pcmpeqd %%"#regd", %%"#regd"   \n\t"           \
        "psrlw         $15, %%"#regd"   \n\t"           \
        "psllw          $1, %%"#regd"   \n\t"::)

#endif /* PIC */

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX(rega, regb, regr, regfe)                       \
    "movq   "#rega", "#regr"            \n\t"                    \
    "por    "#regb", "#regr"            \n\t"                    \
    "pxor   "#rega", "#regb"            \n\t"                    \
    "pand  "#regfe", "#regb"            \n\t"                    \
    "psrlq       $1, "#regb"            \n\t"                    \
    "psubb  "#regb", "#regr"            \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp)           \
    "movq  "#rega", "#regr"             \n\t"                    \
    "movq  "#regc", "#regp"             \n\t"                    \
    "por   "#regb", "#regr"             \n\t"                    \
    "por   "#regd", "#regp"             \n\t"                    \
    "pxor  "#rega", "#regb"             \n\t"                    \
    "pxor  "#regc", "#regd"             \n\t"                    \
    "pand    %%mm6, "#regb"             \n\t"                    \
    "pand    %%mm6, "#regd"             \n\t"                    \
    "psrlq      $1, "#regd"             \n\t"                    \
    "psrlq      $1, "#regb"             \n\t"                    \
    "psubb "#regb", "#regr"             \n\t"                    \
    "psubb "#regd", "#regp"             \n\t"

/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)
#define OP_AVG(a, b, c, e)              PAVGB_MMX(a, b, c, e)

#include "dsputil_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

#endif /* HAVE_INLINE_ASM */


#if HAVE_YASM

/***********************************/
/* MMXEXT specific */

//FIXME the following could be optimized too ...
static void ff_avg_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
                                   int line_size, int h)
{
    ff_avg_pixels8_mmxext(block,     pixels,     line_size, h);
    ff_avg_pixels8_mmxext(block + 8, pixels + 8, line_size, h);
}

#endif /* HAVE_YASM */


#if HAVE_INLINE_ASM
/***********************************/
/* standard MMX */

void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;

    /* read the pixels */
    p   = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile (
        "movq      (%3), %%mm0          \n\t"
        "movq     8(%3), %%mm1          \n\t"
        "movq    16(%3), %%mm2          \n\t"
        "movq    24(%3), %%mm3          \n\t"
        "movq    32(%3), %%mm4          \n\t"
        "movq    40(%3), %%mm5          \n\t"
        "movq    48(%3), %%mm6          \n\t"
        "movq    56(%3), %%mm7          \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "packuswb %%mm3, %%mm2          \n\t"
        "packuswb %%mm5, %%mm4          \n\t"
        "packuswb %%mm7, %%mm6          \n\t"
        "movq     %%mm0, (%0)           \n\t"
        "movq     %%mm2, (%0, %1)       \n\t"
        "movq     %%mm4, (%0, %1, 2)    \n\t"
        "movq     %%mm6, (%0, %2)       \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
           "r"(p)
        : "memory");
    pix += line_size * 4;
    p   += 32;

    // if this were an exact copy of the code above, the compiler would
    // generate some very strange code, thus the "r" constraint is used
    __asm__ volatile (
        "movq       (%3), %%mm0         \n\t"
        "movq      8(%3), %%mm1         \n\t"
        "movq     16(%3), %%mm2         \n\t"
        "movq     24(%3), %%mm3         \n\t"
        "movq     32(%3), %%mm4         \n\t"
        "movq     40(%3), %%mm5         \n\t"
        "movq     48(%3), %%mm6         \n\t"
        "movq     56(%3), %%mm7         \n\t"
        "packuswb  %%mm1, %%mm0         \n\t"
        "packuswb  %%mm3, %%mm2         \n\t"
        "packuswb  %%mm5, %%mm4         \n\t"
        "packuswb  %%mm7, %%mm6         \n\t"
        "movq      %%mm0, (%0)          \n\t"
        "movq      %%mm2, (%0, %1)      \n\t"
        "movq      %%mm4, (%0, %1, 2)   \n\t"
        "movq      %%mm6, (%0, %2)      \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
        : "memory");
}
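
// For reference, a scalar sketch of what the two asm blocks above compute
// (illustrative only): each 16-bit coefficient is saturated to [0, 255],
// which is exactly the behavior of packuswb.
#if 0
static void put_pixels_clamped_sketch(const int16_t *block, uint8_t *pixels,
                                      int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            int v = block[i * 8 + j];
            pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        pixels += line_size;
    }
}
#endif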

#define put_signed_pixels_clamped_mmx_half(off)             \
    "movq          "#off"(%2), %%mm1        \n\t"           \
    "movq     16 + "#off"(%2), %%mm2        \n\t"           \
    "movq     32 + "#off"(%2), %%mm3        \n\t"           \
    "movq     48 + "#off"(%2), %%mm4        \n\t"           \
    "packsswb  8 + "#off"(%2), %%mm1        \n\t"           \
    "packsswb 24 + "#off"(%2), %%mm2        \n\t"           \
    "packsswb 40 + "#off"(%2), %%mm3        \n\t"           \
    "packsswb 56 + "#off"(%2), %%mm4        \n\t"           \
    "paddb              %%mm0, %%mm1        \n\t"           \
    "paddb              %%mm0, %%mm2        \n\t"           \
    "paddb              %%mm0, %%mm3        \n\t"           \
    "paddb              %%mm0, %%mm4        \n\t"           \
    "movq               %%mm1, (%0)         \n\t"           \
    "movq               %%mm2, (%0, %3)     \n\t"           \
    "movq               %%mm3, (%0, %3, 2)  \n\t"           \
    "movq               %%mm4, (%0, %1)     \n\t"

void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                                      int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_pb_80)", %%mm0     \n\t"
        "lea         (%3, %3, 2), %1        \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea         (%0, %3, 4), %0        \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        : "+&r"(pixels), "=&r"(line_skip3)
        : "r"(block), "r"(line_skip)
        : "memory");
}
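
// Scalar sketch of the above (illustrative only): packsswb saturates each
// coefficient to [-128, 127], and adding ff_pb_80 (0x80 per byte) shifts
// the result into the unsigned range [0, 255].
#if 0
static void put_signed_pixels_clamped_sketch(const int16_t *block,
                                             uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            int v = block[i * 8 + j];
            v = v < -128 ? -128 : v > 127 ? 127 : v;
            pixels[j] = v + 128;
        }
        pixels += line_size;
    }
}
#endif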

void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p   = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile (
            "movq        (%2), %%mm0    \n\t"
            "movq       8(%2), %%mm1    \n\t"
            "movq      16(%2), %%mm2    \n\t"
            "movq      24(%2), %%mm3    \n\t"
            "movq          %0, %%mm4    \n\t"
            "movq          %1, %%mm6    \n\t"
            "movq       %%mm4, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm4    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm4, %%mm0    \n\t"
            "paddsw     %%mm5, %%mm1    \n\t"
            "movq       %%mm6, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm6    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm6, %%mm2    \n\t"
            "paddsw     %%mm5, %%mm3    \n\t"
            "packuswb   %%mm1, %%mm0    \n\t"
            "packuswb   %%mm3, %%mm2    \n\t"
            "movq       %%mm0, %0       \n\t"
            "movq       %%mm2, %1       \n\t"
            : "+m"(*pix), "+m"(*(pix + line_size))
            : "r"(p)
            : "memory");
        pix += line_size * 2;
        p   += 16;
    } while (--i);
}
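
// Scalar sketch of the loop above (illustrative only): each residual is
// added to the existing pixel and the sum saturated to [0, 255], matching
// the punpck/paddsw/packuswb sequence.
#if 0
static void add_pixels_clamped_sketch(const int16_t *block, uint8_t *pixels,
                                      int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            int v = pixels[j] + block[i * 8 + j];
            pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        pixels += line_size;
    }
}
#endif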

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
                            ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea   (%3, %3), %%"REG_a"      \n\t"
        ".p2align     3                 \n\t"
        "1:                             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq     %%mm0, (%2)           \n\t"
        "movq     %%mm1, (%2, %3)       \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq     %%mm0, (%2)           \n\t"
        "movq     %%mm1, (%2, %3)       \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "subl        $4, %0             \n\t"
        "jnz         1b                 \n\t"
        : "+g"(h), "+r"(pixels),  "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea   (%3, %3), %%"REG_a"      \n\t"
        ".p2align     3                 \n\t"
        "1:                             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq 8(%1    ), %%mm4          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq 8(%1, %3), %%mm5          \n\t"
        "movq     %%mm0,  (%2)          \n\t"
        "movq     %%mm4, 8(%2)          \n\t"
        "movq     %%mm1,  (%2, %3)      \n\t"
        "movq     %%mm5, 8(%2, %3)      \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq 8(%1    ), %%mm4          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq 8(%1, %3), %%mm5          \n\t"
        "movq     %%mm0,  (%2)          \n\t"
        "movq     %%mm4, 8(%2)          \n\t"
        "movq     %%mm1,  (%2, %3)      \n\t"
        "movq     %%mm5, 8(%2, %3)      \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "subl        $4, %0             \n\t"
        "jnz         1b                 \n\t"
        : "+g"(h), "+r"(pixels),  "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

#define CLEAR_BLOCKS(name, n)                           \
static void name(int16_t *blocks)                       \
{                                                       \
    __asm__ volatile (                                  \
        "pxor %%mm7, %%mm7              \n\t"           \
        "mov     %1,        %%"REG_a"   \n\t"           \
        "1:                             \n\t"           \
        "movq %%mm7,   (%0, %%"REG_a")  \n\t"           \
        "movq %%mm7,  8(%0, %%"REG_a")  \n\t"           \
        "movq %%mm7, 16(%0, %%"REG_a")  \n\t"           \
        "movq %%mm7, 24(%0, %%"REG_a")  \n\t"           \
        "add    $32, %%"REG_a"          \n\t"           \
        "js      1b                     \n\t"           \
        :: "r"(((uint8_t *)blocks) + 128 * n),          \
           "i"(-128 * n)                                \
        : "%"REG_a                                      \
        );                                              \
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)
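
// Equivalent scalar sketch (illustrative only): each DCT block holds
// 64 int16_t coefficients, i.e. 128 bytes, hence the 128 * n addressing.
#if 0
static void clear_blocks_sketch(int16_t *blocks, int n)
{
    int i;
    for (i = 0; i < 64 * n; i++)
        blocks[i] = 0;
}
#endif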

static void clear_block_sse(int16_t *block)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0          \n"
        "movaps %%xmm0,    (%0)         \n"
        "movaps %%xmm0,  16(%0)         \n"
        "movaps %%xmm0,  32(%0)         \n"
        "movaps %%xmm0,  48(%0)         \n"
        "movaps %%xmm0,  64(%0)         \n"
        "movaps %%xmm0,  80(%0)         \n"
        "movaps %%xmm0,  96(%0)         \n"
        "movaps %%xmm0, 112(%0)         \n"
        :: "r"(block)
        : "memory"
    );
}

static void clear_blocks_sse(int16_t *blocks)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0              \n"
        "mov        %1,         %%"REG_a"   \n"
        "1:                                 \n"
        "movaps %%xmm0,    (%0, %%"REG_a")  \n"
        "movaps %%xmm0,  16(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  32(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  48(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  64(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  80(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  96(%0, %%"REG_a")  \n"
        "movaps %%xmm0, 112(%0, %%"REG_a")  \n"
        "add      $128,         %%"REG_a"   \n"
        "js         1b                      \n"
        :: "r"(((uint8_t *)blocks) + 128 * 6),
           "i"(-128 * 6)
        : "%"REG_a
    );
}

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
{
    x86_reg i = 0;
    __asm__ volatile (
        "jmp          2f                \n\t"
        "1:                             \n\t"
        "movq   (%1, %0), %%mm0         \n\t"
        "movq   (%2, %0), %%mm1         \n\t"
        "paddb     %%mm0, %%mm1         \n\t"
        "movq      %%mm1, (%2, %0)      \n\t"
        "movq  8(%1, %0), %%mm0         \n\t"
        "movq  8(%2, %0), %%mm1         \n\t"
        "paddb     %%mm0, %%mm1         \n\t"
        "movq      %%mm1, 8(%2, %0)     \n\t"
        "add         $16, %0            \n\t"
        "2:                             \n\t"
        "cmp          %3, %0            \n\t"
        "js           1b                \n\t"
        : "+r"(i)
        : "r"(src), "r"(dst), "r"((x86_reg)w - 15)
    );
    for ( ; i < w; i++)
        dst[i + 0] += src[i + 0];
}

#if HAVE_7REGS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
                                            const uint8_t *diff, int w,
                                            int *left, int *left_top)
{
    x86_reg w2 = -w;
    x86_reg x;
    int l  = *left     & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile (
        "mov          %7, %3            \n"
        "1:                             \n"
        "movzbl (%3, %4), %2            \n"
        "mov          %2, %k3           \n"
        "sub         %b1, %b3           \n"
        "add         %b0, %b3           \n"
        "mov          %2, %1            \n"
        "cmp          %0, %2            \n"
        "cmovg        %0, %2            \n"
        "cmovg        %1, %0            \n"
        "cmp         %k3, %0            \n"
        "cmovg       %k3, %0            \n"
        "mov          %7, %3            \n"
        "cmp          %2, %0            \n"
        "cmovl        %2, %0            \n"
        "add    (%6, %4), %b0           \n"
        "mov         %b0, (%5, %4)      \n"
        "inc          %4                \n"
        "jl           1b                \n"
        : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        : "r"(dst + w), "r"(diff + w), "rm"(top + w)
    );
    *left     = l;
    *left_top = tl;
}
#endif /* HAVE_7REGS */
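
// Scalar sketch of the predictor the cmov loop implements (illustrative
// only): HuffYUV median prediction, pred = median(left, top,
// left + top - topleft), followed by adding the stored difference.
#if 0
static void add_hfyu_median_prediction_sketch(uint8_t *dst, const uint8_t *top,
                                              const uint8_t *diff, int w,
                                              int *left, int *left_top)
{
    int i, l = *left, tl = *left_top;
    for (i = 0; i < w; i++) {
        int t    = top[i];
        int grad = (l + t - tl) & 0xff;
        int lo   = l < t ? l : t;
        int hi   = l < t ? t : l;
        int pred = grad < lo ? lo : grad > hi ? hi : grad; /* median of 3 */
        l      = (pred + diff[i]) & 0xff;
        tl     = t;
        dst[i] = l;
    }
    *left     = l;
    *left_top = tl;
}
#endif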

/* Draw the edges of width 'w' of an image of size width x height.
 * This MMX version can only handle w == 4, w == 8 or w == 16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
                           int w, int h, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if (w == 8) {
        __asm__ volatile (
            "1:                             \n\t"
            "movd            (%0), %%mm0    \n\t"
            "punpcklbw      %%mm0, %%mm0    \n\t"
            "punpcklwd      %%mm0, %%mm0    \n\t"
            "punpckldq      %%mm0, %%mm0    \n\t"
            "movq           %%mm0, -8(%0)   \n\t"
            "movq      -8(%0, %2), %%mm1    \n\t"
            "punpckhbw      %%mm1, %%mm1    \n\t"
            "punpckhwd      %%mm1, %%mm1    \n\t"
            "punpckhdq      %%mm1, %%mm1    \n\t"
            "movq           %%mm1, (%0, %2) \n\t"
            "add               %1, %0       \n\t"
            "cmp               %3, %0       \n\t"
            "jb                1b           \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    } else if (w == 16) {
        __asm__ volatile (
            "1:                                 \n\t"
            "movd            (%0), %%mm0        \n\t"
            "punpcklbw      %%mm0, %%mm0        \n\t"
            "punpcklwd      %%mm0, %%mm0        \n\t"
            "punpckldq      %%mm0, %%mm0        \n\t"
            "movq           %%mm0, -8(%0)       \n\t"
            "movq           %%mm0, -16(%0)      \n\t"
            "movq      -8(%0, %2), %%mm1        \n\t"
            "punpckhbw      %%mm1, %%mm1        \n\t"
            "punpckhwd      %%mm1, %%mm1        \n\t"
            "punpckhdq      %%mm1, %%mm1        \n\t"
            "movq           %%mm1,  (%0, %2)    \n\t"
            "movq           %%mm1, 8(%0, %2)    \n\t"
            "add               %1, %0           \n\t"
            "cmp               %3, %0           \n\t"
            "jb                1b               \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    } else {
        av_assert1(w == 4);
        __asm__ volatile (
            "1:                             \n\t"
            "movd            (%0), %%mm0    \n\t"
            "punpcklbw      %%mm0, %%mm0    \n\t"
            "punpcklwd      %%mm0, %%mm0    \n\t"
            "movd           %%mm0, -4(%0)   \n\t"
            "movd      -4(%0, %2), %%mm1    \n\t"
            "punpcklbw      %%mm1, %%mm1    \n\t"
            "punpckhwd      %%mm1, %%mm1    \n\t"
            "punpckhdq      %%mm1, %%mm1    \n\t"
            "movd           %%mm1, (%0, %2) \n\t"
            "add               %1, %0       \n\t"
            "cmp               %3, %0       \n\t"
            "jb                1b           \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    }

    /* top and bottom (and hopefully also the corners) */
    if (sides & EDGE_TOP) {
        for (i = 0; i < h; i += 4) {
            ptr = buf - (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
                  "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w)
                );
        }
    }

    if (sides & EDGE_BOTTOM) {
        for (i = 0; i < h; i += 4) {
            ptr = last_line + (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)last_line - (x86_reg)ptr - w),
                  "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
                  "r"(ptr + width + 2 * w)
                );
        }
    }
}
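
// Scalar sketch of the left/right replication done above (illustrative
// only): every row's first and last pixel are smeared 'w' wide into the
// borders; EDGE_TOP/EDGE_BOTTOM then copy whole padded rows outward.
#if 0
static void draw_edges_lr_sketch(uint8_t *buf, int wrap, int width,
                                 int height, int w)
{
    int x, y;
    for (y = 0; y < height; y++) {
        uint8_t *row = buf + y * wrap;
        for (x = 1; x <= w; x++) {
            row[-x]            = row[0];
            row[width - 1 + x] = row[width - 1];
        }
    }
}
#endif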
641 #endif /* HAVE_INLINE_ASM */
642
643
644 #if HAVE_YASM
645 #define QPEL_OP(OPNAME, ROUNDER, RND, MMX)                              \
646 static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src,   \
647                                           ptrdiff_t stride)             \
648 {                                                                       \
649     ff_ ## OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);              \
650 }                                                                       \
651                                                                         \
652 static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src,    \
653                                          ptrdiff_t stride)              \
654 {                                                                       \
655     uint64_t temp[8];                                                   \
656     uint8_t * const half = (uint8_t*)temp;                              \
657     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
658                                                    stride, 8);          \
659     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
660                                         stride, stride, 8);             \
661 }                                                                       \
662                                                                         \
663 static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src,    \
664                                          ptrdiff_t stride)              \
665 {                                                                       \
666     ff_ ## OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride,    \
667                                                    stride, 8);          \
668 }                                                                       \
669                                                                         \
670 static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src,    \
671                                          ptrdiff_t stride)              \
672 {                                                                       \
673     uint64_t temp[8];                                                   \
674     uint8_t * const half = (uint8_t*)temp;                              \
675     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
676                                                    stride, 8);          \
677     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride,     \
678                                         stride, 8);                     \
679 }                                                                       \
680                                                                         \
681 static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src,    \
682                                          ptrdiff_t stride)              \
683 {                                                                       \
684     uint64_t temp[8];                                                   \
685     uint8_t * const half = (uint8_t*)temp;                              \
686     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
687                                                    8, stride);          \
688     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
689                                         stride, stride, 8);             \
690 }                                                                       \
691                                                                         \
692 static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src,    \
693                                          ptrdiff_t stride)              \
694 {                                                                       \
695     ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src,            \
696                                                    stride, stride);     \
697 }                                                                       \
698                                                                         \
699 static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src,    \
700                                          ptrdiff_t stride)              \
701 {                                                                       \
702     uint64_t temp[8];                                                   \
703     uint8_t * const half = (uint8_t*)temp;                              \
704     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
705                                                    8, stride);          \
706     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride,\
707                                         stride, 8);                     \
708 }                                                                       \
709                                                                         \
710 static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src,    \
711                                          ptrdiff_t stride)              \
712 {                                                                       \
713     uint64_t half[8 + 9];                                               \
714     uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
715     uint8_t * const halfHV = ((uint8_t*)half);                          \
716     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
717                                                    stride, 9);          \
718     ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
719                                         stride, 9);                     \
720     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
721     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
722                                         stride, 8, 8);                  \
723 }                                                                       \
724                                                                         \
725 static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src,    \
726                                          ptrdiff_t stride)              \
727 {                                                                       \
728     uint64_t half[8 + 9];                                               \
729     uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
730     uint8_t * const halfHV = ((uint8_t*)half);                          \
731     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
732                                                    stride, 9);          \
733     ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
734                                         stride, 9);                     \
735     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
736     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
737                                         stride, 8, 8);                  \
738 }                                                                       \
739                                                                         \
740 static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src,    \
741                                          ptrdiff_t stride)              \
742 {                                                                       \
743     uint64_t half[8 + 9];                                               \
744     uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
745     uint8_t * const halfHV = ((uint8_t*)half);                          \
746     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
747                                                    stride, 9);          \
748     ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
749                                         stride, 9);                     \
750     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
751     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
752                                         stride, 8, 8);                  \
753 }                                                                       \
754                                                                         \
755 static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src,    \
756                                          ptrdiff_t stride)              \
757 {                                                                       \
758     uint64_t half[8 + 9];                                               \
759     uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
760     uint8_t * const halfHV = ((uint8_t*)half);                          \
761     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
762                                                    stride, 9);          \
763     ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
764                                         stride, 9);                     \
765     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
766     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
767                                         stride, 8, 8);                  \
768 }                                                                       \
769                                                                         \
770 static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src,    \
771                                          ptrdiff_t stride)              \
772 {                                                                       \
773     uint64_t half[8 + 9];                                               \
774     uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
775     uint8_t * const halfHV = ((uint8_t*)half);                          \
776     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
777                                                    stride, 9);          \
778     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
779     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
780                                         stride, 8, 8);                  \
781 }                                                                       \
782                                                                         \
783 static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src,    \
784                                          ptrdiff_t stride)              \
785 {                                                                       \
786     uint64_t half[8 + 9];                                               \
787     uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
788     uint8_t * const halfHV = ((uint8_t*)half);                          \
789     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
790                                                    stride, 9);          \
791     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
792     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
793                                         stride, 8, 8);                  \
794 }                                                                       \
795                                                                         \
796 static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src,    \
797                                          ptrdiff_t stride)              \
798 {                                                                       \
799     uint64_t half[8 + 9];                                               \
800     uint8_t * const halfH = ((uint8_t*)half);                           \
801     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
802                                                    stride, 9);          \
803     ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH,              \
804                                         8, stride, 9);                  \
805     ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
806                                                    stride, 8);          \
807 }                                                                       \
808                                                                         \
809 static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src,    \
810                                          ptrdiff_t stride)              \
811 {                                                                       \
812     uint64_t half[8 + 9];                                               \
813     uint8_t * const halfH = ((uint8_t*)half);                           \
814     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
815                                                    stride, 9);          \
816     ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
817                                         stride, 9);                     \
818     ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
819                                                    stride, 8);          \
820 }                                                                       \
821                                                                         \
822 static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src,    \
823                                          ptrdiff_t stride)              \
824 {                                                                       \
825     uint64_t half[9];                                                   \
826     uint8_t * const halfH = ((uint8_t*)half);                           \
827     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
828                                                    stride, 9);          \
829     ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
830                                                    stride, 8);          \
831 }                                                                       \
832                                                                         \
833 static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src,  \
834                                            ptrdiff_t stride)            \
835 {                                                                       \
836     ff_ ## OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);            \
837 }                                                                       \
838                                                                         \
839 static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src,   \
840                                           ptrdiff_t stride)             \
841 {                                                                       \
842     uint64_t temp[32];                                                  \
843     uint8_t * const half = (uint8_t*)temp;                              \
844     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
845                                                     stride, 16);        \
846     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
847                                          stride, 16);                   \
848 }                                                                       \
849                                                                         \
850 static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src,   \
851                                           ptrdiff_t stride)             \
852 {                                                                       \
853     ff_ ## OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src,           \
854                                                     stride, stride, 16);\
855 }                                                                       \
856                                                                         \
857 static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src,   \
858                                           ptrdiff_t stride)             \
859 {                                                                       \
860     uint64_t temp[32];                                                  \
861     uint8_t * const half = (uint8_t*)temp;                              \
862     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
863                                                     stride, 16);        \
864     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half,            \
865                                          stride, stride, 16);           \
866 }                                                                       \
867                                                                         \
868 static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src,   \
869                                           ptrdiff_t stride)             \
870 {                                                                       \
871     uint64_t temp[32];                                                  \
872     uint8_t * const half = (uint8_t*)temp;                              \
873     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
874                                                     stride);            \
875     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
876                                          stride, 16);                   \
877 }                                                                       \
878                                                                         \
879 static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src,   \
880                                           ptrdiff_t stride)             \
881 {                                                                       \
882     ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src,           \
883                                                     stride, stride);    \
884 }                                                                       \
885                                                                         \
886 static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src,   \
887                                           ptrdiff_t stride)             \
888 {                                                                       \
889     uint64_t temp[32];                                                  \
890     uint8_t * const half = (uint8_t*)temp;                              \
891     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
892                                                     stride);            \
893     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half,         \
894                                          stride, stride, 16);           \
895 }                                                                       \
896                                                                         \
897 static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src,   \
898                                           ptrdiff_t stride)             \
899 {                                                                       \
900     uint64_t half[16 * 2 + 17 * 2];                                     \
901     uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
902     uint8_t * const halfHV = ((uint8_t*)half);                          \
903     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
904                                                     stride, 17);        \
905     ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
906                                          stride, 17);                   \
907     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
908                                                     16, 16);            \
909     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
910                                          stride, 16, 16);               \
911 }                                                                       \
912                                                                         \
913 static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src,   \
914                                           ptrdiff_t stride)             \
915 {                                                                       \
916     uint64_t half[16 * 2 + 17 * 2];                                     \
917     uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
918     uint8_t * const halfHV = ((uint8_t*)half);                          \
919     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
920                                                     stride, 17);        \
921     ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
922                                          stride, 17);                   \
923     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
924                                                     16, 16);            \
925     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
926                                          stride, 16, 16);               \
927 }                                                                       \
928                                                                         \
929 static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src,   \
930                                           ptrdiff_t stride)             \
931 {                                                                       \
932     uint64_t half[16 * 2 + 17 * 2];                                     \
933     uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
934     uint8_t * const halfHV = ((uint8_t*)half);                          \
935     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
936                                                     stride, 17);        \
937     ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
938                                          stride, 17);                   \
939     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
940                                                     16, 16);            \
941     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
942                                          stride, 16, 16);               \
943 }                                                                       \
944                                                                         \
945 static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src,   \
946                                           ptrdiff_t stride)             \
947 {                                                                       \
948     uint64_t half[16 * 2 + 17 * 2];                                     \
949     uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
950     uint8_t * const halfHV = ((uint8_t*)half);                          \
951     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
952                                                     stride, 17);        \
953     ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
954                                          stride, 17);                   \
955     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}

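/* QPEL_OP instantiates the full set of MPEG-4 quarter-pel motion
 * compensation functions defined above. The mcXY suffix encodes the
 * quarter-pel position, X horizontal and Y vertical, so e.g. mc21 is
 * (1/2, 1/4); put_ stores the result while avg_ averages it with dst.
 * The no_rnd variants appear to differ only in the rounding bias fed
 * to the lowpass filters (ff_pw_15 instead of ff_pw_16). */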
QPEL_OP(put_,          ff_pw_16, _,        mmxext)
QPEL_OP(avg_,          ff_pw_16, _,        mmxext)
QPEL_OP(put_no_rnd_,   ff_pw_15, _no_rnd_, mmxext)
#endif /* HAVE_YASM */


#if HAVE_INLINE_ASM
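/* RV40's (3, 3) luma position is simply the rounded average of the four
 * neighbouring pixels, which is exactly what the xy2 halfpel helpers
 * compute, so they are reused here directly. */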
void ff_put_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels8_xy2_mmx(dst, src, stride, 8);
}

void ff_put_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels16_xy2_mmx(dst, src, stride, 16);
}

void ff_avg_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels8_xy2_mmx(dst, src, stride, 8);
}

void ff_avg_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels16_xy2_mmx(dst, src, stride, 16);
}

typedef void emulated_edge_mc_func(uint8_t *dst, const uint8_t *src,
                                   ptrdiff_t linesize, int block_w, int block_h,
                                   int src_x, int src_y, int w, int h);

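/* Global motion compensation with per-pixel bilinear interpolation.
 * With s = 1 << shift and (dx, dy) the fractional position of a pixel,
 * the inline asm below evaluates, four pixels at a time in 16-bit lanes:
 *
 *     dst = (  src[0][0] * (s - dx) * (s - dy)
 *            + src[0][1] *      dx  * (s - dy)
 *            + src[1][0] * (s - dx) *      dy
 *            + src[1][1] *      dx  *      dy
 *            + r) >> (2 * shift)
 *
 * The per-column positions are kept in 16-bit accumulators (dx4/dy4),
 * advanced by the affine coefficients (dxx, dxy, dyx, dyy) as x and y
 * move; psrlw $12 extracts the fractional weights. Cases the MMX path
 * cannot handle (fullpel offset overflow, more than 16 bits of subpel
 * motion, oversized edge-emulation buffers) fall back to ff_gmc_c. */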
static av_always_inline void gmc(uint8_t *dst, uint8_t *src,
                                 int stride, int h, int ox, int oy,
                                 int dxx, int dxy, int dyx, int dyy,
                                 int shift, int r, int width, int height,
                                 emulated_edge_mc_func *emu_edge_fn)
{
    const int w    = 8;
    const int ix   = ox  >> (16 + shift);
    const int iy   = oy  >> (16 + shift);
    const int oxs  = ox  >> 4;
    const int oys  = oy  >> 4;
    const int dxxs = dxx >> 4;
    const int dxys = dxy >> 4;
    const int dyxs = dyx >> 4;
    const int dyys = dyy >> 4;
    const uint16_t r4[4]   = { r, r, r, r };
    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
    const uint64_t shift2  = 2 * shift;
#define MAX_STRIDE 4096U
#define MAX_H 8U
    uint8_t edge_buf[(MAX_H + 1) * MAX_STRIDE];
    int x, y;

    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
    const int dxh = dxy * (h - 1);
    const int dyw = dyx * (w - 1);
    int need_emu =  (unsigned)ix >= width  - w ||
                    (unsigned)iy >= height - h;

    if ( // non-constant fullpel offset (3% of blocks)
        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx | dxy | dyx | dyy) & 15
        || (need_emu && (h > MAX_H || stride > MAX_STRIDE))) {
        // FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
                 shift, r, width, height);
        return;
    }

    src += ix + iy * stride;
    if (need_emu) {
        emu_edge_fn(edge_buf, src, stride, w + 1, h + 1, ix, iy, width, height);
        src = edge_buf;
    }

    __asm__ volatile (
        "movd         %0, %%mm6         \n\t"
        "pxor      %%mm7, %%mm7         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        :: "r"(1 << shift)
    );

    for (x = 0; x < w; x += 4) {
        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
                            oxs - dxys + dxxs * (x + 1),
                            oxs - dxys + dxxs * (x + 2),
                            oxs - dxys + dxxs * (x + 3) };
        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
                            oys - dyys + dyxs * (x + 1),
                            oys - dyys + dyxs * (x + 2),
                            oys - dyys + dyxs * (x + 3) };

        for (y = 0; y < h; y++) {
            __asm__ volatile (
                "movq      %0, %%mm4    \n\t"
                "movq      %1, %%mm5    \n\t"
                "paddw     %2, %%mm4    \n\t"
                "paddw     %3, %%mm5    \n\t"
                "movq   %%mm4, %0       \n\t"
                "movq   %%mm5, %1       \n\t"
                "psrlw    $12, %%mm4    \n\t"
                "psrlw    $12, %%mm5    \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            __asm__ volatile (
                "movq      %%mm6, %%mm2 \n\t"
                "movq      %%mm6, %%mm1 \n\t"
                "psubw     %%mm4, %%mm2 \n\t"
                "psubw     %%mm5, %%mm1 \n\t"
                "movq      %%mm2, %%mm0 \n\t"
                "movq      %%mm4, %%mm3 \n\t"
                "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
                "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
                "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
                "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)

                "movd         %4, %%mm5 \n\t"
                "movd         %3, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
                "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy

                "movd         %2, %%mm5 \n\t"
                "movd         %1, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
                "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
                "paddw        %5, %%mm1 \n\t"
                "paddw     %%mm3, %%mm2 \n\t"
                "paddw     %%mm1, %%mm0 \n\t"
                "paddw     %%mm2, %%mm0 \n\t"

                "psrlw        %6, %%mm0 \n\t"
                "packuswb  %%mm0, %%mm0 \n\t"
                "movd      %%mm0, %0    \n\t"

                : "=m"(dst[x + y * stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride + 1]),
                  "m"(*r4), "m"(shift2)
            );
            src += stride;
        }
        src += 4 - h * stride;
    }
}


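/* Thin wrappers binding gmc() to the 8-bit edge-emulation helper. With
 * YASM available, gmc_sse is always built and gmc_mmx additionally on
 * x86-32; without YASM only gmc_mmx exists. All of them currently use
 * the same ff_emulated_edge_mc_8. */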
#if CONFIG_VIDEODSP
#if HAVE_YASM
#if ARCH_X86_32
static void gmc_mmx(uint8_t *dst, uint8_t *src,
                    int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy,
                    int shift, int r, int width, int height)
{
    gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
        width, height, &ff_emulated_edge_mc_8);
}
#endif /* ARCH_X86_32 */
static void gmc_sse(uint8_t *dst, uint8_t *src,
                    int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy,
                    int shift, int r, int width, int height)
{
    gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
        width, height, &ff_emulated_edge_mc_8);
}
#else /* !HAVE_YASM */
static void gmc_mmx(uint8_t *dst, uint8_t *src,
                    int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy,
                    int shift, int r, int width, int height)
{
    gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
        width, height, &ff_emulated_edge_mc_8);
}
#endif /* HAVE_YASM */
#endif /* CONFIG_VIDEODSP */

/* CAVS-specific */
void ff_put_cavs_qpel8_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels8_mmx(dst, src, stride, 8);
}

void ff_avg_cavs_qpel8_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels8_mmx(dst, src, stride, 8);
}

void ff_put_cavs_qpel16_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels16_mmx(dst, src, stride, 16);
}

void ff_avg_cavs_qpel16_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels16_mmx(dst, src, stride, 16);
}

/* VC-1-specific */
void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src,
                               ptrdiff_t stride, int rnd)
{
    put_pixels8_mmx(dst, src, stride, 8);
}

#if CONFIG_DIRAC_DECODER
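/* Dirac pixel ops. The SIMD put/avg kernels appear to process four rows
 * per iteration, so heights that are not a multiple of 4 fall back to
 * the C versions; 32-pixel-wide blocks are handled as two 16-pixel
 * halves. */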
#define DIRAC_PIXOP(OPNAME2, OPNAME, EXT)\
void ff_ ## OPNAME2 ## _dirac_pixels8_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    if (h & 3)\
        ff_ ## OPNAME2 ## _dirac_pixels8_c(dst, src, stride, h);\
    else\
        OPNAME ## _pixels8_ ## EXT(dst, src[0], stride, h);\
}\
void ff_ ## OPNAME2 ## _dirac_pixels16_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    if (h & 3)\
        ff_ ## OPNAME2 ## _dirac_pixels16_c(dst, src, stride, h);\
    else\
        OPNAME ## _pixels16_ ## EXT(dst, src[0], stride, h);\
}\
void ff_ ## OPNAME2 ## _dirac_pixels32_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    if (h & 3) {\
        ff_ ## OPNAME2 ## _dirac_pixels32_c(dst, src, stride, h);\
    } else {\
        OPNAME ## _pixels16_ ## EXT(dst,      src[0],      stride, h);\
        OPNAME ## _pixels16_ ## EXT(dst + 16, src[0] + 16, stride, h);\
    }\
}

#if HAVE_MMX_INLINE
DIRAC_PIXOP(put, put, mmx)
DIRAC_PIXOP(avg, avg, mmx)
#endif /* HAVE_MMX_INLINE */

#if HAVE_YASM
DIRAC_PIXOP(avg, ff_avg, mmxext)

void ff_put_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3)
        ff_put_dirac_pixels16_c(dst, src, stride, h);
    else
        ff_put_pixels16_sse2(dst, src[0], stride, h);
}

void ff_avg_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3)
        ff_avg_dirac_pixels16_c(dst, src, stride, h);
    else
        ff_avg_pixels16_sse2(dst, src[0], stride, h);
}

void ff_put_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3) {
        ff_put_dirac_pixels32_c(dst, src, stride, h);
    } else {
        ff_put_pixels16_sse2(dst,      src[0],      stride, h);
        ff_put_pixels16_sse2(dst + 16, src[0] + 16, stride, h);
    }
}

void ff_avg_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3) {
        ff_avg_dirac_pixels32_c(dst, src, stride, h);
    } else {
        ff_avg_pixels16_sse2(dst,      src[0],      stride, h);
        ff_avg_pixels16_sse2(dst + 16, src[0] + 16, stride, h);
    }
}
#endif /* HAVE_YASM */
#endif /* CONFIG_DIRAC_DECODER */
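/* Clamp every float in src to [min, max] with maxps/minps, 16 floats
 * (64 bytes) per iteration, walking backwards from the end of the
 * buffers. Note the movaps: dst and src must be 16-byte aligned, and
 * len must be a multiple of 16. */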
static void vector_clipf_sse(float *dst, const float *src,
                             float min, float max, int len)
{
    x86_reg i = (len - 16) * 4;
    __asm__ volatile (
        "movss          %3, %%xmm4      \n\t"
        "movss          %4, %%xmm5      \n\t"
        "shufps $0, %%xmm4, %%xmm4      \n\t"
        "shufps $0, %%xmm5, %%xmm5      \n\t"
        "1:                             \n\t"
        "movaps   (%2, %0), %%xmm0      \n\t" // 3/1 on intel
        "movaps 16(%2, %0), %%xmm1      \n\t"
        "movaps 32(%2, %0), %%xmm2      \n\t"
        "movaps 48(%2, %0), %%xmm3      \n\t"
        "maxps      %%xmm4, %%xmm0      \n\t"
        "maxps      %%xmm4, %%xmm1      \n\t"
        "maxps      %%xmm4, %%xmm2      \n\t"
        "maxps      %%xmm4, %%xmm3      \n\t"
        "minps      %%xmm5, %%xmm0      \n\t"
        "minps      %%xmm5, %%xmm1      \n\t"
        "minps      %%xmm5, %%xmm2      \n\t"
        "minps      %%xmm5, %%xmm3      \n\t"
        "movaps     %%xmm0,   (%1, %0)  \n\t"
        "movaps     %%xmm1, 16(%1, %0)  \n\t"
        "movaps     %%xmm2, 32(%1, %0)  \n\t"
        "movaps     %%xmm3, 48(%1, %0)  \n\t"
        "sub           $64, %0          \n\t"
        "jge            1b              \n\t"
        : "+&r"(i)
        : "r"(dst), "r"(src), "m"(min), "m"(max)
        : "memory"
    );
}

#endif /* HAVE_INLINE_ASM */
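/* Prototypes for optimized routines implemented outside this file
 * (mostly in external assembly); the init functions below select among
 * them based on the detected CPU flags. */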
void ff_h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale);
void ff_h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale);

int32_t ff_scalarproduct_int16_mmxext(const int16_t *v1, const int16_t *v2,
                                      int order);
int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2,
                                    int order);
int32_t ff_scalarproduct_and_madd_int16_mmxext(int16_t *v1, const int16_t *v2,
                                               const int16_t *v3,
                                               int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2,
                                             const int16_t *v3,
                                             int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
                                              const int16_t *v3,
                                              int order, int mul);

void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
                                        const int16_t *window, unsigned int len);
void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);
void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
                                  const int16_t *window, unsigned int len);
void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
                                const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
                                 const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);

void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w);
void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w);

void ff_add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top,
                                          const uint8_t *diff, int w,
                                          int *left, int *left_top);
int  ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src,
                                       int w, int left);
int  ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src,
                                      int w, int left);

void ff_vector_clip_int32_mmx     (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse2    (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse4    (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);

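/* Fill one 16-entry quarter-pel function table; entry (y * 4 + x)
 * serves the quarter-pel position (x, y), matching the mcXY naming
 * used above. */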
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX)                          \
    do {                                                                     \
    c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
    } while (0)

static av_cold void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx,
                                     int mm_flags)
{
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

#if HAVE_INLINE_ASM
    c->put_pixels_clamped        = ff_put_pixels_clamped_mmx;
    c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
    c->add_pixels_clamped        = ff_add_pixels_clamped_mmx;

    if (!high_bit_depth) {
        c->clear_block  = clear_block_mmx;
        c->clear_blocks = clear_blocks_mmx;
        c->draw_edges   = draw_edges_mmx;
    }

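    /* Register the MMX gmc only on x86-32 or when YASM is unavailable;
     * where YASM and inline asm are both present, dsputil_init_sse()
     * replaces it with gmc_sse on SSE-capable CPUs. */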
#if CONFIG_VIDEODSP && (ARCH_X86_32 || !HAVE_YASM)
    c->gmc = gmc_mmx;
#endif

    c->add_bytes = add_bytes_mmx;
#endif /* HAVE_INLINE_ASM */

#if HAVE_YASM
    if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
        c->h263_v_loop_filter = ff_h263_v_loop_filter_mmx;
        c->h263_h_loop_filter = ff_h263_h_loop_filter_mmx;
    }

    c->vector_clip_int32 = ff_vector_clip_int32_mmx;
#endif /* HAVE_YASM */
}

static av_cold void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx,
                                        int mm_flags)
{
#if HAVE_YASM
    SET_QPEL_FUNCS(avg_qpel,        0, 16, mmxext, );
    SET_QPEL_FUNCS(avg_qpel,        1,  8, mmxext, );

    SET_QPEL_FUNCS(put_qpel,        0, 16, mmxext, );
    SET_QPEL_FUNCS(put_qpel,        1,  8, mmxext, );
    SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmxext, );
    SET_QPEL_FUNCS(put_no_rnd_qpel, 1,  8, mmxext, );
#endif /* HAVE_YASM */

#if HAVE_MMXEXT_EXTERNAL
    /* slower than cmov version on AMD */
    if (!(mm_flags & AV_CPU_FLAG_3DNOW))
        c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmxext;

    c->scalarproduct_int16          = ff_scalarproduct_int16_mmxext;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmxext;

    if (avctx->flags & CODEC_FLAG_BITEXACT) {
        c->apply_window_int16 = ff_apply_window_int16_mmxext;
    } else {
        c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
    }
#endif /* HAVE_MMXEXT_EXTERNAL */
}

static av_cold void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx,
                                     int mm_flags)
{
#if HAVE_INLINE_ASM
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (!high_bit_depth) {
        if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)) {
            /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
            c->clear_block  = clear_block_sse;
            c->clear_blocks = clear_blocks_sse;
        }
    }

    c->vector_clipf = vector_clipf_sse;
#endif /* HAVE_INLINE_ASM */

#if HAVE_YASM
#if HAVE_INLINE_ASM && CONFIG_VIDEODSP
    c->gmc = gmc_sse;
#endif
#endif /* HAVE_YASM */
}

static av_cold void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
                                      int mm_flags)
{
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

#if HAVE_SSE2_INLINE
    if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
        c->idct_put              = ff_idct_xvid_sse2_put;
        c->idct_add              = ff_idct_xvid_sse2_add;
        c->idct                  = ff_idct_xvid_sse2;
        c->idct_permutation_type = FF_SSE2_IDCT_PERM;
    }
#endif /* HAVE_SSE2_INLINE */

#if HAVE_SSE2_EXTERNAL
    c->scalarproduct_int16          = ff_scalarproduct_int16_sse2;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
    if (mm_flags & AV_CPU_FLAG_ATOM) {
        c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
    } else {
        c->vector_clip_int32 = ff_vector_clip_int32_sse2;
    }
    if (avctx->flags & CODEC_FLAG_BITEXACT) {
        c->apply_window_int16 = ff_apply_window_int16_sse2;
    } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
        c->apply_window_int16 = ff_apply_window_int16_round_sse2;
    }
    c->bswap_buf = ff_bswap32_buf_sse2;
#endif /* HAVE_SSE2_EXTERNAL */
}

static av_cold void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
                                       int mm_flags)
{
#if HAVE_SSSE3_EXTERNAL
    c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
    if (mm_flags & AV_CPU_FLAG_SSE4) // not really SSE4, just slow on Conroe
        c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;

    if (mm_flags & AV_CPU_FLAG_ATOM)
        c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
    else
        c->apply_window_int16 = ff_apply_window_int16_ssse3;
    if (!(mm_flags & (AV_CPU_FLAG_SSE42 | AV_CPU_FLAG_3DNOW))) // cachesplit
        c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
    c->bswap_buf = ff_bswap32_buf_ssse3;
#endif /* HAVE_SSSE3_EXTERNAL */
}

static av_cold void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
                                      int mm_flags)
{
#if HAVE_SSE4_EXTERNAL
    c->vector_clip_int32 = ff_vector_clip_int32_sse4;
#endif /* HAVE_SSE4_EXTERNAL */
}

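/* Entry point: query the CPU flags once, then run the per-ISA init
 * helpers in ascending order (MMX, MMXEXT, SSE, SSE2, SSSE3, SSE4) so
 * that later helpers overwrite function pointers set by earlier ones
 * with faster versions. */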
av_cold void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();

#if HAVE_7REGS && HAVE_INLINE_ASM
    if (mm_flags & AV_CPU_FLAG_CMOV)
        c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif

    if (mm_flags & AV_CPU_FLAG_MMX) {
#if HAVE_INLINE_ASM
        const int idct_algo = avctx->idct_algo;

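        /* IDCT selection: the simple MMX IDCT by default, or the Xvid
         * IDCT in its best available flavour (SSE2 > MMXEXT > MMX) when
         * explicitly requested. */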
        if (avctx->lowres == 0 && avctx->bits_per_raw_sample <= 8) {
            if (idct_algo == FF_IDCT_AUTO || idct_algo == FF_IDCT_SIMPLEMMX) {
                c->idct_put              = ff_simple_idct_put_mmx;
                c->idct_add              = ff_simple_idct_add_mmx;
                c->idct                  = ff_simple_idct_mmx;
                c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
            } else if (idct_algo == FF_IDCT_XVIDMMX) {
                if (mm_flags & AV_CPU_FLAG_SSE2) {
                    c->idct_put              = ff_idct_xvid_sse2_put;
                    c->idct_add              = ff_idct_xvid_sse2_add;
                    c->idct                  = ff_idct_xvid_sse2;
                    c->idct_permutation_type = FF_SSE2_IDCT_PERM;
                } else if (mm_flags & AV_CPU_FLAG_MMXEXT) {
                    c->idct_put              = ff_idct_xvid_mmxext_put;
                    c->idct_add              = ff_idct_xvid_mmxext_add;
                    c->idct                  = ff_idct_xvid_mmxext;
                } else {
                    c->idct_put              = ff_idct_xvid_mmx_put;
                    c->idct_add              = ff_idct_xvid_mmx_add;
                    c->idct                  = ff_idct_xvid_mmx;
                }
            }
        }
#endif /* HAVE_INLINE_ASM */

        dsputil_init_mmx(c, avctx, mm_flags);
    }

    if (mm_flags & AV_CPU_FLAG_MMXEXT)
        dsputil_init_mmxext(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE)
        dsputil_init_sse(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE2)
        dsputil_init_sse2(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSSE3)
        dsputil_init_ssse3(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE4)
        dsputil_init_sse4(c, avctx, mm_flags);

    if (CONFIG_ENCODERS)
        ff_dsputilenc_init_mmx(c, avctx);
}