x86: Move constants to the only place where they are used
libavcodec/x86/dsputil_mmx.c
1 /*
2  * MMX optimized DSP utils
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 #include "libavutil/attributes.h"
26 #include "libavutil/cpu.h"
27 #include "libavutil/x86/asm.h"
28 #include "libavcodec/dsputil.h"
29 #include "libavcodec/h264dsp.h"
30 #include "libavcodec/mpegvideo.h"
31 #include "libavcodec/simple_idct.h"
32 #include "dsputil_mmx.h"
33 #include "idct_xvid.h"
34
35 //#undef NDEBUG
36 //#include <assert.h>
37
38 /* pixel operations */
39 DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15)   =   0x000F000F000F000FULL;
40 DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_17)   = { 0x0011001100110011ULL, 0x0011001100110011ULL };
41 DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20)   =   0x0014001400140014ULL;
42 DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42)   =   0x002A002A002A002AULL;
43 DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53)   =   0x0035003500350035ULL;
44 DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96)   =   0x0060006000600060ULL;
45 DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128)  =   0x0080008000800080ULL;
46 DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255)  =   0x00ff00ff00ff00ffULL;
47 DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_512)  = { 0x0200020002000200ULL, 0x0200020002000200ULL };
48 DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL };
49
50 DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F)   =   0x3F3F3F3F3F3F3F3FULL;
51 DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC)   =   0xFCFCFCFCFCFCFCFCULL;
52
53 DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
54 DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };
55
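/* Illustrative note (not part of the original file): each ff_pw_N constant
 * packs the 16-bit value N into every lane, e.g. ff_pw_20 holds 0x0014 in all
 * four words of each 64-bit half, so an instruction such as
 * "paddw "MANGLE(ff_pw_20)", %%mm0" adds 20 to every 16-bit lane of mm0 at
 * once.  The ff_pb_* constants do the same with 8-bit lanes. */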
56
57 #if HAVE_YASM
58 void ff_put_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
59                               ptrdiff_t line_size, int h);
60 void ff_put_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
61                              ptrdiff_t line_size, int h);
62 void ff_put_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
63                               int dstStride, int src1Stride, int h);
64 void ff_put_no_rnd_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1,
65                                      uint8_t *src2, int dstStride,
66                                      int src1Stride, int h);
67 void ff_avg_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
68                               int dstStride, int src1Stride, int h);
69 void ff_put_pixels16_x2_mmxext(uint8_t *block, const uint8_t *pixels,
70                                ptrdiff_t line_size, int h);
71 void ff_put_pixels16_x2_3dnow(uint8_t *block, const uint8_t *pixels,
72                               ptrdiff_t line_size, int h);
73 void ff_put_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
74                                int dstStride, int src1Stride, int h);
75 void ff_avg_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
76                                int dstStride, int src1Stride, int h);
77 void ff_put_no_rnd_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
78                                       int dstStride, int src1Stride, int h);
79 void ff_put_no_rnd_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
80                                      ptrdiff_t line_size, int h);
81 void ff_put_no_rnd_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
82                                     ptrdiff_t line_size, int h);
83 void ff_put_no_rnd_pixels8_x2_exact_mmxext(uint8_t *block,
84                                            const uint8_t *pixels,
85                                            ptrdiff_t line_size, int h);
86 void ff_put_no_rnd_pixels8_x2_exact_3dnow(uint8_t *block,
87                                           const uint8_t *pixels,
88                                           ptrdiff_t line_size, int h);
89 void ff_put_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
90                               ptrdiff_t line_size, int h);
91 void ff_put_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
92                              ptrdiff_t line_size, int h);
93 void ff_put_no_rnd_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
94                                      ptrdiff_t line_size, int h);
95 void ff_put_no_rnd_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
96                                     ptrdiff_t line_size, int h);
97 void ff_put_no_rnd_pixels8_y2_exact_mmxext(uint8_t *block,
98                                            const uint8_t *pixels,
99                                            ptrdiff_t line_size, int h);
100 void ff_put_no_rnd_pixels8_y2_exact_3dnow(uint8_t *block,
101                                           const uint8_t *pixels,
102                                           ptrdiff_t line_size, int h);
103 void ff_avg_pixels8_3dnow(uint8_t *block, const uint8_t *pixels,
104                           ptrdiff_t line_size, int h);
105 void ff_avg_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
106                               ptrdiff_t line_size, int h);
107 void ff_avg_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
108                              ptrdiff_t line_size, int h);
109 void ff_avg_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
110                               ptrdiff_t line_size, int h);
111 void ff_avg_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
112                              ptrdiff_t line_size, int h);
113 void ff_avg_pixels8_xy2_mmxext(uint8_t *block, const uint8_t *pixels,
114                                ptrdiff_t line_size, int h);
115 void ff_avg_pixels8_xy2_3dnow(uint8_t *block, const uint8_t *pixels,
116                               ptrdiff_t line_size, int h);
117
118 static void ff_put_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
119                                    ptrdiff_t line_size, int h)
120 {
121     ff_put_pixels8_mmxext(block,     pixels,     line_size, h);
122     ff_put_pixels8_mmxext(block + 8, pixels + 8, line_size, h);
123 }
124
125 void ff_put_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
126                                          int dstStride, int srcStride, int h);
127 void ff_avg_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
128                                          int dstStride, int srcStride, int h);
129 void ff_put_no_rnd_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
130                                                  int dstStride, int srcStride,
131                                                  int h);
132 void ff_put_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
133                                         int dstStride, int srcStride, int h);
134 void ff_avg_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
135                                         int dstStride, int srcStride, int h);
136 void ff_put_no_rnd_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
137                                                 int dstStride, int srcStride,
138                                                 int h);
139 void ff_put_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
140                                          int dstStride, int srcStride);
141 void ff_avg_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
142                                          int dstStride, int srcStride);
143 void ff_put_no_rnd_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
144                                                  int dstStride, int srcStride);
145 void ff_put_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
146                                         int dstStride, int srcStride);
147 void ff_avg_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
148                                         int dstStride, int srcStride);
149 void ff_put_no_rnd_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
150                                                 int dstStride, int srcStride);
151 #define ff_put_no_rnd_pixels16_mmxext ff_put_pixels16_mmxext
152 #define ff_put_no_rnd_pixels8_mmxext ff_put_pixels8_mmxext
153 #endif /* HAVE_YASM */
154
155
156 #if HAVE_INLINE_ASM
157
158 #define JUMPALIGN()     __asm__ volatile (".p2align 3"::)
159 #define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%"#regd", %%"#regd ::)
160
161 #define MOVQ_BFE(regd)                                  \
162     __asm__ volatile (                                  \
163         "pcmpeqd %%"#regd", %%"#regd"   \n\t"           \
164         "paddb   %%"#regd", %%"#regd"   \n\t" ::)
165
166 #ifndef PIC
167 #define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_bone))
168 #define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_wtwo))
169 #else
170 // For shared libraries (PIC) it is better to generate these constants in registers
171 // pcmpeqd -> -1
172 #define MOVQ_BONE(regd)                                 \
173     __asm__ volatile (                                  \
174         "pcmpeqd  %%"#regd", %%"#regd"  \n\t"           \
175         "psrlw          $15, %%"#regd"  \n\t"           \
176         "packuswb %%"#regd", %%"#regd"  \n\t" ::)
177
178 #define MOVQ_WTWO(regd)                                 \
179     __asm__ volatile (                                  \
180         "pcmpeqd %%"#regd", %%"#regd"   \n\t"           \
181         "psrlw         $15, %%"#regd"   \n\t"           \
182         "psllw          $1, %%"#regd"   \n\t"::)
183
184 #endif
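
/* Worked derivation of the register-only constants above (an illustrative
 * sketch, not part of the original file): pcmpeqd sets every bit, so each
 * 16-bit word becomes 0xFFFF; "psrlw $15" turns that into 0x0001 per word;
 * packuswb then narrows the words to unsigned bytes, giving 0x01 in every
 * byte (ff_bone).  For MOVQ_WTWO the extra "psllw $1" doubles each word to
 * 0x0002 (ff_wtwo).  No memory access is needed, which avoids PIC-unfriendly
 * absolute relocations. */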
185
186 // regr is used as a temporary and for the output result;
187 // the first argument is left unmodified and the second is trashed
188 // regfe is supposed to contain 0xfefefefefefefefe
189 #define PAVGB_MMX_NO_RND(rega, regb, regr, regfe)                \
190     "movq   "#rega", "#regr"            \n\t"                    \
191     "pand   "#regb", "#regr"            \n\t"                    \
192     "pxor   "#rega", "#regb"            \n\t"                    \
193     "pand  "#regfe", "#regb"            \n\t"                    \
194     "psrlq       $1, "#regb"            \n\t"                    \
195     "paddb  "#regb", "#regr"            \n\t"
196
197 #define PAVGB_MMX(rega, regb, regr, regfe)                       \
198     "movq   "#rega", "#regr"            \n\t"                    \
199     "por    "#regb", "#regr"            \n\t"                    \
200     "pxor   "#rega", "#regb"            \n\t"                    \
201     "pand  "#regfe", "#regb"            \n\t"                    \
202     "psrlq       $1, "#regb"            \n\t"                    \
203     "psubb  "#regb", "#regr"            \n\t"
204
205 // mm6 is supposed to contain 0xfefefefefefefefe
206 #define PAVGBP_MMX_NO_RND(rega, regb, regr,  regc, regd, regp)   \
207     "movq  "#rega", "#regr"             \n\t"                    \
208     "movq  "#regc", "#regp"             \n\t"                    \
209     "pand  "#regb", "#regr"             \n\t"                    \
210     "pand  "#regd", "#regp"             \n\t"                    \
211     "pxor  "#rega", "#regb"             \n\t"                    \
212     "pxor  "#regc", "#regd"             \n\t"                    \
213     "pand    %%mm6, "#regb"             \n\t"                    \
214     "pand    %%mm6, "#regd"             \n\t"                    \
215     "psrlq      $1, "#regb"             \n\t"                    \
216     "psrlq      $1, "#regd"             \n\t"                    \
217     "paddb "#regb", "#regr"             \n\t"                    \
218     "paddb "#regd", "#regp"             \n\t"
219
220 #define PAVGBP_MMX(rega, regb, regr, regc, regd, regp)           \
221     "movq  "#rega", "#regr"             \n\t"                    \
222     "movq  "#regc", "#regp"             \n\t"                    \
223     "por   "#regb", "#regr"             \n\t"                    \
224     "por   "#regd", "#regp"             \n\t"                    \
225     "pxor  "#rega", "#regb"             \n\t"                    \
226     "pxor  "#regc", "#regd"             \n\t"                    \
227     "pand    %%mm6, "#regb"             \n\t"                    \
228     "pand    %%mm6, "#regd"             \n\t"                    \
229     "psrlq      $1, "#regd"             \n\t"                    \
230     "psrlq      $1, "#regb"             \n\t"                    \
231     "psubb "#regb", "#regr"             \n\t"                    \
232     "psubb "#regd", "#regp"             \n\t"
233
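/* Scalar reference for the averaging tricks above (an illustrative sketch
 * with a hypothetical helper name, not part of the original file): a
 * byte-wise average that cannot overflow is (a & b) + (((a ^ b) & 0xfe) >> 1)
 * when truncating and (a | b) - (((a ^ b) & 0xfe) >> 1) when rounding up,
 * which is what PAVGB_MMX_NO_RND and PAVGB_MMX do on eight bytes at a time,
 * with regfe/mm6 holding the 0xfe mask in every lane. */
static av_unused uint8_t avg_byte_ref(uint8_t a, uint8_t b, int round_up)
{
    unsigned half_diff = ((a ^ b) & 0xfe) >> 1;          /* floor((a ^ b) / 2) */
    return round_up ? (uint8_t)((a | b) - half_diff)     /* ceil((a + b) / 2)  */
                    : (uint8_t)((a & b) + half_diff);    /* floor((a + b) / 2) */
}
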
234 /***********************************/
235 /* MMX no rounding */
236 #define NO_RND 1
237 #define DEF(x, y) x ## _no_rnd_ ## y ## _mmx
238 #define SET_RND  MOVQ_WONE
239 #define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
240 #define PAVGB(a, b, c, e)               PAVGB_MMX_NO_RND(a, b, c, e)
241 #define OP_AVG(a, b, c, e)              PAVGB_MMX(a, b, c, e)
242
243 #include "dsputil_rnd_template.c"
244
245 #undef DEF
246 #undef SET_RND
247 #undef PAVGBP
248 #undef PAVGB
249 #undef NO_RND
250 /***********************************/
251 /* MMX rounding */
252
253 #define DEF(x, y) x ## _ ## y ## _mmx
254 #define SET_RND  MOVQ_WTWO
255 #define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
256 #define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)
257
258 #include "dsputil_rnd_template.c"
259
260 #undef DEF
261 #undef SET_RND
262 #undef PAVGBP
263 #undef PAVGB
264 #undef OP_AVG
265
266 #endif /* HAVE_INLINE_ASM */
267
268
269 #if HAVE_YASM
270
271 /***********************************/
272 /* 3Dnow specific */
273
274 #define DEF(x) x ## _3dnow
275
276 #include "dsputil_avg_template.c"
277
278 #undef DEF
279
280 /***********************************/
281 /* MMXEXT specific */
282
283 #define DEF(x) x ## _mmxext
284
285 #include "dsputil_avg_template.c"
286
287 #undef DEF
288
289 #endif /* HAVE_YASM */
290
291
292 #if HAVE_INLINE_ASM
293 #define put_no_rnd_pixels16_mmx put_pixels16_mmx
294 #define put_no_rnd_pixels8_mmx put_pixels8_mmx
295
296 /***********************************/
297 /* standard MMX */
298
299 void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
300                                int line_size)
301 {
302     const int16_t *p;
303     uint8_t *pix;
304
305     /* read the pixels */
306     p   = block;
307     pix = pixels;
308     /* unrolled loop */
309     __asm__ volatile (
310         "movq      (%3), %%mm0          \n\t"
311         "movq     8(%3), %%mm1          \n\t"
312         "movq    16(%3), %%mm2          \n\t"
313         "movq    24(%3), %%mm3          \n\t"
314         "movq    32(%3), %%mm4          \n\t"
315         "movq    40(%3), %%mm5          \n\t"
316         "movq    48(%3), %%mm6          \n\t"
317         "movq    56(%3), %%mm7          \n\t"
318         "packuswb %%mm1, %%mm0          \n\t"
319         "packuswb %%mm3, %%mm2          \n\t"
320         "packuswb %%mm5, %%mm4          \n\t"
321         "packuswb %%mm7, %%mm6          \n\t"
322         "movq     %%mm0, (%0)           \n\t"
323         "movq     %%mm2, (%0, %1)       \n\t"
324         "movq     %%mm4, (%0, %1, 2)    \n\t"
325         "movq     %%mm6, (%0, %2)       \n\t"
326         :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
327            "r"(p)
328         : "memory");
329     pix += line_size * 4;
330     p   += 32;
331
332     // If this were an exact copy of the code above, the compiler
333     // would generate some very strange code,
334     // hence the "r" constraints
335     __asm__ volatile (
336         "movq       (%3), %%mm0         \n\t"
337         "movq      8(%3), %%mm1         \n\t"
338         "movq     16(%3), %%mm2         \n\t"
339         "movq     24(%3), %%mm3         \n\t"
340         "movq     32(%3), %%mm4         \n\t"
341         "movq     40(%3), %%mm5         \n\t"
342         "movq     48(%3), %%mm6         \n\t"
343         "movq     56(%3), %%mm7         \n\t"
344         "packuswb  %%mm1, %%mm0         \n\t"
345         "packuswb  %%mm3, %%mm2         \n\t"
346         "packuswb  %%mm5, %%mm4         \n\t"
347         "packuswb  %%mm7, %%mm6         \n\t"
348         "movq      %%mm0, (%0)          \n\t"
349         "movq      %%mm2, (%0, %1)      \n\t"
350         "movq      %%mm4, (%0, %1, 2)   \n\t"
351         "movq      %%mm6, (%0, %2)      \n\t"
352         :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
353         : "memory");
354 }
355
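/* Scalar equivalent of ff_put_pixels_clamped_mmx above (an illustrative
 * sketch with a hypothetical name, not part of the original file): the 8x8
 * block of int16_t coefficients is clamped to [0, 255] and stored as bytes,
 * which is what the packuswb instructions do for eight values at a time. */
static av_unused void put_pixels_clamped_ref(const int16_t *block,
                                             uint8_t *pixels, int line_size)
{
    int i, j;

    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            pixels[j] = av_clip_uint8(block[j]);  /* saturate to 0..255 */
        block  += 8;
        pixels += line_size;
    }
}
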
356 #define put_signed_pixels_clamped_mmx_half(off)             \
357     "movq          "#off"(%2), %%mm1        \n\t"           \
358     "movq     16 + "#off"(%2), %%mm2        \n\t"           \
359     "movq     32 + "#off"(%2), %%mm3        \n\t"           \
360     "movq     48 + "#off"(%2), %%mm4        \n\t"           \
361     "packsswb  8 + "#off"(%2), %%mm1        \n\t"           \
362     "packsswb 24 + "#off"(%2), %%mm2        \n\t"           \
363     "packsswb 40 + "#off"(%2), %%mm3        \n\t"           \
364     "packsswb 56 + "#off"(%2), %%mm4        \n\t"           \
365     "paddb              %%mm0, %%mm1        \n\t"           \
366     "paddb              %%mm0, %%mm2        \n\t"           \
367     "paddb              %%mm0, %%mm3        \n\t"           \
368     "paddb              %%mm0, %%mm4        \n\t"           \
369     "movq               %%mm1, (%0)         \n\t"           \
370     "movq               %%mm2, (%0, %3)     \n\t"           \
371     "movq               %%mm3, (%0, %3, 2)  \n\t"           \
372     "movq               %%mm4, (%0, %1)     \n\t"
373
374 void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
375                                       int line_size)
376 {
377     x86_reg line_skip = line_size;
378     x86_reg line_skip3;
379
380     __asm__ volatile (
381         "movq "MANGLE(ff_pb_80)", %%mm0     \n\t"
382         "lea         (%3, %3, 2), %1        \n\t"
383         put_signed_pixels_clamped_mmx_half(0)
384         "lea         (%0, %3, 4), %0        \n\t"
385         put_signed_pixels_clamped_mmx_half(64)
386         : "+&r"(pixels), "=&r"(line_skip3)
387         : "r"(block), "r"(line_skip)
388         : "memory");
389 }
390
391 void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
392                                int line_size)
393 {
394     const int16_t *p;
395     uint8_t *pix;
396     int i;
397
398     /* read the pixels */
399     p   = block;
400     pix = pixels;
401     MOVQ_ZERO(mm7);
402     i = 4;
403     do {
404         __asm__ volatile (
405             "movq        (%2), %%mm0    \n\t"
406             "movq       8(%2), %%mm1    \n\t"
407             "movq      16(%2), %%mm2    \n\t"
408             "movq      24(%2), %%mm3    \n\t"
409             "movq          %0, %%mm4    \n\t"
410             "movq          %1, %%mm6    \n\t"
411             "movq       %%mm4, %%mm5    \n\t"
412             "punpcklbw  %%mm7, %%mm4    \n\t"
413             "punpckhbw  %%mm7, %%mm5    \n\t"
414             "paddsw     %%mm4, %%mm0    \n\t"
415             "paddsw     %%mm5, %%mm1    \n\t"
416             "movq       %%mm6, %%mm5    \n\t"
417             "punpcklbw  %%mm7, %%mm6    \n\t"
418             "punpckhbw  %%mm7, %%mm5    \n\t"
419             "paddsw     %%mm6, %%mm2    \n\t"
420             "paddsw     %%mm5, %%mm3    \n\t"
421             "packuswb   %%mm1, %%mm0    \n\t"
422             "packuswb   %%mm3, %%mm2    \n\t"
423             "movq       %%mm0, %0       \n\t"
424             "movq       %%mm2, %1       \n\t"
425             : "+m"(*pix), "+m"(*(pix + line_size))
426             : "r"(p)
427             : "memory");
428         pix += line_size * 2;
429         p   += 16;
430     } while (--i);
431 }
432
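/* Scalar equivalent of ff_add_pixels_clamped_mmx above (an illustrative
 * sketch with a hypothetical name, not part of the original file): the IDCT
 * residual is added to the existing pixels with saturation to [0, 255]; the
 * MMX version processes two rows per loop iteration. */
static av_unused void add_pixels_clamped_ref(const int16_t *block,
                                             uint8_t *pixels, int line_size)
{
    int i, j;

    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            pixels[j] = av_clip_uint8(pixels[j] + block[j]);
        block  += 8;
        pixels += line_size;
    }
}
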
433 static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
434                             ptrdiff_t line_size, int h)
435 {
436     __asm__ volatile (
437         "lea   (%3, %3), %%"REG_a"      \n\t"
438         ".p2align     3                 \n\t"
439         "1:                             \n\t"
440         "movq  (%1    ), %%mm0          \n\t"
441         "movq  (%1, %3), %%mm1          \n\t"
442         "movq     %%mm0, (%2)           \n\t"
443         "movq     %%mm1, (%2, %3)       \n\t"
444         "add  %%"REG_a", %1             \n\t"
445         "add  %%"REG_a", %2             \n\t"
446         "movq  (%1    ), %%mm0          \n\t"
447         "movq  (%1, %3), %%mm1          \n\t"
448         "movq     %%mm0, (%2)           \n\t"
449         "movq     %%mm1, (%2, %3)       \n\t"
450         "add  %%"REG_a", %1             \n\t"
451         "add  %%"REG_a", %2             \n\t"
452         "subl        $4, %0             \n\t"
453         "jnz         1b                 \n\t"
454         : "+g"(h), "+r"(pixels),  "+r"(block)
455         : "r"((x86_reg)line_size)
456         : "%"REG_a, "memory"
457         );
458 }
459
460 static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
461                              ptrdiff_t line_size, int h)
462 {
463     __asm__ volatile (
464         "lea   (%3, %3), %%"REG_a"      \n\t"
465         ".p2align     3                 \n\t"
466         "1:                             \n\t"
467         "movq  (%1    ), %%mm0          \n\t"
468         "movq 8(%1    ), %%mm4          \n\t"
469         "movq  (%1, %3), %%mm1          \n\t"
470         "movq 8(%1, %3), %%mm5          \n\t"
471         "movq     %%mm0,  (%2)          \n\t"
472         "movq     %%mm4, 8(%2)          \n\t"
473         "movq     %%mm1,  (%2, %3)      \n\t"
474         "movq     %%mm5, 8(%2, %3)      \n\t"
475         "add  %%"REG_a", %1             \n\t"
476         "add  %%"REG_a", %2             \n\t"
477         "movq  (%1    ), %%mm0          \n\t"
478         "movq 8(%1    ), %%mm4          \n\t"
479         "movq  (%1, %3), %%mm1          \n\t"
480         "movq 8(%1, %3), %%mm5          \n\t"
481         "movq     %%mm0,  (%2)          \n\t"
482         "movq     %%mm4, 8(%2)          \n\t"
483         "movq     %%mm1,  (%2, %3)      \n\t"
484         "movq     %%mm5, 8(%2, %3)      \n\t"
485         "add  %%"REG_a", %1             \n\t"
486         "add  %%"REG_a", %2             \n\t"
487         "subl        $4, %0             \n\t"
488         "jnz         1b                 \n\t"
489         : "+g"(h), "+r"(pixels),  "+r"(block)
490         : "r"((x86_reg)line_size)
491         : "%"REG_a, "memory"
492         );
493 }
494
495 #define CLEAR_BLOCKS(name, n)                           \
496 static void name(int16_t *blocks)                       \
497 {                                                       \
498     __asm__ volatile (                                  \
499         "pxor %%mm7, %%mm7              \n\t"           \
500         "mov     %1,        %%"REG_a"   \n\t"           \
501         "1:                             \n\t"           \
502         "movq %%mm7,   (%0, %%"REG_a")  \n\t"           \
503         "movq %%mm7,  8(%0, %%"REG_a")  \n\t"           \
504         "movq %%mm7, 16(%0, %%"REG_a")  \n\t"           \
505         "movq %%mm7, 24(%0, %%"REG_a")  \n\t"           \
506         "add    $32, %%"REG_a"          \n\t"           \
507         "js      1b                     \n\t"           \
508         :: "r"(((uint8_t *)blocks) + 128 * n),          \
509            "i"(-128 * n)                                \
510         : "%"REG_a                                      \
511         );                                              \
512 }
513 CLEAR_BLOCKS(clear_blocks_mmx, 6)
514 CLEAR_BLOCKS(clear_block_mmx, 1)
515
516 static void clear_block_sse(int16_t *block)
517 {
518     __asm__ volatile (
519         "xorps  %%xmm0, %%xmm0          \n"
520         "movaps %%xmm0,    (%0)         \n"
521         "movaps %%xmm0,  16(%0)         \n"
522         "movaps %%xmm0,  32(%0)         \n"
523         "movaps %%xmm0,  48(%0)         \n"
524         "movaps %%xmm0,  64(%0)         \n"
525         "movaps %%xmm0,  80(%0)         \n"
526         "movaps %%xmm0,  96(%0)         \n"
527         "movaps %%xmm0, 112(%0)         \n"
528         :: "r"(block)
529         : "memory"
530     );
531 }
532
533 static void clear_blocks_sse(int16_t *blocks)
534 {
535     __asm__ volatile (
536         "xorps  %%xmm0, %%xmm0              \n"
537         "mov        %1,         %%"REG_a"   \n"
538         "1:                                 \n"
539         "movaps %%xmm0,    (%0, %%"REG_a")  \n"
540         "movaps %%xmm0,  16(%0, %%"REG_a")  \n"
541         "movaps %%xmm0,  32(%0, %%"REG_a")  \n"
542         "movaps %%xmm0,  48(%0, %%"REG_a")  \n"
543         "movaps %%xmm0,  64(%0, %%"REG_a")  \n"
544         "movaps %%xmm0,  80(%0, %%"REG_a")  \n"
545         "movaps %%xmm0,  96(%0, %%"REG_a")  \n"
546         "movaps %%xmm0, 112(%0, %%"REG_a")  \n"
547         "add      $128,         %%"REG_a"   \n"
548         "js         1b                      \n"
549         :: "r"(((uint8_t *)blocks) + 128 * 6),
550            "i"(-128 * 6)
551         : "%"REG_a
552     );
553 }
554
555 static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
556 {
557     x86_reg i = 0;
558     __asm__ volatile (
559         "jmp          2f                \n\t"
560         "1:                             \n\t"
561         "movq   (%1, %0), %%mm0         \n\t"
562         "movq   (%2, %0), %%mm1         \n\t"
563         "paddb     %%mm0, %%mm1         \n\t"
564         "movq      %%mm1, (%2, %0)      \n\t"
565         "movq  8(%1, %0), %%mm0         \n\t"
566         "movq  8(%2, %0), %%mm1         \n\t"
567         "paddb     %%mm0, %%mm1         \n\t"
568         "movq      %%mm1, 8(%2, %0)     \n\t"
569         "add         $16, %0            \n\t"
570         "2:                             \n\t"
571         "cmp          %3, %0            \n\t"
572         "js           1b                \n\t"
573         : "+r"(i)
574         : "r"(src), "r"(dst), "r"((x86_reg)w - 15)
575     );
576     for ( ; i < w; i++)
577         dst[i + 0] += src[i + 0];
578 }
579
580 #if HAVE_7REGS
581 static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
582                                             const uint8_t *diff, int w,
583                                             int *left, int *left_top)
584 {
585     x86_reg w2 = -w;
586     x86_reg x;
587     int l  = *left     & 0xff;
588     int tl = *left_top & 0xff;
589     int t;
590     __asm__ volatile (
591         "mov          %7, %3            \n"
592         "1:                             \n"
593         "movzbl (%3, %4), %2            \n"
594         "mov          %2, %k3           \n"
595         "sub         %b1, %b3           \n"
596         "add         %b0, %b3           \n"
597         "mov          %2, %1            \n"
598         "cmp          %0, %2            \n"
599         "cmovg        %0, %2            \n"
600         "cmovg        %1, %0            \n"
601         "cmp         %k3, %0            \n"
602         "cmovg       %k3, %0            \n"
603         "mov          %7, %3            \n"
604         "cmp          %2, %0            \n"
605         "cmovl        %2, %0            \n"
606         "add    (%6, %4), %b0           \n"
607         "mov         %b0, (%5, %4)      \n"
608         "inc          %4                \n"
609         "jl           1b                \n"
610         : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
611         : "r"(dst + w), "r"(diff + w), "rm"(top + w)
612     );
613     *left     = l;
614     *left_top = tl;
615 }
616 #endif
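
/* Scalar form of the median prediction implemented with cmov above (an
 * illustrative sketch with a hypothetical name, not part of the original
 * file): each output byte is the stored difference plus the median of the
 * left, top and left + top - top-left predictors, computed with byte
 * (modulo-256) arithmetic like the asm version. */
static av_unused void add_hfyu_median_prediction_ref(uint8_t *dst, const uint8_t *top,
                                                     const uint8_t *diff, int w,
                                                     int *left, int *left_top)
{
    int i;
    int l  = *left     & 0xff;
    int tl = *left_top & 0xff;

    for (i = 0; i < w; i++) {
        int t    = top[i];
        int grad = (l + t - tl) & 0xff;                          /* gradient predictor */
        int pred = FFMAX(FFMIN(l, t), FFMIN(FFMAX(l, t), grad)); /* median of l, t, grad */

        l  = dst[i] = (pred + diff[i]) & 0xff;
        tl = t;
    }
    *left     = l;
    *left_top = tl;
}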
617
618 /* Draw the edges of width 'w' of an image of size width x height;
619  * this MMX version can only handle w == 8 or w == 16. */
620 static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
621                            int w, int h, int sides)
622 {
623     uint8_t *ptr, *last_line;
624     int i;
625
626     last_line = buf + (height - 1) * wrap;
627     /* left and right */
628     ptr = buf;
629     if (w == 8) {
630         __asm__ volatile (
631             "1:                             \n\t"
632             "movd            (%0), %%mm0    \n\t"
633             "punpcklbw      %%mm0, %%mm0    \n\t"
634             "punpcklwd      %%mm0, %%mm0    \n\t"
635             "punpckldq      %%mm0, %%mm0    \n\t"
636             "movq           %%mm0, -8(%0)   \n\t"
637             "movq      -8(%0, %2), %%mm1    \n\t"
638             "punpckhbw      %%mm1, %%mm1    \n\t"
639             "punpckhwd      %%mm1, %%mm1    \n\t"
640             "punpckhdq      %%mm1, %%mm1    \n\t"
641             "movq           %%mm1, (%0, %2) \n\t"
642             "add               %1, %0       \n\t"
643             "cmp               %3, %0       \n\t"
644             "jb                1b           \n\t"
645             : "+r"(ptr)
646             : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
647             );
648     } else {
649         __asm__ volatile (
650             "1:                                 \n\t"
651             "movd            (%0), %%mm0        \n\t"
652             "punpcklbw      %%mm0, %%mm0        \n\t"
653             "punpcklwd      %%mm0, %%mm0        \n\t"
654             "punpckldq      %%mm0, %%mm0        \n\t"
655             "movq           %%mm0, -8(%0)       \n\t"
656             "movq           %%mm0, -16(%0)      \n\t"
657             "movq      -8(%0, %2), %%mm1        \n\t"
658             "punpckhbw      %%mm1, %%mm1        \n\t"
659             "punpckhwd      %%mm1, %%mm1        \n\t"
660             "punpckhdq      %%mm1, %%mm1        \n\t"
661             "movq           %%mm1,  (%0, %2)    \n\t"
662             "movq           %%mm1, 8(%0, %2)    \n\t"
663             "add               %1, %0           \n\t"
664             "cmp               %3, %0           \n\t"
665             "jb                1b               \n\t"
666             : "+r"(ptr)
667             : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
668             );
669     }
670
671     /* top and bottom (and hopefully also the corners) */
672     if (sides & EDGE_TOP) {
673         for (i = 0; i < h; i += 4) {
674             ptr = buf - (i + 1) * wrap - w;
675             __asm__ volatile (
676                 "1:                             \n\t"
677                 "movq (%1, %0), %%mm0           \n\t"
678                 "movq    %%mm0, (%0)            \n\t"
679                 "movq    %%mm0, (%0, %2)        \n\t"
680                 "movq    %%mm0, (%0, %2, 2)     \n\t"
681                 "movq    %%mm0, (%0, %3)        \n\t"
682                 "add        $8, %0              \n\t"
683                 "cmp        %4, %0              \n\t"
684                 "jb         1b                  \n\t"
685                 : "+r"(ptr)
686                 : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
687                   "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w)
688                 );
689         }
690     }
691
692     if (sides & EDGE_BOTTOM) {
693         for (i = 0; i < h; i += 4) {
694             ptr = last_line + (i + 1) * wrap - w;
695             __asm__ volatile (
696                 "1:                             \n\t"
697                 "movq (%1, %0), %%mm0           \n\t"
698                 "movq    %%mm0, (%0)            \n\t"
699                 "movq    %%mm0, (%0, %2)        \n\t"
700                 "movq    %%mm0, (%0, %2, 2)     \n\t"
701                 "movq    %%mm0, (%0, %3)        \n\t"
702                 "add        $8, %0              \n\t"
703                 "cmp        %4, %0              \n\t"
704                 "jb         1b                  \n\t"
705                 : "+r"(ptr)
706                 : "r"((x86_reg)last_line - (x86_reg)ptr - w),
707                   "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
708                   "r"(ptr + width + 2 * w)
709                 );
710         }
711     }
712 }
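
/* Scalar outline of what draw_edges_mmx above does (an illustrative sketch
 * with a hypothetical name, not part of the original file): every row is
 * padded by replicating its first and last pixel 'w' times to the left and
 * right, then the top and bottom rows of the padded picture are replicated
 * 'h' lines upwards and downwards, which also fills the corners. */
static av_unused void draw_edges_ref(uint8_t *buf, int wrap, int width, int height,
                                     int w, int h, int sides)
{
    uint8_t *last_line = buf + (height - 1) * wrap;
    int y;

    for (y = 0; y < height; y++) {                 /* left and right columns */
        memset(buf + y * wrap - w,     buf[y * wrap],             w);
        memset(buf + y * wrap + width, buf[y * wrap + width - 1], w);
    }
    for (y = 1; y <= h; y++) {                     /* top and bottom rows, including corners */
        if (sides & EDGE_TOP)
            memcpy(buf       - y * wrap - w, buf       - w, width + 2 * w);
        if (sides & EDGE_BOTTOM)
            memcpy(last_line + y * wrap - w, last_line - w, width + 2 * w);
    }
}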
713 #endif /* HAVE_INLINE_ASM */
714
715
716 #if HAVE_YASM
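/* Illustrative note on the QPEL_OP block below (not part of the original
 * file): the mcXY suffix encodes the quarter-pel position, X being the
 * horizontal and Y the vertical offset in quarter pixels.  Positions on the
 * axes combine one lowpass filter with an average against the source, e.g.
 * mc10 is roughly dst = avg(src, h_lowpass(src)), while the diagonal
 * positions first build h-filtered and hv-filtered temporaries in the
 * on-stack 'half' buffers and average those. */
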
717 #define QPEL_OP(OPNAME, ROUNDER, RND, MMX)                              \
718 static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src,   \
719                                           ptrdiff_t stride)             \
720 {                                                                       \
721     ff_ ## OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);              \
722 }                                                                       \
723                                                                         \
724 static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src,    \
725                                          ptrdiff_t stride)              \
726 {                                                                       \
727     uint64_t temp[8];                                                   \
728     uint8_t * const half = (uint8_t*)temp;                              \
729     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
730                                                    stride, 8);          \
731     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
732                                         stride, stride, 8);             \
733 }                                                                       \
734                                                                         \
735 static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src,    \
736                                          ptrdiff_t stride)              \
737 {                                                                       \
738     ff_ ## OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride,    \
739                                                    stride, 8);          \
740 }                                                                       \
741                                                                         \
742 static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src,    \
743                                          ptrdiff_t stride)              \
744 {                                                                       \
745     uint64_t temp[8];                                                   \
746     uint8_t * const half = (uint8_t*)temp;                              \
747     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
748                                                    stride, 8);          \
749     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride,     \
750                                         stride, 8);                     \
751 }                                                                       \
752                                                                         \
753 static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src,    \
754                                          ptrdiff_t stride)              \
755 {                                                                       \
756     uint64_t temp[8];                                                   \
757     uint8_t * const half = (uint8_t*)temp;                              \
758     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
759                                                    8, stride);          \
760     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
761                                         stride, stride, 8);             \
762 }                                                                       \
763                                                                         \
764 static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src,    \
765                                          ptrdiff_t stride)              \
766 {                                                                       \
767     ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src,            \
768                                                    stride, stride);     \
769 }                                                                       \
770                                                                         \
771 static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src,    \
772                                          ptrdiff_t stride)              \
773 {                                                                       \
774     uint64_t temp[8];                                                   \
775     uint8_t * const half = (uint8_t*)temp;                              \
776     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
777                                                    8, stride);          \
778     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride,\
779                                         stride, 8);                     \
780 }                                                                       \
781                                                                         \
782 static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src,    \
783                                          ptrdiff_t stride)              \
784 {                                                                       \
785     uint64_t half[8 + 9];                                               \
786     uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
787     uint8_t * const halfHV = ((uint8_t*)half);                          \
788     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
789                                                    stride, 9);          \
790     ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
791                                         stride, 9);                     \
792     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
793     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
794                                         stride, 8, 8);                  \
795 }                                                                       \
796                                                                         \
797 static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src,    \
798                                          ptrdiff_t stride)              \
799 {                                                                       \
800     uint64_t half[8 + 9];                                               \
801     uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
802     uint8_t * const halfHV = ((uint8_t*)half);                          \
803     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
804                                                    stride, 9);          \
805     ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
806                                         stride, 9);                     \
807     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
808     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
809                                         stride, 8, 8);                  \
810 }                                                                       \
811                                                                         \
812 static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src,    \
813                                          ptrdiff_t stride)              \
814 {                                                                       \
815     uint64_t half[8 + 9];                                               \
816     uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
817     uint8_t * const halfHV = ((uint8_t*)half);                          \
818     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
819                                                    stride, 9);          \
820     ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
821                                         stride, 9);                     \
822     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
823     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
824                                         stride, 8, 8);                  \
825 }                                                                       \
826                                                                         \
827 static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src,    \
828                                          ptrdiff_t stride)              \
829 {                                                                       \
830     uint64_t half[8 + 9];                                               \
831     uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
832     uint8_t * const halfHV = ((uint8_t*)half);                          \
833     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
834                                                    stride, 9);          \
835     ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
836                                         stride, 9);                     \
837     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
838     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
839                                         stride, 8, 8);                  \
840 }                                                                       \
841                                                                         \
842 static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src,    \
843                                          ptrdiff_t stride)              \
844 {                                                                       \
845     uint64_t half[8 + 9];                                               \
846     uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
847     uint8_t * const halfHV = ((uint8_t*)half);                          \
848     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
849                                                    stride, 9);          \
850     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
851     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
852                                         stride, 8, 8);                  \
853 }                                                                       \
854                                                                         \
855 static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src,    \
856                                          ptrdiff_t stride)              \
857 {                                                                       \
858     uint64_t half[8 + 9];                                               \
859     uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
860     uint8_t * const halfHV = ((uint8_t*)half);                          \
861     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
862                                                    stride, 9);          \
863     ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
864     ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
865                                         stride, 8, 8);                  \
866 }                                                                       \
867                                                                         \
868 static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src,    \
869                                          ptrdiff_t stride)              \
870 {                                                                       \
871     uint64_t half[8 + 9];                                               \
872     uint8_t * const halfH = ((uint8_t*)half);                           \
873     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
874                                                    stride, 9);          \
875     ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH,              \
876                                         8, stride, 9);                  \
877     ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
878                                                    stride, 8);          \
879 }                                                                       \
880                                                                         \
881 static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src,    \
882                                          ptrdiff_t stride)              \
883 {                                                                       \
884     uint64_t half[8 + 9];                                               \
885     uint8_t * const halfH = ((uint8_t*)half);                           \
886     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
887                                                    stride, 9);          \
888     ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
889                                         stride, 9);                     \
890     ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
891                                                    stride, 8);          \
892 }                                                                       \
893                                                                         \
894 static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src,    \
895                                          ptrdiff_t stride)              \
896 {                                                                       \
897     uint64_t half[9];                                                   \
898     uint8_t * const halfH = ((uint8_t*)half);                           \
899     ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
900                                                    stride, 9);          \
901     ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
902                                                    stride, 8);          \
903 }                                                                       \
904                                                                         \
905 static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src,  \
906                                            ptrdiff_t stride)            \
907 {                                                                       \
908     ff_ ## OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);            \
909 }                                                                       \
910                                                                         \
911 static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src,   \
912                                           ptrdiff_t stride)             \
913 {                                                                       \
914     uint64_t temp[32];                                                  \
915     uint8_t * const half = (uint8_t*)temp;                              \
916     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
917                                                     stride, 16);        \
918     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
919                                          stride, 16);                   \
920 }                                                                       \
921                                                                         \
922 static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src,   \
923                                           ptrdiff_t stride)             \
924 {                                                                       \
925     ff_ ## OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src,           \
926                                                     stride, stride, 16);\
927 }                                                                       \
928                                                                         \
929 static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src,   \
930                                           ptrdiff_t stride)             \
931 {                                                                       \
932     uint64_t temp[32];                                                  \
933     uint8_t * const half = (uint8_t*)temp;                              \
934     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
935                                                     stride, 16);        \
936     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half,            \
937                                          stride, stride, 16);           \
938 }                                                                       \
939                                                                         \
940 static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src,   \
941                                           ptrdiff_t stride)             \
942 {                                                                       \
943     uint64_t temp[32];                                                  \
944     uint8_t * const half = (uint8_t*)temp;                              \
945     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
946                                                     stride);            \
947     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
948                                          stride, 16);                   \
949 }                                                                       \
950                                                                         \
951 static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src,   \
952                                           ptrdiff_t stride)             \
953 {                                                                       \
954     ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src,           \
955                                                     stride, stride);    \
956 }                                                                       \
957                                                                         \
958 static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src,   \
959                                           ptrdiff_t stride)             \
960 {                                                                       \
961     uint64_t temp[32];                                                  \
962     uint8_t * const half = (uint8_t*)temp;                              \
963     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
964                                                     stride);            \
965     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half,         \
966                                          stride, stride, 16);           \
967 }                                                                       \
968                                                                         \
969 static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src,   \
970                                           ptrdiff_t stride)             \
971 {                                                                       \
972     uint64_t half[16 * 2 + 17 * 2];                                     \
973     uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
974     uint8_t * const halfHV = ((uint8_t*)half);                          \
975     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
976                                                     stride, 17);        \
977     ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
978                                          stride, 17);                   \
979     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
980                                                     16, 16);            \
981     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
982                                          stride, 16, 16);               \
983 }                                                                       \
984                                                                         \
985 static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src,   \
986                                           ptrdiff_t stride)             \
987 {                                                                       \
988     uint64_t half[16 * 2 + 17 * 2];                                     \
989     uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
990     uint8_t * const halfHV = ((uint8_t*)half);                          \
991     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
992                                                     stride, 17);        \
993     ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
994                                          stride, 17);                   \
995     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
996                                                     16, 16);            \
997     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
998                                          stride, 16, 16);               \
999 }                                                                       \
1000                                                                         \
1001 static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src,   \
1002                                           ptrdiff_t stride)             \
1003 {                                                                       \
1004     uint64_t half[16 * 2 + 17 * 2];                                     \
1005     uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
1006     uint8_t * const halfHV = ((uint8_t*)half);                          \
1007     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
1008                                                     stride, 17);        \
1009     ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
1010                                          stride, 17);                   \
1011     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
1012                                                     16, 16);            \
1013     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
1014                                          stride, 16, 16);               \
1015 }                                                                       \
1016                                                                         \
1017 static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src,   \
1018                                           ptrdiff_t stride)             \
1019 {                                                                       \
1020     uint64_t half[16 * 2 + 17 * 2];                                     \
1021     uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
1022     uint8_t * const halfHV = ((uint8_t*)half);                          \
1023     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
1024                                                     stride, 17);        \
1025     ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
1026                                          stride, 17);                   \
1027     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
1028                                                     16, 16);            \
1029     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
1030                                          stride, 16, 16);               \
1031 }                                                                       \
1032                                                                         \
1033 static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src,   \
1034                                           ptrdiff_t stride)             \
1035 {                                                                       \
1036     uint64_t half[16 * 2 + 17 * 2];                                     \
1037     uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
1038     uint8_t * const halfHV = ((uint8_t*)half);                          \
1039     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
1040                                                     stride, 17);        \
1041     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
1042                                                     16, 16);            \
1043     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
1044                                          stride, 16, 16);               \
1045 }                                                                       \
1046                                                                         \
1047 static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src,   \
1048                                           ptrdiff_t stride)             \
1049 {                                                                       \
1050     uint64_t half[16 * 2 + 17 * 2];                                     \
1051     uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
1052     uint8_t * const halfHV = ((uint8_t*)half);                          \
1053     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
1054                                                     stride, 17);        \
1055     ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
1056                                                     16, 16);            \
1057     ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
1058                                          stride, 16, 16);               \
1059 }                                                                       \
1060                                                                         \
1061 static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src,   \
1062                                           ptrdiff_t stride)             \
1063 {                                                                       \
1064     uint64_t half[17 * 2];                                              \
1065     uint8_t * const halfH = ((uint8_t*)half);                           \
1066     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
1067                                                     stride, 17);        \
1068     ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
1069                                          stride, 17);                   \
1070     ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
1071                                                     stride, 16);        \
1072 }                                                                       \
1073                                                                         \
1074 static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src,   \
1075                                           ptrdiff_t stride)             \
1076 {                                                                       \
1077     uint64_t half[17 * 2];                                              \
1078     uint8_t * const halfH = ((uint8_t*)half);                           \
1079     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
1080                                                     stride, 17);        \
1081     ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
1082                                          stride, 17);                   \
1083     ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
1084                                                     stride, 16);        \
1085 }                                                                       \
1086                                                                         \
1087 static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src,   \
1088                                           ptrdiff_t stride)             \
1089 {                                                                       \
1090     uint64_t half[17 * 2];                                              \
1091     uint8_t * const halfH = ((uint8_t*)half);                           \
1092     ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
1093                                                     stride, 17);        \
1094     ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
1095                                                     stride, 16);        \
1096 }
1097
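/* Instantiate the complete set of quarter-pel motion-compensation functions
 * (mc00 through mc33, for the 16x16 and 8x8 block sizes) on top of the
 * MMXEXT primitives: a rounding put, a rounding avg and a no-rounding put. */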
1098 QPEL_OP(put_,          ff_pw_16, _,        mmxext)
1099 QPEL_OP(avg_,          ff_pw_16, _,        mmxext)
1100 QPEL_OP(put_no_rnd_,   ff_pw_15, _no_rnd_, mmxext)
1101 #endif /* HAVE_YASM */
1102
1103
1104 #if HAVE_INLINE_ASM
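/* RV40-specific */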
1105 void ff_put_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
1106 {
1107     put_pixels8_xy2_mmx(dst, src, stride, 8);
1108 }
1109 void ff_put_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
1110 {
1111     put_pixels16_xy2_mmx(dst, src, stride, 16);
1112 }
1113 void ff_avg_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
1114 {
1115     avg_pixels8_xy2_mmx(dst, src, stride, 8);
1116 }
1117 void ff_avg_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
1118 {
1119     avg_pixels16_xy2_mmx(dst, src, stride, 16);
1120 }
1121
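/* Global motion compensation, 8-pixel-wide MMX path.  Each output pixel is a
 * bilinear blend of four neighbouring source pixels weighted by the subpel
 * offsets (see the per-instruction comments in the inner loop).  Blocks that
 * need a non-constant fullpel offset, use more than 16 bits of subpel
 * precision, or read outside the picture fall back to ff_gmc_c(). */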
1122 static void gmc_mmx(uint8_t *dst, uint8_t *src,
1123                     int stride, int h, int ox, int oy,
1124                     int dxx, int dxy, int dyx, int dyy,
1125                     int shift, int r, int width, int height)
1126 {
1127     const int w    = 8;
1128     const int ix   = ox  >> (16 + shift);
1129     const int iy   = oy  >> (16 + shift);
1130     const int oxs  = ox  >> 4;
1131     const int oys  = oy  >> 4;
1132     const int dxxs = dxx >> 4;
1133     const int dxys = dxy >> 4;
1134     const int dyxs = dyx >> 4;
1135     const int dyys = dyy >> 4;
1136     const uint16_t r4[4]   = { r, r, r, r };
1137     const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
1138     const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
1139     const uint64_t shift2 = 2 * shift;
1140     int x, y;
1141
1142     const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
1143     const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
1144     const int dxh = dxy * (h - 1);
1145     const int dyw = dyx * (w - 1);
1146     if ( // non-constant fullpel offset (3% of blocks)
1147         ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
1148          (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
1149         // uses more than 16 bits of subpel motion vector precision (only at huge resolutions)
1150         || (dxx | dxy | dyx | dyy) & 15 ||
1151         (unsigned)ix >= width  - w ||
1152         (unsigned)iy >= height - h) {
1153         // FIXME could still use mmx for some of the rows
1154         ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
1155                  shift, r, width, height);
1156         return;
1157     }
1158
1159     src += ix + iy * stride;
1160
1161     __asm__ volatile (
1162         "movd         %0, %%mm6         \n\t"
1163         "pxor      %%mm7, %%mm7         \n\t"
1164         "punpcklwd %%mm6, %%mm6         \n\t"
1165         "punpcklwd %%mm6, %%mm6         \n\t"
1166         :: "r"(1 << shift)
1167     );
1168
1169     for (x = 0; x < w; x += 4) {
1170         uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
1171                             oxs - dxys + dxxs * (x + 1),
1172                             oxs - dxys + dxxs * (x + 2),
1173                             oxs - dxys + dxxs * (x + 3) };
1174         uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
1175                             oys - dyys + dyxs * (x + 1),
1176                             oys - dyys + dyxs * (x + 2),
1177                             oys - dyys + dyxs * (x + 3) };
1178
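        /* dx4/dy4 hold the per-column subpel offsets and are advanced by
         * dxy4/dyy4 on every row; with s = 1 << shift (broadcast into mm6
         * above, mm7 kept at zero for byte unpacking) each output pixel is
         *   (src[0,0]*(s-dx)*(s-dy) + src[1,0]*dx*(s-dy) +
         *    src[0,1]*(s-dx)*dy + src[1,1]*dx*dy + r) >> (2*shift). */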
1179         for (y = 0; y < h; y++) {
1180             __asm__ volatile (
1181                 "movq      %0, %%mm4    \n\t"
1182                 "movq      %1, %%mm5    \n\t"
1183                 "paddw     %2, %%mm4    \n\t"
1184                 "paddw     %3, %%mm5    \n\t"
1185                 "movq   %%mm4, %0       \n\t"
1186                 "movq   %%mm5, %1       \n\t"
1187                 "psrlw    $12, %%mm4    \n\t"
1188                 "psrlw    $12, %%mm5    \n\t"
1189                 : "+m"(*dx4), "+m"(*dy4)
1190                 : "m"(*dxy4), "m"(*dyy4)
1191             );
1192
1193             __asm__ volatile (
1194                 "movq      %%mm6, %%mm2 \n\t"
1195                 "movq      %%mm6, %%mm1 \n\t"
1196                 "psubw     %%mm4, %%mm2 \n\t"
1197                 "psubw     %%mm5, %%mm1 \n\t"
1198                 "movq      %%mm2, %%mm0 \n\t"
1199                 "movq      %%mm4, %%mm3 \n\t"
1200                 "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
1201                 "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
1202                 "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
1203                 "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)
1204
1205                 "movd         %4, %%mm5 \n\t"
1206                 "movd         %3, %%mm4 \n\t"
1207                 "punpcklbw %%mm7, %%mm5 \n\t"
1208                 "punpcklbw %%mm7, %%mm4 \n\t"
1209                 "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
1210                 "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
1211
1212                 "movd         %2, %%mm5 \n\t"
1213                 "movd         %1, %%mm4 \n\t"
1214                 "punpcklbw %%mm7, %%mm5 \n\t"
1215                 "punpcklbw %%mm7, %%mm4 \n\t"
1216                 "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
1217                 "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
1218                 "paddw        %5, %%mm1 \n\t"
1219                 "paddw     %%mm3, %%mm2 \n\t"
1220                 "paddw     %%mm1, %%mm0 \n\t"
1221                 "paddw     %%mm2, %%mm0 \n\t"
1222
1223                 "psrlw        %6, %%mm0 \n\t"
1224                 "packuswb  %%mm0, %%mm0 \n\t"
1225                 "movd      %%mm0, %0    \n\t"
1226
1227                 : "=m"(dst[x + y * stride])
1228                 : "m"(src[0]), "m"(src[1]),
1229                   "m"(src[stride]), "m"(src[stride + 1]),
1230                   "m"(*r4), "m"(shift2)
1231             );
1232             src += stride;
1233         }
1234         src += 4 - h * stride;
1235     }
1236 }
1237
1238 /* CAVS-specific */
1239 void ff_put_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
1240 {
1241     put_pixels8_mmx(dst, src, stride, 8);
1242 }
1243
1244 void ff_avg_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
1245 {
1246     avg_pixels8_mmx(dst, src, stride, 8);
1247 }
1248
1249 void ff_put_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
1250 {
1251     put_pixels16_mmx(dst, src, stride, 16);
1252 }
1253
1254 void ff_avg_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
1255 {
1256     avg_pixels16_mmx(dst, src, stride, 16);
1257 }
1258
1259 /* VC-1-specific */
1260 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src,
1261                                ptrdiff_t stride, int rnd)
1262 {
1263     put_pixels8_mmx(dst, src, stride, 8);
1264 }
1265
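/* Clamp an array of floats to [min, max] with SSE: min and max are broadcast
 * into xmm4/xmm5 and each loop iteration clips 16 floats (64 bytes), walking
 * backwards from the end of the buffer.  The aligned movaps loads/stores
 * assume 16-byte aligned dst/src and a len that is a multiple of 16. */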
1266 static void vector_clipf_sse(float *dst, const float *src,
1267                              float min, float max, int len)
1268 {
1269     x86_reg i = (len - 16) * 4;
1270     __asm__ volatile (
1271         "movss          %3, %%xmm4      \n\t"
1272         "movss          %4, %%xmm5      \n\t"
1273         "shufps $0, %%xmm4, %%xmm4      \n\t"
1274         "shufps $0, %%xmm5, %%xmm5      \n\t"
1275         "1:                             \n\t"
1276         "movaps   (%2, %0), %%xmm0      \n\t" // 3/1 on intel
1277         "movaps 16(%2, %0), %%xmm1      \n\t"
1278         "movaps 32(%2, %0), %%xmm2      \n\t"
1279         "movaps 48(%2, %0), %%xmm3      \n\t"
1280         "maxps      %%xmm4, %%xmm0      \n\t"
1281         "maxps      %%xmm4, %%xmm1      \n\t"
1282         "maxps      %%xmm4, %%xmm2      \n\t"
1283         "maxps      %%xmm4, %%xmm3      \n\t"
1284         "minps      %%xmm5, %%xmm0      \n\t"
1285         "minps      %%xmm5, %%xmm1      \n\t"
1286         "minps      %%xmm5, %%xmm2      \n\t"
1287         "minps      %%xmm5, %%xmm3      \n\t"
1288         "movaps     %%xmm0,   (%1, %0)  \n\t"
1289         "movaps     %%xmm1, 16(%1, %0)  \n\t"
1290         "movaps     %%xmm2, 32(%1, %0)  \n\t"
1291         "movaps     %%xmm3, 48(%1, %0)  \n\t"
1292         "sub           $64, %0          \n\t"
1293         "jge            1b              \n\t"
1294         : "+&r"(i)
1295         : "r"(dst), "r"(src), "m"(min), "m"(max)
1296         : "memory"
1297     );
1298 }
1299
1300 #endif /* HAVE_INLINE_ASM */
1301
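/* Prototypes for routines implemented in external (yasm) assembly; they are
 * wired up by the per-CPU init functions below. */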
1302 void ff_h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale);
1303 void ff_h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale);
1304
1305 int32_t ff_scalarproduct_int16_mmxext(const int16_t *v1, const int16_t *v2,
1306                                       int order);
1307 int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2,
1308                                     int order);
1309 int32_t ff_scalarproduct_and_madd_int16_mmxext(int16_t *v1, const int16_t *v2,
1310                                                const int16_t *v3,
1311                                                int order, int mul);
1312 int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2,
1313                                              const int16_t *v3,
1314                                              int order, int mul);
1315 int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
1316                                               const int16_t *v3,
1317                                               int order, int mul);
1318
1319 void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
1320                                         const int16_t *window, unsigned int len);
1321 void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
1322                                       const int16_t *window, unsigned int len);
1323 void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
1324                                   const int16_t *window, unsigned int len);
1325 void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
1326                                 const int16_t *window, unsigned int len);
1327 void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
1328                                  const int16_t *window, unsigned int len);
1329 void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
1330                                       const int16_t *window, unsigned int len);
1331
1332 void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w);
1333 void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w);
1334
1335 void ff_add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top,
1336                                           const uint8_t *diff, int w,
1337                                           int *left, int *left_top);
1338 int  ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src,
1339                                        int w, int left);
1340 int  ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src,
1341                                       int w, int left);
1342
1343 void ff_vector_clip_int32_mmx     (int32_t *dst, const int32_t *src,
1344                                    int32_t min, int32_t max, unsigned int len);
1345 void ff_vector_clip_int32_sse2    (int32_t *dst, const int32_t *src,
1346                                    int32_t min, int32_t max, unsigned int len);
1347 void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src,
1348                                    int32_t min, int32_t max, unsigned int len);
1349 void ff_vector_clip_int32_sse4    (int32_t *dst, const int32_t *src,
1350                                    int32_t min, int32_t max, unsigned int len);
1351
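/* Helper macros for the init functions: SET_QPEL_FUNCS fills all 16
 * quarter-pel entries (mc00 .. mc33) of a pixels_tab row, SET_HPEL_FUNCS the
 * four half-pel entries (full, x2, y2, xy2), for a given size and CPU suffix. */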
1352 #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX)                          \
1353     do {                                                                     \
1354     c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
1355     c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
1356     c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
1357     c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
1358     c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
1359     c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
1360     c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
1361     c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
1362     c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
1363     c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
1364     c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
1365     c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
1366     c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
1367     c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
1368     c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
1369     c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
1370     } while (0)
1371
1372 #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU)                                     \
1373     do {                                                                        \
1374         c->PFX ## _pixels_tab IDX [0] = PFX ## _pixels ## SIZE ## _     ## CPU; \
1375         c->PFX ## _pixels_tab IDX [1] = PFX ## _pixels ## SIZE ## _x2_  ## CPU; \
1376         c->PFX ## _pixels_tab IDX [2] = PFX ## _pixels ## SIZE ## _y2_  ## CPU; \
1377         c->PFX ## _pixels_tab IDX [3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU; \
1378     } while (0)
1379
1380 static av_cold void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx,
1381                                      int mm_flags)
1382 {
1383     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
1384
1385 #if HAVE_INLINE_ASM
1386     c->put_pixels_clamped        = ff_put_pixels_clamped_mmx;
1387     c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
1388     c->add_pixels_clamped        = ff_add_pixels_clamped_mmx;
1389
1390     if (!high_bit_depth) {
1391         c->clear_block  = clear_block_mmx;
1392         c->clear_blocks = clear_blocks_mmx;
1393         c->draw_edges   = draw_edges_mmx;
1394
1395         SET_HPEL_FUNCS(put,        [0], 16, mmx);
1396         SET_HPEL_FUNCS(put_no_rnd, [0], 16, mmx);
1397         SET_HPEL_FUNCS(avg,        [0], 16, mmx);
1398         SET_HPEL_FUNCS(avg_no_rnd,    , 16, mmx);
1399         SET_HPEL_FUNCS(put,        [1],  8, mmx);
1400         SET_HPEL_FUNCS(put_no_rnd, [1],  8, mmx);
1401         SET_HPEL_FUNCS(avg,        [1],  8, mmx);
1402
1403         switch (avctx->idct_algo) {
1404         case FF_IDCT_AUTO:
1405         case FF_IDCT_SIMPLEMMX:
1406             c->idct_put              = ff_simple_idct_put_mmx;
1407             c->idct_add              = ff_simple_idct_add_mmx;
1408             c->idct                  = ff_simple_idct_mmx;
1409             c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
1410             break;
1411         case FF_IDCT_XVIDMMX:
1412             c->idct_put              = ff_idct_xvid_mmx_put;
1413             c->idct_add              = ff_idct_xvid_mmx_add;
1414             c->idct                  = ff_idct_xvid_mmx;
1415             break;
1416         }
1417     }
1418
1419     c->gmc = gmc_mmx;
1420
1421     c->add_bytes = add_bytes_mmx;
1422 #endif /* HAVE_INLINE_ASM */
1423
1424 #if HAVE_YASM
1425     if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
1426         c->h263_v_loop_filter = ff_h263_v_loop_filter_mmx;
1427         c->h263_h_loop_filter = ff_h263_h_loop_filter_mmx;
1428     }
1429
1430     c->vector_clip_int32 = ff_vector_clip_int32_mmx;
1431 #endif /* HAVE_YASM */
1432
1433 }
1434
1435 static av_cold void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx,
1436                                         int mm_flags)
1437 {
1438     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
1439
1440 #if HAVE_YASM
1441     SET_QPEL_FUNCS(avg_qpel,        0, 16, mmxext, );
1442     SET_QPEL_FUNCS(avg_qpel,        1,  8, mmxext, );
1443
1444     SET_QPEL_FUNCS(put_qpel,        0, 16, mmxext, );
1445     SET_QPEL_FUNCS(put_qpel,        1,  8, mmxext, );
1446     SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmxext, );
1447     SET_QPEL_FUNCS(put_no_rnd_qpel, 1,  8, mmxext, );
1448
1449     if (!high_bit_depth) {
1450         c->put_pixels_tab[0][1] = ff_put_pixels16_x2_mmxext;
1451         c->put_pixels_tab[0][2] = ff_put_pixels16_y2_mmxext;
1452
1453         c->avg_pixels_tab[0][0] = ff_avg_pixels16_mmxext;
1454         c->avg_pixels_tab[0][1] = ff_avg_pixels16_x2_mmxext;
1455         c->avg_pixels_tab[0][2] = ff_avg_pixels16_y2_mmxext;
1456
1457         c->put_pixels_tab[1][1] = ff_put_pixels8_x2_mmxext;
1458         c->put_pixels_tab[1][2] = ff_put_pixels8_y2_mmxext;
1459
1460         c->avg_pixels_tab[1][0] = ff_avg_pixels8_mmxext;
1461         c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_mmxext;
1462         c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_mmxext;
1463     }
1464
1465     if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
1466         if (!high_bit_depth) {
1467             c->put_no_rnd_pixels_tab[0][1] = ff_put_no_rnd_pixels16_x2_mmxext;
1468             c->put_no_rnd_pixels_tab[0][2] = ff_put_no_rnd_pixels16_y2_mmxext;
1469             c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_mmxext;
1470             c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_mmxext;
1471
1472             c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_mmxext;
1473             c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_mmxext;
1474         }
1475     }
1476 #endif /* HAVE_YASM */
1477
1478 #if HAVE_INLINE_ASM
1479     if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
1480         c->idct_put = ff_idct_xvid_mmxext_put;
1481         c->idct_add = ff_idct_xvid_mmxext_add;
1482         c->idct     = ff_idct_xvid_mmxext;
1483     }
1484 #endif /* HAVE_INLINE_ASM */
1485
1486 #if HAVE_MMXEXT_EXTERNAL
1487     if (CONFIG_VP3_DECODER && (avctx->codec_id == AV_CODEC_ID_VP3 ||
1488                                avctx->codec_id == AV_CODEC_ID_THEORA)) {
1489         c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_mmxext;
1490         c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_mmxext;
1491     }
1492
1493     /* slower than cmov version on AMD */
1494     if (!(mm_flags & AV_CPU_FLAG_3DNOW))
1495         c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmxext;
1496
1497     c->scalarproduct_int16          = ff_scalarproduct_int16_mmxext;
1498     c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmxext;
1499
1500     if (avctx->flags & CODEC_FLAG_BITEXACT) {
1501         c->apply_window_int16 = ff_apply_window_int16_mmxext;
1502     } else {
1503         c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
1504     }
1505 #endif /* HAVE_MMXEXT_EXTERNAL */
1506 }
1507
1508 static av_cold void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx,
1509                                        int mm_flags)
1510 {
1511 #if HAVE_YASM
1512     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
1513
1514     if (!high_bit_depth) {
1515         c->put_pixels_tab[0][1] = ff_put_pixels16_x2_3dnow;
1516         c->put_pixels_tab[0][2] = ff_put_pixels16_y2_3dnow;
1517
1518         c->avg_pixels_tab[0][0] = ff_avg_pixels16_3dnow;
1519         c->avg_pixels_tab[0][1] = ff_avg_pixels16_x2_3dnow;
1520         c->avg_pixels_tab[0][2] = ff_avg_pixels16_y2_3dnow;
1521
1522         c->put_pixels_tab[1][1] = ff_put_pixels8_x2_3dnow;
1523         c->put_pixels_tab[1][2] = ff_put_pixels8_y2_3dnow;
1524
1525         c->avg_pixels_tab[1][0] = ff_avg_pixels8_3dnow;
1526         c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_3dnow;
1527         c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_3dnow;
1528
1529         if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
1530             c->put_no_rnd_pixels_tab[0][1] = ff_put_no_rnd_pixels16_x2_3dnow;
1531             c->put_no_rnd_pixels_tab[0][2] = ff_put_no_rnd_pixels16_y2_3dnow;
1532             c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_3dnow;
1533             c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_3dnow;
1534
1535             c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_3dnow;
1536             c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_3dnow;
1537         }
1538     }
1539
1540     if (CONFIG_VP3_DECODER && (avctx->codec_id == AV_CODEC_ID_VP3 ||
1541                                avctx->codec_id == AV_CODEC_ID_THEORA)) {
1542         c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_3dnow;
1543         c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_3dnow;
1544     }
1545 #endif /* HAVE_YASM */
1546 }
1547
1548 static av_cold void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx,
1549                                      int mm_flags)
1550 {
1551 #if HAVE_INLINE_ASM
1552     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
1553
1554     if (!high_bit_depth) {
1555         if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)) {
1556             /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
1557             c->clear_block  = clear_block_sse;
1558             c->clear_blocks = clear_blocks_sse;
1559         }
1560     }
1561
1562     c->vector_clipf = vector_clipf_sse;
1563 #endif /* HAVE_INLINE_ASM */
1564 }
1565
1566 static av_cold void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
1567                                       int mm_flags)
1568 {
1569     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
1570
1571 #if HAVE_SSE2_INLINE
1572     if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
1573         c->idct_put              = ff_idct_xvid_sse2_put;
1574         c->idct_add              = ff_idct_xvid_sse2_add;
1575         c->idct                  = ff_idct_xvid_sse2;
1576         c->idct_permutation_type = FF_SSE2_IDCT_PERM;
1577     }
1578 #endif /* HAVE_SSE2_INLINE */
1579
1580 #if HAVE_SSE2_EXTERNAL
1581     if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
1582         // these functions are slower than mmx on AMD, but faster on Intel
1583         if (!high_bit_depth) {
1584             c->put_pixels_tab[0][0]        = ff_put_pixels16_sse2;
1585             c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_sse2;
1586             c->avg_pixels_tab[0][0]        = ff_avg_pixels16_sse2;
1587         }
1588     }
1589
1590     c->scalarproduct_int16          = ff_scalarproduct_int16_sse2;
1591     c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
1592     if (mm_flags & AV_CPU_FLAG_ATOM) {
1593         c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
1594     } else {
1595         c->vector_clip_int32 = ff_vector_clip_int32_sse2;
1596     }
1597     if (avctx->flags & CODEC_FLAG_BITEXACT) {
1598         c->apply_window_int16 = ff_apply_window_int16_sse2;
1599     } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
1600         c->apply_window_int16 = ff_apply_window_int16_round_sse2;
1601     }
1602     c->bswap_buf = ff_bswap32_buf_sse2;
1603 #endif /* HAVE_SSE2_EXTERNAL */
1604 }
1605
1606 static av_cold void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
1607                                        int mm_flags)
1608 {
1609 #if HAVE_SSSE3_EXTERNAL
1610     c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
1611     if (mm_flags & AV_CPU_FLAG_SSE4) // not an SSE4 requirement; the SSSE3 version is just slow on Conroe
1612         c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
1613
1614     if (mm_flags & AV_CPU_FLAG_ATOM)
1615         c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
1616     else
1617         c->apply_window_int16 = ff_apply_window_int16_ssse3;
1618     if (!(mm_flags & (AV_CPU_FLAG_SSE42 | AV_CPU_FLAG_3DNOW))) // cachesplit
1619         c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
1620     c->bswap_buf = ff_bswap32_buf_ssse3;
1621 #endif /* HAVE_SSSE3_EXTERNAL */
1622 }
1623
1624 static av_cold void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
1625                                       int mm_flags)
1626 {
1627 #if HAVE_SSE4_EXTERNAL
1628     c->vector_clip_int32 = ff_vector_clip_int32_sse4;
1629 #endif /* HAVE_SSE4_EXTERNAL */
1630 }
1631
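/* Entry point: query the CPU flags once and chain the per-ISA init functions
 * in increasing order of capability, so later ones (SSE2, SSSE3, SSE4) can
 * override the MMX/MMXEXT function pointers where a faster version exists. */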
1632 av_cold void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
1633 {
1634     int mm_flags = av_get_cpu_flags();
1635
1636 #if HAVE_7REGS && HAVE_INLINE_ASM
1637     if (mm_flags & AV_CPU_FLAG_CMOV)
1638         c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
1639 #endif
1640
1641     if (mm_flags & AV_CPU_FLAG_MMX)
1642         dsputil_init_mmx(c, avctx, mm_flags);
1643
1644     if (mm_flags & AV_CPU_FLAG_MMXEXT)
1645         dsputil_init_mmxext(c, avctx, mm_flags);
1646
1647     if (mm_flags & AV_CPU_FLAG_3DNOW)
1648         dsputil_init_3dnow(c, avctx, mm_flags);
1649
1650     if (mm_flags & AV_CPU_FLAG_SSE)
1651         dsputil_init_sse(c, avctx, mm_flags);
1652
1653     if (mm_flags & AV_CPU_FLAG_SSE2)
1654         dsputil_init_sse2(c, avctx, mm_flags);
1655
1656     if (mm_flags & AV_CPU_FLAG_SSSE3)
1657         dsputil_init_ssse3(c, avctx, mm_flags);
1658
1659     if (mm_flags & AV_CPU_FLAG_SSE4)
1660         dsputil_init_sse4(c, avctx, mm_flags);
1661
1662     if (CONFIG_ENCODERS)
1663         ff_dsputilenc_init_mmx(c, avctx);
1664 }