x86: dsputil: Move cavs and vc1-specific functions where they belong
[ffmpeg.git] / libavcodec / x86 / dsputil_mmx.c
/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

/* pixel operations */
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15)   =   0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_17)   = { 0x0011001100110011ULL, 0x0011001100110011ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20)   =   0x0014001400140014ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42)   =   0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53)   =   0x0035003500350035ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96)   =   0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128)  =   0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255)  =   0x00ff00ff00ff00ffULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_512)  = { 0x0200020002000200ULL, 0x0200020002000200ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL };

DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F)   =   0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC)   =   0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };


void ff_put_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1,
                                     uint8_t *src2, int dstStride,
                                     int src1Stride, int h);
void ff_avg_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_avg_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                                      int dstStride, int src1Stride, int h);
void ff_put_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride,
                                                 int h);
void ff_put_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride,
                                                int h);
void ff_put_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride);
void ff_avg_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride);
void ff_put_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_avg_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride);
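
/* For whole-pel copies there is nothing to round, so the no_rnd variants
 * can simply alias the plain put functions. */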
#define ff_put_no_rnd_pixels16_mmxext ff_put_pixels16_mmxext
#define ff_put_no_rnd_pixels8_mmxext ff_put_pixels8_mmxext


#if HAVE_INLINE_ASM

/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)
#define OP_AVG(a, b, c, e)              PAVGB_MMX(a, b, c, e)

#include "rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG
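
/* rnd_template.c instantiates the rounded MMX half-pel averaging
 * primitives for the DEF/SET_RND settings above, e.g. the
 * put/avg_pixels{8,16}_xy2_mmx functions reused by the RV40 wrappers
 * further down. */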

/***********************************/
/* standard MMX */

void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;

    /* read the pixels */
    p   = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile (
        "movq      (%3), %%mm0          \n\t"
        "movq     8(%3), %%mm1          \n\t"
        "movq    16(%3), %%mm2          \n\t"
        "movq    24(%3), %%mm3          \n\t"
        "movq    32(%3), %%mm4          \n\t"
        "movq    40(%3), %%mm5          \n\t"
        "movq    48(%3), %%mm6          \n\t"
        "movq    56(%3), %%mm7          \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "packuswb %%mm3, %%mm2          \n\t"
        "packuswb %%mm5, %%mm4          \n\t"
        "packuswb %%mm7, %%mm6          \n\t"
        "movq     %%mm0, (%0)           \n\t"
        "movq     %%mm2, (%0, %1)       \n\t"
        "movq     %%mm4, (%0, %1, 2)    \n\t"
        "movq     %%mm6, (%0, %2)       \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
           "r"(p)
        : "memory");
    pix += line_size * 4;
    p   += 32;
    // If this were an exact copy of the code above, the compiler would
    // generate some very strange code, thus the "r" constraint for p.
    __asm__ volatile (
        "movq       (%3), %%mm0         \n\t"
        "movq      8(%3), %%mm1         \n\t"
        "movq     16(%3), %%mm2         \n\t"
        "movq     24(%3), %%mm3         \n\t"
        "movq     32(%3), %%mm4         \n\t"
        "movq     40(%3), %%mm5         \n\t"
        "movq     48(%3), %%mm6         \n\t"
        "movq     56(%3), %%mm7         \n\t"
        "packuswb  %%mm1, %%mm0         \n\t"
        "packuswb  %%mm3, %%mm2         \n\t"
        "packuswb  %%mm5, %%mm4         \n\t"
        "packuswb  %%mm7, %%mm6         \n\t"
        "movq      %%mm0, (%0)          \n\t"
        "movq      %%mm2, (%0, %1)      \n\t"
        "movq      %%mm4, (%0, %1, 2)   \n\t"
        "movq      %%mm6, (%0, %2)      \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
        : "memory");
}
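
/* Illustrative scalar reference for the routine above (a sketch, not
 * part of the build; the name is invented for this example): packuswb
 * saturates each signed 16-bit coefficient to an unsigned byte. */
#if 0
static void put_pixels_clamped_c_ref(const int16_t *block, uint8_t *pixels,
                                     int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++)
        for (j = 0; j < 8; j++) {
            int v = block[i * 8 + j];
            /* clamp to the unsigned byte range, as packuswb does */
            pixels[i * line_size + j] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
}
#endif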

#define put_signed_pixels_clamped_mmx_half(off)             \
    "movq          "#off"(%2), %%mm1        \n\t"           \
    "movq     16 + "#off"(%2), %%mm2        \n\t"           \
    "movq     32 + "#off"(%2), %%mm3        \n\t"           \
    "movq     48 + "#off"(%2), %%mm4        \n\t"           \
    "packsswb  8 + "#off"(%2), %%mm1        \n\t"           \
    "packsswb 24 + "#off"(%2), %%mm2        \n\t"           \
    "packsswb 40 + "#off"(%2), %%mm3        \n\t"           \
    "packsswb 56 + "#off"(%2), %%mm4        \n\t"           \
    "paddb              %%mm0, %%mm1        \n\t"           \
    "paddb              %%mm0, %%mm2        \n\t"           \
    "paddb              %%mm0, %%mm3        \n\t"           \
    "paddb              %%mm0, %%mm4        \n\t"           \
    "movq               %%mm1, (%0)         \n\t"           \
    "movq               %%mm2, (%0, %3)     \n\t"           \
    "movq               %%mm3, (%0, %3, 2)  \n\t"           \
    "movq               %%mm4, (%0, %1)     \n\t"

void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                                      int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_pb_80)", %%mm0     \n\t"
        "lea         (%3, %3, 2), %1        \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea         (%0, %3, 4), %0        \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        : "+&r"(pixels), "=&r"(line_skip3)
        : "r"(block), "r"(line_skip)
        : "memory");
}
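
/* Scalar sketch of the signed variant (illustrative only, name
 * invented): packsswb saturates to the signed byte range, and the paddb
 * with ff_pb_80 then adds the +128 bias with 8-bit wraparound. */
#if 0
static void put_signed_pixels_clamped_c_ref(const int16_t *block,
                                            uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++)
        for (j = 0; j < 8; j++) {
            int v = block[i * 8 + j];
            v = v < -128 ? -128 : v > 127 ? 127 : v; /* packsswb */
            pixels[i * line_size + j] = v + 128;     /* paddb ff_pb_80 */
        }
}
#endif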

void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p   = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile (
            "movq        (%2), %%mm0    \n\t"
            "movq       8(%2), %%mm1    \n\t"
            "movq      16(%2), %%mm2    \n\t"
            "movq      24(%2), %%mm3    \n\t"
            "movq          %0, %%mm4    \n\t"
            "movq          %1, %%mm6    \n\t"
            "movq       %%mm4, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm4    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm4, %%mm0    \n\t"
            "paddsw     %%mm5, %%mm1    \n\t"
            "movq       %%mm6, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm6    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm6, %%mm2    \n\t"
            "paddsw     %%mm5, %%mm3    \n\t"
            "packuswb   %%mm1, %%mm0    \n\t"
            "packuswb   %%mm3, %%mm2    \n\t"
            "movq       %%mm0, %0       \n\t"
            "movq       %%mm2, %1       \n\t"
            : "+m"(*pix), "+m"(*(pix + line_size))
            : "r"(p)
            : "memory");
        pix += line_size * 2;
        p   += 16;
    } while (--i);
}
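
/* Scalar sketch of the add variant (illustrative only, name invented):
 * widen the pixels to 16 bits, add the residual, and clamp the sum back
 * to an unsigned byte. */
#if 0
static void add_pixels_clamped_c_ref(const int16_t *block, uint8_t *pixels,
                                     int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++)
        for (j = 0; j < 8; j++) {
            int v = pixels[i * line_size + j] + block[i * 8 + j];
            pixels[i * line_size + j] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
}
#endif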

#define CLEAR_BLOCKS(name, n)                           \
static void name(int16_t *blocks)                       \
{                                                       \
    __asm__ volatile (                                  \
        "pxor %%mm7, %%mm7              \n\t"           \
        "mov     %1,        %%"REG_a"   \n\t"           \
        "1:                             \n\t"           \
        "movq %%mm7,   (%0, %%"REG_a")  \n\t"           \
        "movq %%mm7,  8(%0, %%"REG_a")  \n\t"           \
        "movq %%mm7, 16(%0, %%"REG_a")  \n\t"           \
        "movq %%mm7, 24(%0, %%"REG_a")  \n\t"           \
        "add    $32, %%"REG_a"          \n\t"           \
        "js      1b                     \n\t"           \
        :: "r"(((uint8_t *)blocks) + 128 * n),          \
           "i"(-128 * n)                                \
        : "%"REG_a                                      \
        );                                              \
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)

static void clear_block_sse(int16_t *block)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0          \n"
        "movaps %%xmm0,    (%0)         \n"
        "movaps %%xmm0,  16(%0)         \n"
        "movaps %%xmm0,  32(%0)         \n"
        "movaps %%xmm0,  48(%0)         \n"
        "movaps %%xmm0,  64(%0)         \n"
        "movaps %%xmm0,  80(%0)         \n"
        "movaps %%xmm0,  96(%0)         \n"
        "movaps %%xmm0, 112(%0)         \n"
        :: "r"(block)
        : "memory"
    );
}

static void clear_blocks_sse(int16_t *blocks)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0              \n"
        "mov        %1,         %%"REG_a"   \n"
        "1:                                 \n"
        "movaps %%xmm0,    (%0, %%"REG_a")  \n"
        "movaps %%xmm0,  16(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  32(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  48(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  64(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  80(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  96(%0, %%"REG_a")  \n"
        "movaps %%xmm0, 112(%0, %%"REG_a")  \n"
        "add      $128,         %%"REG_a"   \n"
        "js         1b                      \n"
        :: "r"(((uint8_t *)blocks) + 128 * 6),
           "i"(-128 * 6)
        : "%"REG_a
    );
}

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
{
    x86_reg i = 0;
    __asm__ volatile (
        "jmp          2f                \n\t"
        "1:                             \n\t"
        "movq   (%1, %0), %%mm0         \n\t"
        "movq   (%2, %0), %%mm1         \n\t"
        "paddb     %%mm0, %%mm1         \n\t"
        "movq      %%mm1, (%2, %0)      \n\t"
        "movq  8(%1, %0), %%mm0         \n\t"
        "movq  8(%2, %0), %%mm1         \n\t"
        "paddb     %%mm0, %%mm1         \n\t"
        "movq      %%mm1, 8(%2, %0)     \n\t"
        "add         $16, %0            \n\t"
        "2:                             \n\t"
        "cmp          %3, %0            \n\t"
        "js           1b                \n\t"
        : "+r"(i)
        : "r"(src), "r"(dst), "r"((x86_reg)w - 15)
    );
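    /* scalar tail: the asm loop handles 16 bytes per iteration, so
     * finish any leftover bytes here */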
    for ( ; i < w; i++)
        dst[i + 0] += src[i + 0];
}

#if HAVE_7REGS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
                                            const uint8_t *diff, int w,
                                            int *left, int *left_top)
{
    x86_reg w2 = -w;
    x86_reg x;
    int l  = *left     & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile (
        "mov          %7, %3            \n"
        "1:                             \n"
        "movzbl (%3, %4), %2            \n"
        "mov          %2, %k3           \n"
        "sub         %b1, %b3           \n"
        "add         %b0, %b3           \n"
        "mov          %2, %1            \n"
        "cmp          %0, %2            \n"
        "cmovg        %0, %2            \n"
        "cmovg        %1, %0            \n"
        "cmp         %k3, %0            \n"
        "cmovg       %k3, %0            \n"
        "mov          %7, %3            \n"
        "cmp          %2, %0            \n"
        "cmovl        %2, %0            \n"
        "add    (%6, %4), %b0           \n"
        "mov         %b0, (%5, %4)      \n"
        "inc          %4                \n"
        "jl           1b                \n"
        : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        : "r"(dst + w), "r"(diff + w), "rm"(top + w)
    );
    *left     = l;
    *left_top = tl;
}
#endif
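
/* Scalar sketch of the cmov routine above (illustrative only, name
 * invented): HuffYUV median prediction, where each output byte is
 * diff[i] plus the median of left, top and left + top - top_left. */
#if 0
static void add_hfyu_median_prediction_c_ref(uint8_t *dst, const uint8_t *top,
                                             const uint8_t *diff, int w,
                                             int *left, int *left_top)
{
    int i;
    uint8_t l = *left, tl = *left_top;
    for (i = 0; i < w; i++) {
        const int t    = top[i];
        const int pred = l + t - tl;  /* gradient predictor */
        const int lo   = l < t ? l : t;
        const int hi   = l < t ? t : l;
        /* clamping pred to [lo, hi] yields the median of l, t, pred */
        const int m    = pred < lo ? lo : pred > hi ? hi : pred;
        l      = m + diff[i];         /* wraps modulo 256 via uint8_t */
        tl     = t;
        dst[i] = l;
    }
    *left     = l;
    *left_top = tl;
}
#endif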

/* Draw the edges of width 'w' of an image of size width x height.
 * This MMX version can only handle w == 8 or w == 16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
                           int w, int h, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if (w == 8) {
        __asm__ volatile (
            "1:                             \n\t"
            "movd            (%0), %%mm0    \n\t"
            "punpcklbw      %%mm0, %%mm0    \n\t"
            "punpcklwd      %%mm0, %%mm0    \n\t"
            "punpckldq      %%mm0, %%mm0    \n\t"
            "movq           %%mm0, -8(%0)   \n\t"
            "movq      -8(%0, %2), %%mm1    \n\t"
            "punpckhbw      %%mm1, %%mm1    \n\t"
            "punpckhwd      %%mm1, %%mm1    \n\t"
            "punpckhdq      %%mm1, %%mm1    \n\t"
            "movq           %%mm1, (%0, %2) \n\t"
            "add               %1, %0       \n\t"
            "cmp               %3, %0       \n\t"
            "jb                1b           \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    } else {
        __asm__ volatile (
            "1:                                 \n\t"
            "movd            (%0), %%mm0        \n\t"
            "punpcklbw      %%mm0, %%mm0        \n\t"
            "punpcklwd      %%mm0, %%mm0        \n\t"
            "punpckldq      %%mm0, %%mm0        \n\t"
            "movq           %%mm0, -8(%0)       \n\t"
            "movq           %%mm0, -16(%0)      \n\t"
            "movq      -8(%0, %2), %%mm1        \n\t"
            "punpckhbw      %%mm1, %%mm1        \n\t"
            "punpckhwd      %%mm1, %%mm1        \n\t"
            "punpckhdq      %%mm1, %%mm1        \n\t"
            "movq           %%mm1,  (%0, %2)    \n\t"
            "movq           %%mm1, 8(%0, %2)    \n\t"
            "add               %1, %0           \n\t"
            "cmp               %3, %0           \n\t"
            "jb                1b               \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    }

    /* top and bottom (and hopefully also the corners) */
    if (sides & EDGE_TOP) {
        for (i = 0; i < h; i += 4) {
            ptr = buf - (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
                  "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w)
                );
        }
    }

    if (sides & EDGE_BOTTOM) {
        for (i = 0; i < h; i += 4) {
            ptr = last_line + (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)last_line - (x86_reg)ptr - w),
                  "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
                  "r"(ptr + width + 2 * w)
                );
        }
    }
}
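
/* Scalar sketch of the edge drawing (illustrative only, name invented):
 * replicate the first and last pixel of each row into the left/right
 * margins, then copy whole padded rows upward/downward. */
#if 0
static void draw_edges_c_ref(uint8_t *buf, int wrap, int width, int height,
                             int w, int h, int sides)
{
    uint8_t *last_line = buf + (height - 1) * wrap;
    int i, j;
    for (i = 0; i < height; i++) {
        uint8_t *row = buf + i * wrap;
        for (j = 1; j <= w; j++) {
            row[-j]            = row[0];         /* left edge */
            row[width - 1 + j] = row[width - 1]; /* right edge */
        }
    }
    if (sides & EDGE_TOP)
        for (i = 1; i <= h; i++)
            memcpy(buf - i * wrap - w, buf - w, width + 2 * w);
    if (sides & EDGE_BOTTOM)
        for (i = 1; i <= h; i++)
            memcpy(last_line + i * wrap - w, last_line - w, width + 2 * w);
}
#endif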
#endif /* HAVE_INLINE_ASM */


#if HAVE_YASM
static void ff_avg_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
                                   int line_size, int h)
{
    ff_avg_pixels8_mmxext(block,     pixels,     line_size, h);
    ff_avg_pixels8_mmxext(block + 8, pixels + 8, line_size, h);
}

static void ff_put_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
                                   ptrdiff_t line_size, int h)
{
    ff_put_pixels8_mmxext(block,     pixels,     line_size, h);
    ff_put_pixels8_mmxext(block + 8, pixels + 8, line_size, h);
}

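/* In the qpelN_mcXY names generated by QPEL_OP below, X and Y are the
 * quarter-pel offsets in the horizontal and vertical direction: mc00 is
 * a plain copy, mc20 the horizontal half-pel case, mc22 the 2D half-pel
 * case, and so on. Each position is built from the mpeg4 h/v lowpass
 * filters and the pixels*_l2 averaging helpers declared above. */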
#define QPEL_OP(OPNAME, ROUNDER, RND, MMX)                              \
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);              \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
                                                   stride, 8);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
                                        stride, stride, 8);             \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride,    \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
                                                   stride, 8);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride,     \
                                        stride, 8);                     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
                                                   8, stride);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
                                        stride, stride, 8);             \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src,            \
                                                   stride, stride);     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
                                                   8, stride);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride,\
                                        stride, 8);                     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH,              \
                                        8, stride, 9);                  \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[9];                                                   \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src,  \
                                           ptrdiff_t stride)            \
{                                                                       \
    ff_ ## OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);            \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
                                                    stride, 16);        \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
                                         stride, 16);                   \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src,           \
                                                    stride, stride, 16);\
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
                                                    stride, 16);        \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half,            \
                                         stride, stride, 16);           \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
                                                    stride);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
                                         stride, 16);                   \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src,           \
                                                    stride, stride);    \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
                                                    stride);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + stride, half,       \
                                         stride, stride, 16);           \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}

QPEL_OP(put_,          ff_pw_16, _,        mmxext)
QPEL_OP(avg_,          ff_pw_16, _,        mmxext)
QPEL_OP(put_no_rnd_,   ff_pw_15, _no_rnd_, mmxext)
#endif /* HAVE_YASM */


#if HAVE_INLINE_ASM
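/* The RV40 (3/4, 3/4) sub-pel case reduces to the rounded half-pel
 * bilinear average, so the generic xy2 MMX routines generated from
 * rnd_template.c are reused as-is. */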
void ff_put_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels8_xy2_mmx(dst, src, stride, 8);
}

void ff_put_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels16_xy2_mmx(dst, src, stride, 16);
}

void ff_avg_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels8_xy2_mmx(dst, src, stride, 8);
}

void ff_avg_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels16_xy2_mmx(dst, src, stride, 16);
}

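/* The MMX path below computes, per pixel, the same bilinear blend as the
 * ff_gmc_c fallback: with s = 1 << shift and dx, dy the sub-pel fractions
 * of the transformed source position,
 *
 *   dst = (src[0, 0] * (s - dx) * (s - dy) +
 *          src[1, 0] *      dx  * (s - dy) +
 *          src[0, 1] * (s - dx) *      dy  +
 *          src[1, 1] *      dx  *      dy  + r) >> (2 * shift)
 */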
897 static void gmc_mmx(uint8_t *dst, uint8_t *src,
898                     int stride, int h, int ox, int oy,
899                     int dxx, int dxy, int dyx, int dyy,
900                     int shift, int r, int width, int height)
901 {
902     const int w    = 8;
903     const int ix   = ox  >> (16 + shift);
904     const int iy   = oy  >> (16 + shift);
905     const int oxs  = ox  >> 4;
906     const int oys  = oy  >> 4;
907     const int dxxs = dxx >> 4;
908     const int dxys = dxy >> 4;
909     const int dyxs = dyx >> 4;
910     const int dyys = dyy >> 4;
911     const uint16_t r4[4]   = { r, r, r, r };
912     const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
913     const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
914     const uint64_t shift2 = 2 * shift;
915     int x, y;
916
    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
    const int dxh = dxy * (h - 1);
    const int dyw = dyx * (w - 1);
    if ( // non-constant fullpel offset (3% of blocks)
        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx | dxy | dyx | dyy) & 15 ||
        (unsigned)ix >= width  - w ||
        (unsigned)iy >= height - h) {
        // FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
                 shift, r, width, height);
        return;
    }

    src += ix + iy * stride;

    // broadcast the scale factor s = 1 << shift into all four words of mm6
    // and clear mm7, which is used below to unpack bytes to words
    __asm__ volatile (
        "movd         %0, %%mm6         \n\t"
        "pxor      %%mm7, %%mm7         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        :: "r"(1<<shift)
    );

    for (x = 0; x < w; x += 4) {
        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
                            oxs - dxys + dxxs * (x + 1),
                            oxs - dxys + dxxs * (x + 2),
                            oxs - dxys + dxxs * (x + 3) };
        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
                            oys - dyys + dyxs * (x + 1),
                            oys - dyys + dyxs * (x + 2),
                            oys - dyys + dyxs * (x + 3) };

        for (y = 0; y < h; y++) {
            // advance the four x/y positions by one row and extract their
            // fractional parts, which serve as the bilinear weights dx/dy
            __asm__ volatile (
                "movq      %0, %%mm4    \n\t"
                "movq      %1, %%mm5    \n\t"
                "paddw     %2, %%mm4    \n\t"
                "paddw     %3, %%mm5    \n\t"
                "movq   %%mm4, %0       \n\t"
                "movq   %%mm5, %1       \n\t"
                "psrlw    $12, %%mm4    \n\t"
                "psrlw    $12, %%mm5    \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            __asm__ volatile (
                "movq      %%mm6, %%mm2 \n\t"
                "movq      %%mm6, %%mm1 \n\t"
                "psubw     %%mm4, %%mm2 \n\t"
                "psubw     %%mm5, %%mm1 \n\t"
                "movq      %%mm2, %%mm0 \n\t"
                "movq      %%mm4, %%mm3 \n\t"
                "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
                "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
                "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
                "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)

                "movd         %4, %%mm5 \n\t"
                "movd         %3, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
                "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy

                "movd         %2, %%mm5 \n\t"
                "movd         %1, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
                "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
                "paddw        %5, %%mm1 \n\t"
                "paddw     %%mm3, %%mm2 \n\t"
                "paddw     %%mm1, %%mm0 \n\t"
                "paddw     %%mm2, %%mm0 \n\t"

                "psrlw        %6, %%mm0 \n\t"
                "packuswb  %%mm0, %%mm0 \n\t"
                "movd      %%mm0, %0    \n\t"

                : "=m"(dst[x + y * stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride + 1]),
                  "m"(*r4), "m"(shift2)
            );
            src += stride;
        }
        src += 4 - h * stride;
    }
}
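
/* For reference, each output pixel produced by the asm above is the same
 * bilinear blend the ff_gmc_c() fallback computes, with s = 1 << shift and
 * dx/dy the per-pixel fractional offsets extracted by "psrlw $12"
 * (a scalar sketch, not part of the build; it assumes r4 and shift2, set up
 * earlier in this function, hold the rounder r and 2 * shift):
 *
 *     dst[x + y * stride] = (src[0]          * (s - dx) * (s - dy) +
 *                            src[1]          *      dx  * (s - dy) +
 *                            src[stride]     * (s - dx) *      dy  +
 *                            src[stride + 1] *      dx  *      dy  +
 *                            r) >> (2 * shift);
 */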

static void vector_clipf_sse(float *dst, const float *src,
                             float min, float max, int len)
{
    // process 16 floats (64 bytes) per iteration, walking backwards;
    // movaps requires src/dst to be 16-byte aligned
    x86_reg i = (len - 16) * 4;
    __asm__ volatile (
        "movss          %3, %%xmm4      \n\t"
        "movss          %4, %%xmm5      \n\t"
        "shufps $0, %%xmm4, %%xmm4      \n\t" // broadcast min to all four lanes
        "shufps $0, %%xmm5, %%xmm5      \n\t" // broadcast max to all four lanes
        "1:                             \n\t"
        "movaps   (%2, %0), %%xmm0      \n\t" // 3/1 on intel
        "movaps 16(%2, %0), %%xmm1      \n\t"
        "movaps 32(%2, %0), %%xmm2      \n\t"
        "movaps 48(%2, %0), %%xmm3      \n\t"
        "maxps      %%xmm4, %%xmm0      \n\t"
        "maxps      %%xmm4, %%xmm1      \n\t"
        "maxps      %%xmm4, %%xmm2      \n\t"
        "maxps      %%xmm4, %%xmm3      \n\t"
        "minps      %%xmm5, %%xmm0      \n\t"
        "minps      %%xmm5, %%xmm1      \n\t"
        "minps      %%xmm5, %%xmm2      \n\t"
        "minps      %%xmm5, %%xmm3      \n\t"
        "movaps     %%xmm0,   (%1, %0)  \n\t"
        "movaps     %%xmm1, 16(%1, %0)  \n\t"
        "movaps     %%xmm2, 32(%1, %0)  \n\t"
        "movaps     %%xmm3, 48(%1, %0)  \n\t"
        "sub           $64, %0          \n\t"
        "jge            1b              \n\t"
        : "+&r"(i)
        : "r"(dst), "r"(src), "m"(min), "m"(max)
        : "memory"
    );
}
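
/* Scalar equivalent of the loop above (a sketch; since the asm steps in
 * whole 64-byte chunks, len must be a multiple of 16):
 *
 *     for (i = 0; i < len; i++)
 *         dst[i] = FFMIN(FFMAX(src[i], min), max);
 */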

#endif /* HAVE_INLINE_ASM */

void ff_h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale);
void ff_h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale);

int32_t ff_scalarproduct_int16_mmxext(const int16_t *v1, const int16_t *v2,
                                      int order);
int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2,
                                    int order);
int32_t ff_scalarproduct_and_madd_int16_mmxext(int16_t *v1, const int16_t *v2,
                                               const int16_t *v3,
                                               int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2,
                                             const int16_t *v3,
                                             int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
                                              const int16_t *v3,
                                              int order, int mul);

void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
                                        const int16_t *window, unsigned int len);
void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);
void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
                                  const int16_t *window, unsigned int len);
void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
                                const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
                                 const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);

void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w);
void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w);

void ff_add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top,
                                          const uint8_t *diff, int w,
                                          int *left, int *left_top);
int  ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src,
                                       int w, int left);
int  ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src,
                                      int w, int left);

void ff_vector_clip_int32_mmx     (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse2    (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse4    (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
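/* All vector_clip_int32 variants share the semantics of the generic C
 * version: clamp each 32-bit sample into [min, max]. A hypothetical call
 * (sketch only), clamping len samples to the signed 16-bit range:
 *
 *     ff_vector_clip_int32_sse2(dst, src, -32768, 32767, len);
 */
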
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX)                          \
    do {                                                                     \
    c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
    } while (0)
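
/* For example, SET_QPEL_FUNCS(put_qpel, 0, 16, mmxext, ) expands to the 16
 * assignments
 *
 *     c->put_qpel_pixels_tab[0][ 0] = put_qpel16_mc00_mmxext;
 *     ...
 *     c->put_qpel_pixels_tab[0][15] = put_qpel16_mc33_mmxext;
 *
 * i.e. one function per quarter-pel position, where mcXY names the
 * horizontal (X) and vertical (Y) subpel phase.
 */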

static av_cold void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx,
                                     int mm_flags)
{
#if HAVE_INLINE_ASM
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    c->put_pixels_clamped        = ff_put_pixels_clamped_mmx;
    c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
    c->add_pixels_clamped        = ff_add_pixels_clamped_mmx;

    if (!high_bit_depth) {
        c->clear_block  = clear_block_mmx;
        c->clear_blocks = clear_blocks_mmx;
        c->draw_edges   = draw_edges_mmx;

        switch (avctx->idct_algo) {
        case FF_IDCT_AUTO:
        case FF_IDCT_SIMPLEMMX:
            c->idct_put              = ff_simple_idct_put_mmx;
            c->idct_add              = ff_simple_idct_add_mmx;
            c->idct                  = ff_simple_idct_mmx;
            c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
            break;
        case FF_IDCT_XVIDMMX:
            c->idct_put              = ff_idct_xvid_mmx_put;
            c->idct_add              = ff_idct_xvid_mmx_add;
            c->idct                  = ff_idct_xvid_mmx;
            break;
        }
    }

    c->gmc = gmc_mmx;

    c->add_bytes = add_bytes_mmx;
#endif /* HAVE_INLINE_ASM */

#if HAVE_YASM
    if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
        c->h263_v_loop_filter = ff_h263_v_loop_filter_mmx;
        c->h263_h_loop_filter = ff_h263_h_loop_filter_mmx;
    }

    c->vector_clip_int32 = ff_vector_clip_int32_mmx;
#endif /* HAVE_YASM */
}

static av_cold void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx,
                                        int mm_flags)
{
#if HAVE_INLINE_ASM
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
        c->idct_put = ff_idct_xvid_mmxext_put;
        c->idct_add = ff_idct_xvid_mmxext_add;
        c->idct     = ff_idct_xvid_mmxext;
    }
#endif /* HAVE_INLINE_ASM */

#if HAVE_MMXEXT_EXTERNAL
    SET_QPEL_FUNCS(avg_qpel,        0, 16, mmxext, );
    SET_QPEL_FUNCS(avg_qpel,        1,  8, mmxext, );

    SET_QPEL_FUNCS(put_qpel,        0, 16, mmxext, );
    SET_QPEL_FUNCS(put_qpel,        1,  8, mmxext, );
    SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmxext, );
    SET_QPEL_FUNCS(put_no_rnd_qpel, 1,  8, mmxext, );

    /* slower than cmov version on AMD */
    if (!(mm_flags & AV_CPU_FLAG_3DNOW))
        c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmxext;

    c->scalarproduct_int16          = ff_scalarproduct_int16_mmxext;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmxext;

    if (avctx->flags & CODEC_FLAG_BITEXACT) {
        c->apply_window_int16 = ff_apply_window_int16_mmxext;
    } else {
        c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
    }
#endif /* HAVE_MMXEXT_EXTERNAL */
}

static av_cold void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx,
                                     int mm_flags)
{
#if HAVE_INLINE_ASM
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (!high_bit_depth) {
        if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)) {
            /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
            c->clear_block  = clear_block_sse;
            c->clear_blocks = clear_blocks_sse;
        }
    }

    c->vector_clipf = vector_clipf_sse;
#endif /* HAVE_INLINE_ASM */
}

static av_cold void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
                                      int mm_flags)
{
#if HAVE_SSE2_INLINE
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
        c->idct_put              = ff_idct_xvid_sse2_put;
        c->idct_add              = ff_idct_xvid_sse2_add;
        c->idct                  = ff_idct_xvid_sse2;
        c->idct_permutation_type = FF_SSE2_IDCT_PERM;
    }
#endif /* HAVE_SSE2_INLINE */

#if HAVE_SSE2_EXTERNAL
    c->scalarproduct_int16          = ff_scalarproduct_int16_sse2;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
    if (mm_flags & AV_CPU_FLAG_ATOM) {
        c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
    } else {
        c->vector_clip_int32 = ff_vector_clip_int32_sse2;
    }
    if (avctx->flags & CODEC_FLAG_BITEXACT) {
        c->apply_window_int16 = ff_apply_window_int16_sse2;
    } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
        c->apply_window_int16 = ff_apply_window_int16_round_sse2;
    }
    c->bswap_buf = ff_bswap32_buf_sse2;
#endif /* HAVE_SSE2_EXTERNAL */
}

static av_cold void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
                                       int mm_flags)
{
#if HAVE_SSSE3_EXTERNAL
    c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
    if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
        c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;

    if (mm_flags & AV_CPU_FLAG_ATOM)
        c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
    else
        c->apply_window_int16 = ff_apply_window_int16_ssse3;
    if (!(mm_flags & (AV_CPU_FLAG_SSE42 | AV_CPU_FLAG_3DNOW))) // cachesplit
        c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
    c->bswap_buf = ff_bswap32_buf_ssse3;
#endif /* HAVE_SSSE3_EXTERNAL */
}

static av_cold void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
                                      int mm_flags)
{
#if HAVE_SSE4_EXTERNAL
    c->vector_clip_int32 = ff_vector_clip_int32_sse4;
#endif /* HAVE_SSE4_EXTERNAL */
}

av_cold void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();

#if HAVE_7REGS && HAVE_INLINE_ASM
    if (mm_flags & AV_CPU_FLAG_CMOV)
        c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif

    if (mm_flags & AV_CPU_FLAG_MMX)
        dsputil_init_mmx(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_MMXEXT)
        dsputil_init_mmxext(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE)
        dsputil_init_sse(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE2)
        dsputil_init_sse2(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSSE3)
        dsputil_init_ssse3(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE4)
        dsputil_init_sse4(c, avctx, mm_flags);

    if (CONFIG_ENCODERS)
        ff_dsputilenc_init_mmx(c, avctx);
}
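
/* Typical usage (a sketch; on x86 the generic dsputil initialisation is
 * expected to dispatch to this entry point once per codec context):
 *
 *     DSPContext c;
 *     ff_dsputil_init_mmx(&c, avctx);  // fills c for the host CPU
 *
 * After the call, each DSPContext member points at the fastest variant the
 * detected CPU flags allow; c.vector_clip_int32, for instance, may be the
 * MMX, SSE2 or SSE4 version depending on av_get_cpu_flags().
 */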