x86: dsputil: Move ff_pd assembly constants to the only place they are used
libavcodec/x86/dsputil_mmx.c
/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

/* pixel operations */
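/* Each "pw" constant replicates one 16-bit value (and "pb" one 8-bit value)
 * across a 64-bit MMX or 128-bit SSE register, so a single paddw/pmullw/pand
 * applies it to every lane at once. */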
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15)   =   0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_17)   = { 0x0011001100110011ULL, 0x0011001100110011ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20)   =   0x0014001400140014ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42)   =   0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53)   =   0x0035003500350035ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96)   =   0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128)  =   0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255)  =   0x00ff00ff00ff00ffULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_512)  = { 0x0200020002000200ULL, 0x0200020002000200ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL };

DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC)   =   0xFCFCFCFCFCFCFCFCULL;


void ff_put_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1,
                                     uint8_t *src2, int dstStride,
                                     int src1Stride, int h);
void ff_avg_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_avg_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                                      int dstStride, int src1Stride, int h);
void ff_put_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride,
                                                 int h);
void ff_put_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                        int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                        int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride,
                                                int h);
void ff_put_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_avg_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride);
void ff_put_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                        int dstStride, int srcStride);
void ff_avg_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                        int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride);
#define ff_put_no_rnd_pixels16_mmxext ff_put_pixels16_mmxext
#define ff_put_no_rnd_pixels8_mmxext ff_put_pixels8_mmxext


#if HAVE_INLINE_ASM

/***********************************/
/* standard MMX */

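/* Copy an 8x8 block of 16-bit IDCT coefficients to 8-bit pixels:
 * packuswb saturates each signed word to the unsigned [0, 255] range,
 * so out-of-range coefficients are clamped rather than wrapped. */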
void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;

    /* read the pixels */
    p   = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile (
        "movq      (%3), %%mm0          \n\t"
        "movq     8(%3), %%mm1          \n\t"
        "movq    16(%3), %%mm2          \n\t"
        "movq    24(%3), %%mm3          \n\t"
        "movq    32(%3), %%mm4          \n\t"
        "movq    40(%3), %%mm5          \n\t"
        "movq    48(%3), %%mm6          \n\t"
        "movq    56(%3), %%mm7          \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "packuswb %%mm3, %%mm2          \n\t"
        "packuswb %%mm5, %%mm4          \n\t"
        "packuswb %%mm7, %%mm6          \n\t"
        "movq     %%mm0, (%0)           \n\t"
        "movq     %%mm2, (%0, %1)       \n\t"
        "movq     %%mm4, (%0, %1, 2)    \n\t"
        "movq     %%mm6, (%0, %2)       \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
           "r"(p)
        : "memory");
    pix += line_size * 4;
    p   += 32;

    // If this were an exact copy of the code above, the compiler
    // would generate some very strange code; hence the "r" constraints.
    __asm__ volatile (
        "movq       (%3), %%mm0         \n\t"
        "movq      8(%3), %%mm1         \n\t"
        "movq     16(%3), %%mm2         \n\t"
        "movq     24(%3), %%mm3         \n\t"
        "movq     32(%3), %%mm4         \n\t"
        "movq     40(%3), %%mm5         \n\t"
        "movq     48(%3), %%mm6         \n\t"
        "movq     56(%3), %%mm7         \n\t"
        "packuswb  %%mm1, %%mm0         \n\t"
        "packuswb  %%mm3, %%mm2         \n\t"
        "packuswb  %%mm5, %%mm4         \n\t"
        "packuswb  %%mm7, %%mm6         \n\t"
        "movq      %%mm0, (%0)          \n\t"
        "movq      %%mm2, (%0, %1)      \n\t"
        "movq      %%mm4, (%0, %1, 2)   \n\t"
        "movq      %%mm6, (%0, %2)      \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
        : "memory");
}

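/* Pack four rows of signed 16-bit coefficients down to bytes with signed
 * saturation (packsswb), then add the 0x80 bias held in mm0 (ff_pb_80) to
 * shift the result into the unsigned [0, 255] pixel range; the macro is
 * instantiated twice below to cover all eight rows. */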
#define put_signed_pixels_clamped_mmx_half(off)             \
    "movq          "#off"(%2), %%mm1        \n\t"           \
    "movq     16 + "#off"(%2), %%mm2        \n\t"           \
    "movq     32 + "#off"(%2), %%mm3        \n\t"           \
    "movq     48 + "#off"(%2), %%mm4        \n\t"           \
    "packsswb  8 + "#off"(%2), %%mm1        \n\t"           \
    "packsswb 24 + "#off"(%2), %%mm2        \n\t"           \
    "packsswb 40 + "#off"(%2), %%mm3        \n\t"           \
    "packsswb 56 + "#off"(%2), %%mm4        \n\t"           \
    "paddb              %%mm0, %%mm1        \n\t"           \
    "paddb              %%mm0, %%mm2        \n\t"           \
    "paddb              %%mm0, %%mm3        \n\t"           \
    "paddb              %%mm0, %%mm4        \n\t"           \
    "movq               %%mm1, (%0)         \n\t"           \
    "movq               %%mm2, (%0, %3)     \n\t"           \
    "movq               %%mm3, (%0, %3, 2)  \n\t"           \
    "movq               %%mm4, (%0, %1)     \n\t"

void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                                      int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_pb_80)", %%mm0     \n\t"
        "lea         (%3, %3, 2), %1        \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea         (%0, %3, 4), %0        \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        : "+&r"(pixels), "=&r"(line_skip3)
        : "r"(block), "r"(line_skip)
        : "memory");
}

void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p   = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile (
            "movq        (%2), %%mm0    \n\t"
            "movq       8(%2), %%mm1    \n\t"
            "movq      16(%2), %%mm2    \n\t"
            "movq      24(%2), %%mm3    \n\t"
            "movq          %0, %%mm4    \n\t"
            "movq          %1, %%mm6    \n\t"
            "movq       %%mm4, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm4    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm4, %%mm0    \n\t"
            "paddsw     %%mm5, %%mm1    \n\t"
            "movq       %%mm6, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm6    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm6, %%mm2    \n\t"
            "paddsw     %%mm5, %%mm3    \n\t"
            "packuswb   %%mm1, %%mm0    \n\t"
            "packuswb   %%mm3, %%mm2    \n\t"
            "movq       %%mm0, %0       \n\t"
            "movq       %%mm2, %1       \n\t"
            : "+m"(*pix), "+m"(*(pix + line_size))
            : "r"(p)
            : "memory");
        pix += line_size * 2;
        p   += 16;
    } while (--i);
}

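/* Zero n consecutive 64-coefficient (128-byte) blocks. The block pointer is
 * biased to the end of the buffer and REG_a counts up from -128 * n, so the
 * sign flag alone ("js 1b") terminates the loop. */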
#define CLEAR_BLOCKS(name, n)                           \
static void name(int16_t *blocks)                       \
{                                                       \
    __asm__ volatile (                                  \
        "pxor %%mm7, %%mm7              \n\t"           \
        "mov     %1,        %%"REG_a"   \n\t"           \
        "1:                             \n\t"           \
        "movq %%mm7,   (%0, %%"REG_a")  \n\t"           \
        "movq %%mm7,  8(%0, %%"REG_a")  \n\t"           \
        "movq %%mm7, 16(%0, %%"REG_a")  \n\t"           \
        "movq %%mm7, 24(%0, %%"REG_a")  \n\t"           \
        "add    $32, %%"REG_a"          \n\t"           \
        "js      1b                     \n\t"           \
        :: "r"(((uint8_t *)blocks) + 128 * n),          \
           "i"(-128 * n)                                \
        : "%"REG_a                                      \
        );                                              \
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)

static void clear_block_sse(int16_t *block)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0          \n"
        "movaps %%xmm0,    (%0)         \n"
        "movaps %%xmm0,  16(%0)         \n"
        "movaps %%xmm0,  32(%0)         \n"
        "movaps %%xmm0,  48(%0)         \n"
        "movaps %%xmm0,  64(%0)         \n"
        "movaps %%xmm0,  80(%0)         \n"
        "movaps %%xmm0,  96(%0)         \n"
        "movaps %%xmm0, 112(%0)         \n"
        :: "r"(block)
        : "memory"
    );
}

static void clear_blocks_sse(int16_t *blocks)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0              \n"
        "mov        %1,         %%"REG_a"   \n"
        "1:                                 \n"
        "movaps %%xmm0,    (%0, %%"REG_a")  \n"
        "movaps %%xmm0,  16(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  32(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  48(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  64(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  80(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  96(%0, %%"REG_a")  \n"
        "movaps %%xmm0, 112(%0, %%"REG_a")  \n"
        "add      $128,         %%"REG_a"   \n"
        "js         1b                      \n"
        :: "r"(((uint8_t *)blocks) + 128 * 6),
           "i"(-128 * 6)
        : "%"REG_a
    );
}

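/* dst[i] += src[i] for i in [0, w): the MMX loop handles 16 bytes per
 * iteration and the scalar loop at the end picks up the remaining bytes
 * when w is not a multiple of 16. */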
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
{
    x86_reg i = 0;
    __asm__ volatile (
        "jmp          2f                \n\t"
        "1:                             \n\t"
        "movq   (%1, %0), %%mm0         \n\t"
        "movq   (%2, %0), %%mm1         \n\t"
        "paddb     %%mm0, %%mm1         \n\t"
        "movq      %%mm1, (%2, %0)      \n\t"
        "movq  8(%1, %0), %%mm0         \n\t"
        "movq  8(%2, %0), %%mm1         \n\t"
        "paddb     %%mm0, %%mm1         \n\t"
        "movq      %%mm1, 8(%2, %0)     \n\t"
        "add         $16, %0            \n\t"
        "2:                             \n\t"
        "cmp          %3, %0            \n\t"
        "js           1b                \n\t"
        : "+r"(i)
        : "r"(src), "r"(dst), "r"((x86_reg)w - 15)
    );
    for ( ; i < w; i++)
        dst[i + 0] += src[i + 0];
}

#if HAVE_7REGS
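/* HuffYUV median prediction: for each byte, predict
 * mid(left, top, left + top - topleft), add the residual from 'diff',
 * and store the result; cmov keeps the three-way median branchless. */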
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
                                            const uint8_t *diff, int w,
                                            int *left, int *left_top)
{
    x86_reg w2 = -w;
    x86_reg x;
    int l  = *left     & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile (
        "mov          %7, %3            \n"
        "1:                             \n"
        "movzbl (%3, %4), %2            \n"
        "mov          %2, %k3           \n"
        "sub         %b1, %b3           \n"
        "add         %b0, %b3           \n"
        "mov          %2, %1            \n"
        "cmp          %0, %2            \n"
        "cmovg        %0, %2            \n"
        "cmovg        %1, %0            \n"
        "cmp         %k3, %0            \n"
        "cmovg       %k3, %0            \n"
        "mov          %7, %3            \n"
        "cmp          %2, %0            \n"
        "cmovl        %2, %0            \n"
        "add    (%6, %4), %b0           \n"
        "mov         %b0, (%5, %4)      \n"
        "inc          %4                \n"
        "jl           1b                \n"
        : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        : "r"(dst + w), "r"(diff + w), "rm"(top + w)
    );
    *left     = l;
    *left_top = tl;
}
#endif

/* Draw edges of width 'w' around an image of the given width and height;
 * this MMX version can only handle w == 8 or w == 16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
                           int w, int h, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if (w == 8) {
        __asm__ volatile (
            "1:                             \n\t"
            "movd            (%0), %%mm0    \n\t"
            "punpcklbw      %%mm0, %%mm0    \n\t"
            "punpcklwd      %%mm0, %%mm0    \n\t"
            "punpckldq      %%mm0, %%mm0    \n\t"
            "movq           %%mm0, -8(%0)   \n\t"
            "movq      -8(%0, %2), %%mm1    \n\t"
            "punpckhbw      %%mm1, %%mm1    \n\t"
            "punpckhwd      %%mm1, %%mm1    \n\t"
            "punpckhdq      %%mm1, %%mm1    \n\t"
            "movq           %%mm1, (%0, %2) \n\t"
            "add               %1, %0       \n\t"
            "cmp               %3, %0       \n\t"
            "jb                1b           \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    } else {
        __asm__ volatile (
            "1:                                 \n\t"
            "movd            (%0), %%mm0        \n\t"
            "punpcklbw      %%mm0, %%mm0        \n\t"
            "punpcklwd      %%mm0, %%mm0        \n\t"
            "punpckldq      %%mm0, %%mm0        \n\t"
            "movq           %%mm0, -8(%0)       \n\t"
            "movq           %%mm0, -16(%0)      \n\t"
            "movq      -8(%0, %2), %%mm1        \n\t"
            "punpckhbw      %%mm1, %%mm1        \n\t"
            "punpckhwd      %%mm1, %%mm1        \n\t"
            "punpckhdq      %%mm1, %%mm1        \n\t"
            "movq           %%mm1,  (%0, %2)    \n\t"
            "movq           %%mm1, 8(%0, %2)    \n\t"
            "add               %1, %0           \n\t"
            "cmp               %3, %0           \n\t"
            "jb                1b               \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    }

    /* top and bottom (and hopefully also the corners) */
    if (sides & EDGE_TOP) {
        for (i = 0; i < h; i += 4) {
            ptr = buf - (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
                  "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w)
                );
        }
    }

    if (sides & EDGE_BOTTOM) {
        for (i = 0; i < h; i += 4) {
            ptr = last_line + (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)last_line - (x86_reg)ptr - w),
                  "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
                  "r"(ptr + width + 2 * w)
                );
        }
    }
}
#endif /* HAVE_INLINE_ASM */


#if HAVE_YASM
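/* The 16-pixel-wide put/avg ops are built from two calls to the 8-wide
 * yasm primitives, covering the left and right halves of the block. */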
static void ff_avg_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
                                   int line_size, int h)
{
    ff_avg_pixels8_mmxext(block,     pixels,     line_size, h);
    ff_avg_pixels8_mmxext(block + 8, pixels + 8, line_size, h);
}

static void ff_put_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
                                   ptrdiff_t line_size, int h)
{
    ff_put_pixels8_mmxext(block,     pixels,     line_size, h);
    ff_put_pixels8_mmxext(block + 8, pixels + 8, line_size, h);
}

#define QPEL_OP(OPNAME, ROUNDER, RND, MMX)                              \
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);              \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
                                                   stride, 8);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
                                        stride, stride, 8);             \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride,    \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
                                                   stride, 8);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride,     \
                                        stride, 8);                     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
                                                   8, stride);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
                                        stride, stride, 8);             \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src,            \
                                                   stride, stride);     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
                                                   8, stride);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride,\
                                        stride, 8);                     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH,              \
                                        8, stride, 9);                  \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[9];                                                   \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src,  \
                                           ptrdiff_t stride)            \
{                                                                       \
    ff_ ## OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);            \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
                                                    stride, 16);        \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
                                         stride, 16);                   \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src,           \
                                                    stride, stride, 16);\
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
                                                    stride, 16);        \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half,            \
                                         stride, stride, 16);           \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
                                                    stride);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
                                         stride, 16);                   \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src,           \
                                                    stride, stride);    \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
                                                    stride);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half,         \
                                         stride, stride, 16);           \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}

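/* mcXY names the quarter-pel phase: X is the horizontal and Y the vertical
 * offset in quarter pixels, so mc00 is the fullpel copy, mc20 the horizontal
 * halfpel, mc02 the vertical halfpel and mc22 the center position. */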
QPEL_OP(put_,          ff_pw_16, _,        mmxext)
QPEL_OP(avg_,          ff_pw_16, _,        mmxext)
QPEL_OP(put_no_rnd_,   ff_pw_15, _no_rnd_, mmxext)
#endif /* HAVE_YASM */


#if HAVE_INLINE_ASM
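/* Global motion compensation: each output pixel's source position is an
 * affine function of (x, y). The fast path below bilinearly interpolates
 * the four neighboring source pixels, weighting them by the fractional
 * parts of the transformed coordinates; blocks that straddle a fullpel
 * boundary or need more subpel precision fall back to ff_gmc_c(). */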
static void gmc_mmx(uint8_t *dst, uint8_t *src,
                    int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy,
                    int shift, int r, int width, int height)
{
    const int w    = 8;
    const int ix   = ox  >> (16 + shift);
    const int iy   = oy  >> (16 + shift);
    const int oxs  = ox  >> 4;
    const int oys  = oy  >> 4;
    const int dxxs = dxx >> 4;
    const int dxys = dxy >> 4;
    const int dyxs = dyx >> 4;
    const int dyys = dyy >> 4;
    const uint16_t r4[4]   = { r, r, r, r };
    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
    const uint64_t shift2 = 2 * shift;
    int x, y;

    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
    const int dxh = dxy * (h - 1);
    const int dyw = dyx * (w - 1);
    if ( // non-constant fullpel offset (3% of blocks)
        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx | dxy | dyx | dyy) & 15 ||
        (unsigned)ix >= width  - w ||
        (unsigned)iy >= height - h) {
        // FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
                 shift, r, width, height);
        return;
    }

    src += ix + iy * stride;

    __asm__ volatile (
        "movd         %0, %%mm6         \n\t"
        "pxor      %%mm7, %%mm7         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        :: "r"(1<<shift)
    );

    for (x = 0; x < w; x += 4) {
        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
                            oxs - dxys + dxxs * (x + 1),
                            oxs - dxys + dxxs * (x + 2),
                            oxs - dxys + dxxs * (x + 3) };
        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
                            oys - dyys + dyxs * (x + 1),
                            oys - dyys + dyxs * (x + 2),
                            oys - dyys + dyxs * (x + 3) };

        for (y = 0; y < h; y++) {
            __asm__ volatile (
                "movq      %0, %%mm4    \n\t"
                "movq      %1, %%mm5    \n\t"
                "paddw     %2, %%mm4    \n\t"
                "paddw     %3, %%mm5    \n\t"
                "movq   %%mm4, %0       \n\t"
                "movq   %%mm5, %1       \n\t"
                "psrlw    $12, %%mm4    \n\t"
                "psrlw    $12, %%mm5    \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            __asm__ volatile (
                "movq      %%mm6, %%mm2 \n\t"
                "movq      %%mm6, %%mm1 \n\t"
                "psubw     %%mm4, %%mm2 \n\t"
                "psubw     %%mm5, %%mm1 \n\t"
                "movq      %%mm2, %%mm0 \n\t"
                "movq      %%mm4, %%mm3 \n\t"
                "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
                "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
                "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
                "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)

                "movd         %4, %%mm5 \n\t"
                "movd         %3, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
                "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy

                "movd         %2, %%mm5 \n\t"
                "movd         %1, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
                "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
                "paddw        %5, %%mm1 \n\t"
                "paddw     %%mm3, %%mm2 \n\t"
                "paddw     %%mm1, %%mm0 \n\t"
                "paddw     %%mm2, %%mm0 \n\t"

                "psrlw        %6, %%mm0 \n\t"
                "packuswb  %%mm0, %%mm0 \n\t"
                "movd      %%mm0, %0    \n\t"

                : "=m"(dst[x + y * stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride + 1]),
                  "m"(*r4), "m"(shift2)
            );
            src += stride;
        }
        src += 4 - h * stride;
    }
}

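/* Clamp each float in src[] to [min, max] with maxps/minps, 16 floats per
 * iteration; equivalent to dst[i] = FFMIN(FFMAX(src[i], min), max). */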
975 static void vector_clipf_sse(float *dst, const float *src,
976                              float min, float max, int len)
977 {
978     x86_reg i = (len - 16) * 4;
979     __asm__ volatile (
980         "movss          %3, %%xmm4      \n\t"
981         "movss          %4, %%xmm5      \n\t"
982         "shufps $0, %%xmm4, %%xmm4      \n\t"
983         "shufps $0, %%xmm5, %%xmm5      \n\t"
984         "1:                             \n\t"
985         "movaps   (%2, %0), %%xmm0      \n\t" // 3/1 on intel
986         "movaps 16(%2, %0), %%xmm1      \n\t"
987         "movaps 32(%2, %0), %%xmm2      \n\t"
988         "movaps 48(%2, %0), %%xmm3      \n\t"
989         "maxps      %%xmm4, %%xmm0      \n\t"
990         "maxps      %%xmm4, %%xmm1      \n\t"
991         "maxps      %%xmm4, %%xmm2      \n\t"
992         "maxps      %%xmm4, %%xmm3      \n\t"
993         "minps      %%xmm5, %%xmm0      \n\t"
994         "minps      %%xmm5, %%xmm1      \n\t"
995         "minps      %%xmm5, %%xmm2      \n\t"
996         "minps      %%xmm5, %%xmm3      \n\t"
997         "movaps     %%xmm0,   (%1, %0)  \n\t"
998         "movaps     %%xmm1, 16(%1, %0)  \n\t"
999         "movaps     %%xmm2, 32(%1, %0)  \n\t"
1000         "movaps     %%xmm3, 48(%1, %0)  \n\t"
1001         "sub           $64, %0          \n\t"
1002         "jge            1b              \n\t"
1003         : "+&r"(i)
1004         : "r"(dst), "r"(src), "m"(min), "m"(max)
1005         : "memory"
1006     );
1007 }
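
/* Scalar equivalent of vector_clipf_sse, as a reference sketch (the SSE
 * version additionally requires aligned buffers and len % 16 == 0): */
#if 0
static void vector_clipf_ref(float *dst, const float *src,
                             float min, float max, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = FFMIN(FFMAX(src[i], min), max);
}
#endif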

#endif /* HAVE_INLINE_ASM */

void ff_h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale);
void ff_h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale);

int32_t ff_scalarproduct_int16_mmxext(const int16_t *v1, const int16_t *v2,
                                      int order);
int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2,
                                    int order);
int32_t ff_scalarproduct_and_madd_int16_mmxext(int16_t *v1, const int16_t *v2,
                                               const int16_t *v3,
                                               int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2,
                                             const int16_t *v3,
                                             int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
                                              const int16_t *v3,
                                              int order, int mul);
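
/* What the scalarproduct_and_madd entry points compute, as a scalar
 * reference; a minimal sketch of the intended semantics, not part of
 * the build: */
#if 0
static int32_t scalarproduct_and_madd_int16_ref(int16_t *v1, const int16_t *v2,
                                                const int16_t *v3,
                                                int order, int mul)
{
    int32_t res = 0;
    int i;
    for (i = 0; i < order; i++) {
        res   += v1[i] * v2[i]; /* dot-product term */
        v1[i] += mul * v3[i];   /* multiply-accumulate back into v1 */
    }
    return res;
}
#endif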

void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
                                        const int16_t *window, unsigned int len);
void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);
void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
                                  const int16_t *window, unsigned int len);
void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
                                const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
                                 const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);
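
/* A plausible scalar model of apply_window_int16: the window is symmetric
 * and passed as its first half in 1.15 fixed point; the "_round" variants
 * round while the bit-exact ones truncate. A sketch of the rounded form,
 * under those assumptions: */
#if 0
static void apply_window_int16_ref(int16_t *output, const int16_t *input,
                                   const int16_t *window, unsigned int len)
{
    unsigned int i, len2 = len >> 1;
    for (i = 0; i < len2; i++) {
        int w = window[i]; /* same coefficient for both mirrored samples */
        output[i]           = (input[i]           * w + (1 << 14)) >> 15;
        output[len - i - 1] = (input[len - i - 1] * w + (1 << 14)) >> 15;
    }
}
#endif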

void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w);
void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w);
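
/* bswap32_buf byte-swaps each 32-bit word; scalar sketch using
 * av_bswap32() from libavutil/bswap.h: */
#if 0
static void bswap32_buf_ref(uint32_t *dst, const uint32_t *src, int w)
{
    int i;
    for (i = 0; i < w; i++)
        dst[i] = av_bswap32(src[i]);
}
#endif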

void ff_add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top,
                                          const uint8_t *diff, int w,
                                          int *left, int *left_top);
int  ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src,
                                       int w, int left);
int  ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src,
                                      int w, int left);
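
/* HuffYUV left prediction accumulates a running 8-bit sum; a scalar
 * sketch (the return value carries the accumulator to the next call): */
#if 0
static int add_hfyu_left_prediction_ref(uint8_t *dst, const uint8_t *src,
                                        int w, int left)
{
    int i;
    for (i = 0; i < w; i++) {
        left   = (left + src[i]) & 0xff; /* mod-256 running sum */
        dst[i] = left;
    }
    return left;
}
#endif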

void ff_vector_clip_int32_mmx     (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse2    (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse4    (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
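
/* All four vector_clip_int32 flavours implement the same clamp;
 * scalar sketch: */
#if 0
static void vector_clip_int32_ref(int32_t *dst, const int32_t *src,
                                  int32_t min, int32_t max, unsigned int len)
{
    unsigned int i;
    for (i = 0; i < len; i++)
        dst[i] = src[i] < min ? min : (src[i] > max ? max : src[i]);
}
#endif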

#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX)                          \
    do {                                                                     \
    c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
    } while (0)
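
/* Example expansion: SET_QPEL_FUNCS(avg_qpel, 0, 16, mmxext, ) fills
 * c->avg_qpel_pixels_tab[0][0..15] with avg_qpel16_mcXY_mmxext, where
 * X and Y are the quarter-pel horizontal/vertical phases, e.g.
 *
 *     c->avg_qpel_pixels_tab[0][ 5] = avg_qpel16_mc11_mmxext;
 *
 * The PREFIX argument is left empty here because these functions carry
 * no extra name prefix. */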

static av_cold void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx,
                                     int mm_flags)
{
#if HAVE_MMX_INLINE
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    c->put_pixels_clamped        = ff_put_pixels_clamped_mmx;
    c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
    c->add_pixels_clamped        = ff_add_pixels_clamped_mmx;

    if (!high_bit_depth) {
        c->clear_block  = clear_block_mmx;
        c->clear_blocks = clear_blocks_mmx;
        c->draw_edges   = draw_edges_mmx;

        switch (avctx->idct_algo) {
        case FF_IDCT_AUTO:
        case FF_IDCT_SIMPLEMMX:
            c->idct_put              = ff_simple_idct_put_mmx;
            c->idct_add              = ff_simple_idct_add_mmx;
            c->idct                  = ff_simple_idct_mmx;
            c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
            break;
        case FF_IDCT_XVIDMMX:
            c->idct_put              = ff_idct_xvid_mmx_put;
            c->idct_add              = ff_idct_xvid_mmx_add;
            c->idct                  = ff_idct_xvid_mmx;
            break;
        }
    }

    c->gmc = gmc_mmx;

    c->add_bytes = add_bytes_mmx;
#endif /* HAVE_MMX_INLINE */

#if HAVE_MMX_EXTERNAL
    if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
        c->h263_v_loop_filter = ff_h263_v_loop_filter_mmx;
        c->h263_h_loop_filter = ff_h263_h_loop_filter_mmx;
    }

    c->vector_clip_int32 = ff_vector_clip_int32_mmx;
#endif /* HAVE_MMX_EXTERNAL */
}

static av_cold void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx,
                                        int mm_flags)
{
#if HAVE_MMXEXT_INLINE
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
        c->idct_put = ff_idct_xvid_mmxext_put;
        c->idct_add = ff_idct_xvid_mmxext_add;
        c->idct     = ff_idct_xvid_mmxext;
    }
#endif /* HAVE_MMXEXT_INLINE */

#if HAVE_MMXEXT_EXTERNAL
    SET_QPEL_FUNCS(avg_qpel,        0, 16, mmxext, );
    SET_QPEL_FUNCS(avg_qpel,        1,  8, mmxext, );

    SET_QPEL_FUNCS(put_qpel,        0, 16, mmxext, );
    SET_QPEL_FUNCS(put_qpel,        1,  8, mmxext, );
    SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmxext, );
    SET_QPEL_FUNCS(put_no_rnd_qpel, 1,  8, mmxext, );

    /* slower than cmov version on AMD */
    if (!(mm_flags & AV_CPU_FLAG_3DNOW))
        c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmxext;

    c->scalarproduct_int16          = ff_scalarproduct_int16_mmxext;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmxext;

    if (avctx->flags & CODEC_FLAG_BITEXACT) {
        c->apply_window_int16 = ff_apply_window_int16_mmxext;
    } else {
        c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
    }
#endif /* HAVE_MMXEXT_EXTERNAL */
}

static av_cold void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx,
                                     int mm_flags)
{
#if HAVE_SSE_INLINE
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (!high_bit_depth) {
        if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)) {
            /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
            c->clear_block  = clear_block_sse;
            c->clear_blocks = clear_blocks_sse;
        }
    }

    c->vector_clipf = vector_clipf_sse;
#endif /* HAVE_SSE_INLINE */
}

static av_cold void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
                                      int mm_flags)
{
#if HAVE_SSE2_INLINE
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
        c->idct_put              = ff_idct_xvid_sse2_put;
        c->idct_add              = ff_idct_xvid_sse2_add;
        c->idct                  = ff_idct_xvid_sse2;
        c->idct_permutation_type = FF_SSE2_IDCT_PERM;
    }
#endif /* HAVE_SSE2_INLINE */

#if HAVE_SSE2_EXTERNAL
    c->scalarproduct_int16          = ff_scalarproduct_int16_sse2;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
    if (mm_flags & AV_CPU_FLAG_ATOM) {
        c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
    } else {
        c->vector_clip_int32 = ff_vector_clip_int32_sse2;
    }
    if (avctx->flags & CODEC_FLAG_BITEXACT) {
        c->apply_window_int16 = ff_apply_window_int16_sse2;
    } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
        c->apply_window_int16 = ff_apply_window_int16_round_sse2;
    }
    c->bswap_buf = ff_bswap32_buf_sse2;
#endif /* HAVE_SSE2_EXTERNAL */
}

static av_cold void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
                                       int mm_flags)
{
#if HAVE_SSSE3_EXTERNAL
    c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
    if (mm_flags & AV_CPU_FLAG_SSE4) // not really SSE4, just slow on Conroe
        c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;

    if (mm_flags & AV_CPU_FLAG_ATOM)
        c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
    else
        c->apply_window_int16 = ff_apply_window_int16_ssse3;
    if (!(mm_flags & (AV_CPU_FLAG_SSE42 | AV_CPU_FLAG_3DNOW))) // cachesplit
        c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
    c->bswap_buf = ff_bswap32_buf_ssse3;
#endif /* HAVE_SSSE3_EXTERNAL */
}

static av_cold void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
                                      int mm_flags)
{
#if HAVE_SSE4_EXTERNAL
    c->vector_clip_int32 = ff_vector_clip_int32_sse4;
#endif /* HAVE_SSE4_EXTERNAL */
}

av_cold void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();

#if HAVE_7REGS && HAVE_INLINE_ASM
    if (mm_flags & AV_CPU_FLAG_CMOV)
        c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif

    if (mm_flags & AV_CPU_FLAG_MMX)
        dsputil_init_mmx(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_MMXEXT)
        dsputil_init_mmxext(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE)
        dsputil_init_sse(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE2)
        dsputil_init_sse2(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSSE3)
        dsputil_init_ssse3(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE4)
        dsputil_init_sse4(c, avctx, mm_flags);

    if (CONFIG_ENCODERS)
        ff_dsputilenc_init_mmx(c, avctx);
}
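
/* Note on ordering: each dsputil_init_* above runs only when its CPU flag
 * is set, and later (more capable) levels simply overwrite the function
 * pointers installed by earlier ones, e.g. vector_clip_int32 is upgraded
 * MMX -> SSE2 -> SSE4 as the flags allow. A minimal usage sketch, assuming
 * an initialized AVCodecContext *avctx is available: */
#if 0
DSPContext dsp = { 0 };
ff_dsputil_init_mmx(&dsp, avctx); /* installs the best available versions */
dsp.vector_clip_int32(dst, src, min, max, len);
#endif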