x86/me_cmp: port mmxext and sse2 sad functions to yasm
libavcodec/x86/me_cmp_init.c (ffmpeg.git)
/*
 * SIMD-optimized motion estimation
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/me_cmp.h"
#include "libavcodec/mpegvideo.h"

int ff_sum_abs_dctelem_mmx(int16_t *block);
int ff_sum_abs_dctelem_mmxext(int16_t *block);
int ff_sum_abs_dctelem_sse2(int16_t *block);
int ff_sum_abs_dctelem_ssse3(int16_t *block);
int ff_sse8_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                int line_size, int h);
int ff_sse16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                 int line_size, int h);
int ff_sse16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                  int line_size, int h);
int ff_hf_noise8_mmx(uint8_t *pix1, int lsize, int h);
int ff_hf_noise16_mmx(uint8_t *pix1, int lsize, int h);
int ff_sad8_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                   int stride, int h);
int ff_sad16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                    int stride, int h);
int ff_sad16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                  int stride, int h);
int ff_sad8_x2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                      int stride, int h);
int ff_sad16_x2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                       int stride, int h);
int ff_sad16_x2_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                     int stride, int h);
int ff_sad8_y2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                      int stride, int h);
int ff_sad16_y2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                       int stride, int h);
int ff_sad16_y2_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                     int stride, int h);
int ff_sad8_approx_xy2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                              int stride, int h);
int ff_sad16_approx_xy2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                               int stride, int h);
int ff_sad16_approx_xy2_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                             int stride, int h);

#define hadamard_func(cpu)                                              \
    int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1,     \
                                  uint8_t *src2, int stride, int h);    \
    int ff_hadamard8_diff16_ ## cpu(MpegEncContext *s, uint8_t *src1,   \
                                    uint8_t *src2, int stride, int h);

hadamard_func(mmx)
hadamard_func(mmxext)
hadamard_func(sse2)
hadamard_func(ssse3)

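/* NSSE ("noise-preserving" SSE) comparison: the plain SSE score is combined
 * with the difference in high-frequency noise between the two blocks, as
 * measured by the ff_hf_noise* helpers, weighted by avctx->nsse_weight
 * (8 is used as a fallback weight when no context is available). */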
#if HAVE_YASM
static int nsse16_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
                      int line_size, int h)
{
    int score1, score2;

    if (c)
        score1 = c->mecc.sse[0](c, pix1, pix2, line_size, h);
    else
        score1 = ff_sse16_mmx(c, pix1, pix2, line_size, h);
    score2 = ff_hf_noise16_mmx(pix1, line_size, h) + ff_hf_noise8_mmx(pix1+8, line_size, h)
           - ff_hf_noise16_mmx(pix2, line_size, h) - ff_hf_noise8_mmx(pix2+8, line_size, h);

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

static int nsse8_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
                     int line_size, int h)
{
    int score1 = ff_sse8_mmx(c, pix1, pix2, line_size, h);
    int score2 = ff_hf_noise8_mmx(pix1, line_size, h) -
                 ff_hf_noise8_mmx(pix2, line_size, h);

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

#endif /* HAVE_YASM */

#if HAVE_INLINE_ASM

static int vsad_intra16_mmx(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
                            int line_size, int h)
{
    int tmp;

    av_assert2((((uintptr_t) pix) & 7) == 0);
    av_assert2((line_size & 7) == 0);

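    /* The SUM macro below computes, for one pair of rows, the per-byte
     * absolute difference between the current rows and the previous ones.
     * MMX has no unsigned byte abs-diff instruction, so |a - b| is built
     * from two saturating subtractions (psubusb in both directions) ORed
     * together, then widened to words against zero (%mm7) and accumulated
     * in %mm6. */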
#define SUM(in0, in1, out0, out1)               \
    "movq (%0), %%mm2\n"                        \
    "movq 8(%0), %%mm3\n"                       \
    "add %2,%0\n"                               \
    "movq %%mm2, " #out0 "\n"                   \
    "movq %%mm3, " #out1 "\n"                   \
    "psubusb " #in0 ", %%mm2\n"                 \
    "psubusb " #in1 ", %%mm3\n"                 \
    "psubusb " #out0 ", " #in0 "\n"             \
    "psubusb " #out1 ", " #in1 "\n"             \
    "por %%mm2, " #in0 "\n"                     \
    "por %%mm3, " #in1 "\n"                     \
    "movq " #in0 ", %%mm2\n"                    \
    "movq " #in1 ", %%mm3\n"                    \
    "punpcklbw %%mm7, " #in0 "\n"               \
    "punpcklbw %%mm7, " #in1 "\n"               \
    "punpckhbw %%mm7, %%mm2\n"                  \
    "punpckhbw %%mm7, %%mm3\n"                  \
    "paddw " #in1 ", " #in0 "\n"                \
    "paddw %%mm3, %%mm2\n"                      \
    "paddw %%mm2, " #in0 "\n"                   \
    "paddw " #in0 ", %%mm6\n"


    __asm__ volatile (
        "movl    %3, %%ecx\n"
        "pxor %%mm6, %%mm6\n"
        "pxor %%mm7, %%mm7\n"
        "movq  (%0), %%mm0\n"
        "movq 8(%0), %%mm1\n"
        "add %2, %0\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movq  %%mm6, %%mm0\n"
        "psrlq $32,   %%mm6\n"
        "paddw %%mm6, %%mm0\n"
        "movq  %%mm0, %%mm6\n"
        "psrlq $16,   %%mm0\n"
        "paddw %%mm6, %%mm0\n"
        "movd  %%mm0, %1\n"
        : "+r" (pix), "=r" (tmp)
        : "r" ((x86_reg) line_size), "m" (h)
        : "%ecx");

    return tmp & 0xFFFF;
}
#undef SUM

static int vsad_intra16_mmxext(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
                               int line_size, int h)
{
    int tmp;

    av_assert2((((uintptr_t) pix) & 7) == 0);
    av_assert2((line_size & 7) == 0);

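    /* With MMXEXT, psadbw sums the absolute byte differences of a whole
     * 8-byte row in a single instruction, so the macro collapses to a few
     * operations and no final 16-bit mask is needed on the result. */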
#define SUM(in0, in1, out0, out1)               \
    "movq (%0), " #out0 "\n"                    \
    "movq 8(%0), " #out1 "\n"                   \
    "add %2, %0\n"                              \
    "psadbw " #out0 ", " #in0 "\n"              \
    "psadbw " #out1 ", " #in1 "\n"              \
    "paddw " #in1 ", " #in0 "\n"                \
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %3, %%ecx\n"
        "pxor %%mm6, %%mm6\n"
        "pxor %%mm7, %%mm7\n"
        "movq (%0), %%mm0\n"
        "movq 8(%0), %%mm1\n"
        "add %2, %0\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movd %%mm6, %1\n"
        : "+r" (pix), "=r" (tmp)
        : "r" ((x86_reg) line_size), "m" (h)
        : "%ecx");

    return tmp;
}
#undef SUM

static int vsad16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                      int line_size, int h)
{
    int tmp;

    av_assert2((((uintptr_t) pix1) & 7) == 0);
    av_assert2((((uintptr_t) pix2) & 7) == 0);
    av_assert2((line_size & 7) == 0);

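    /* The two blocks are compared by first taking the signed byte
     * difference (psubb) and flipping the sign bit with a 0x80 mask built
     * in %mm7 (pcmpeqw / psllw 15 / packsswb), which remaps the signed
     * result into unsigned range so the psubusb/por absolute-difference
     * trick from above still applies. */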
#define SUM(in0, in1, out0, out1)       \
    "movq (%0), %%mm2\n"                \
    "movq (%1), " #out0 "\n"            \
    "movq 8(%0), %%mm3\n"               \
    "movq 8(%1), " #out1 "\n"           \
    "add %3, %0\n"                      \
    "add %3, %1\n"                      \
    "psubb " #out0 ", %%mm2\n"          \
    "psubb " #out1 ", %%mm3\n"          \
    "pxor %%mm7, %%mm2\n"               \
    "pxor %%mm7, %%mm3\n"               \
    "movq %%mm2, " #out0 "\n"           \
    "movq %%mm3, " #out1 "\n"           \
    "psubusb " #in0 ", %%mm2\n"         \
    "psubusb " #in1 ", %%mm3\n"         \
    "psubusb " #out0 ", " #in0 "\n"     \
    "psubusb " #out1 ", " #in1 "\n"     \
    "por %%mm2, " #in0 "\n"             \
    "por %%mm3, " #in1 "\n"             \
    "movq " #in0 ", %%mm2\n"            \
    "movq " #in1 ", %%mm3\n"            \
    "punpcklbw %%mm7, " #in0 "\n"       \
    "punpcklbw %%mm7, " #in1 "\n"       \
    "punpckhbw %%mm7, %%mm2\n"          \
    "punpckhbw %%mm7, %%mm3\n"          \
    "paddw " #in1 ", " #in0 "\n"        \
    "paddw %%mm3, %%mm2\n"              \
    "paddw %%mm2, " #in0 "\n"           \
    "paddw " #in0 ", %%mm6\n"


    __asm__ volatile (
        "movl %4, %%ecx\n"
        "pxor %%mm6, %%mm6\n"
        "pcmpeqw %%mm7, %%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0), %%mm0\n"
        "movq (%1), %%mm2\n"
        "movq 8(%0), %%mm1\n"
        "movq 8(%1), %%mm3\n"
        "add %3, %0\n"
        "add %3, %1\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movq %%mm6, %%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6, %%mm0\n"
        "movq %%mm0, %%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6, %%mm0\n"
        "movd %%mm0, %2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg) line_size), "m" (h)
        : "%ecx");

    return tmp & 0x7FFF;
}
#undef SUM

static int vsad16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                         int line_size, int h)
{
    int tmp;

    av_assert2((((uintptr_t) pix1) & 7) == 0);
    av_assert2((((uintptr_t) pix2) & 7) == 0);
    av_assert2((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1)               \
    "movq (%0), " #out0 "\n"                    \
    "movq (%1), %%mm2\n"                        \
    "movq 8(%0), " #out1 "\n"                   \
    "movq 8(%1), %%mm3\n"                       \
    "add %3, %0\n"                              \
    "add %3, %1\n"                              \
    "psubb %%mm2, " #out0 "\n"                  \
    "psubb %%mm3, " #out1 "\n"                  \
    "pxor %%mm7, " #out0 "\n"                   \
    "pxor %%mm7, " #out1 "\n"                   \
    "psadbw " #out0 ", " #in0 "\n"              \
    "psadbw " #out1 ", " #in1 "\n"              \
    "paddw " #in1 ", " #in0 "\n"                \
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %4, %%ecx\n"
        "pxor %%mm6, %%mm6\n"
        "pcmpeqw %%mm7, %%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0), %%mm0\n"
        "movq (%1), %%mm2\n"
        "movq 8(%0), %%mm1\n"
        "movq 8(%1), %%mm3\n"
        "add %3, %0\n"
        "add %3, %1\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movd %%mm6, %2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg) line_size), "m" (h)
        : "%ecx");

    return tmp;
}
#undef SUM

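/* Rounding constants for the half-pel averaging below: round_tab[1] adds 1
 * per 16-bit lane before the >> 1 of the two-pixel average (x2/y2 cases),
 * round_tab[2] adds 2 before the >> 2 of the four-pixel average (xy2). */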
DECLARE_ASM_CONST(8, uint64_t, round_tab)[3] = {
    0x0000000000000000ULL,
    0x0001000100010001ULL,
    0x0002000200020002ULL,
};

static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len = -(x86_reg)stride * h;
    __asm__ volatile (
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm4    \n\t"
        "add %3, %%"REG_a"              \n\t"
        "psubusb %%mm0, %%mm2           \n\t"
        "psubusb %%mm4, %%mm0           \n\t"
        "movq (%1, %%"REG_a"), %%mm1    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "movq (%2, %%"REG_a"), %%mm5    \n\t"
        "psubusb %%mm1, %%mm3           \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "por %%mm2, %%mm0               \n\t"
        "por %%mm1, %%mm3               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm3, %%mm2              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm3, %%mm2             \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %3, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" ((x86_reg) stride));
}

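/* sad8_2_mmx: SAD of blk2 against the rounded average of two predictions
 * (blk1a and blk1b); %mm5 must hold round_tab[1] and %mm7 zero, which the
 * PIX_SAD wrappers below set up. Used for the x2/y2 half-pel cases. */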
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
                              int stride, int h)
{
    x86_reg len = -(x86_reg)stride * h;
    __asm__ volatile (
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm1    \n\t"
        "movq (%1, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm0, %%mm1             \n\t"
        "paddw %%mm2, %%mm3             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm2    \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "psrlw $1, %%mm1                \n\t"
        "psrlw $1, %%mm3                \n\t"
        "packuswb %%mm3, %%mm1          \n\t"
        "psubusb %%mm1, %%mm4           \n\t"
        "psubusb %%mm2, %%mm1           \n\t"
        "por %%mm4, %%mm1               \n\t"
        "movq %%mm1, %%mm0              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len),
          "r" ((x86_reg) stride));
}

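/* sad8_4_mmx: SAD against the four-pixel (xy2 half-pel) average; the
 * horizontal pair sums of the previous row are carried in %mm0/%mm1 across
 * iterations so each source row is only loaded once, and round_tab[2]
 * provides the +2 rounding before the >> 2. */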
static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len = -(x86_reg)stride * h;
    __asm__ volatile (
        "movq  (%1, %%"REG_a"), %%mm0   \n\t"
        "movq 1(%1, %%"REG_a"), %%mm2   \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq  (%2, %%"REG_a"), %%mm2   \n\t"
        "movq 1(%2, %%"REG_a"), %%mm4   \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "movq %%mm4, %%mm5              \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "punpcklbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm5         \n\t"
        "paddw %%mm4, %%mm2             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "movq %5, %%mm5                 \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        "paddw %%mm5, %%mm0             \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm5    \n\t"
        "psrlw $2, %%mm0                \n\t"
        "psrlw $2, %%mm1                \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "psubusb %%mm0, %%mm4           \n\t"
        "psubusb %%mm5, %%mm0           \n\t"
        "por %%mm4, %%mm0               \n\t"
        "movq %%mm0, %%mm4              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm4         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm4, %%mm6             \n\t"
        "movq  %%mm2, %%mm0             \n\t"
        "movq  %%mm3, %%mm1             \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len),
          "r" ((x86_reg) stride), "m" (round_tab[2]));
}

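/* Horizontal reduction of the four 16-bit partial sums accumulated in %mm6;
 * the final & 0xFFFF extracts the low word, which holds the total, and
 * discards the other lanes still packed in the result. */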
static inline int sum_mmx(void)
{
    int ret;
    __asm__ volatile (
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $32, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $16, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret));
    return ret & 0xFFFF;
}

static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1 + 1, blk2, stride, h);
}

static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1 + stride, blk2, stride, h);
}

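/* PIX_SAD instantiates the full- and half-pel SAD entry points for one
 * instruction-set suffix: sad8/sad16 plus their x2, y2 and xy2 variants.
 * Each wrapper zeroes the %mm6 accumulator and %mm7 (and preloads %mm5
 * with round_tab[1] where averaging needs it) before calling the inner
 * loops above. */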
#define PIX_SAD(suf)                                                    \
static int sad8_ ## suf(MpegEncContext *v, uint8_t *blk2,               \
                        uint8_t *blk1, int stride, int h)               \
{                                                                       \
    av_assert2(h == 8);                                                 \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        :);                                                             \
                                                                        \
    sad8_1_ ## suf(blk1, blk2, stride, 8);                              \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad8_x2_ ## suf(MpegEncContext *v, uint8_t *blk2,            \
                           uint8_t *blk1, int stride, int h)            \
{                                                                       \
    av_assert2(h == 8);                                                 \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        "movq %0, %%mm5        \n\t"                                    \
        :: "m" (round_tab[1]));                                         \
                                                                        \
    sad8_x2a_ ## suf(blk1, blk2, stride, 8);                            \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad8_y2_ ## suf(MpegEncContext *v, uint8_t *blk2,            \
                           uint8_t *blk1, int stride, int h)            \
{                                                                       \
    av_assert2(h == 8);                                                 \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        "movq %0, %%mm5        \n\t"                                    \
        :: "m" (round_tab[1]));                                         \
                                                                        \
    sad8_y2a_ ## suf(blk1, blk2, stride, 8);                            \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad8_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2,           \
                            uint8_t *blk1, int stride, int h)           \
{                                                                       \
    av_assert2(h == 8);                                                 \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        ::);                                                            \
                                                                        \
    sad8_4_ ## suf(blk1, blk2, stride, 8);                              \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad16_ ## suf(MpegEncContext *v, uint8_t *blk2,              \
                         uint8_t *blk1, int stride, int h)              \
{                                                                       \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        :);                                                             \
                                                                        \
    sad8_1_ ## suf(blk1,     blk2,     stride, h);                      \
    sad8_1_ ## suf(blk1 + 8, blk2 + 8, stride, h);                      \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad16_x2_ ## suf(MpegEncContext *v, uint8_t *blk2,           \
                            uint8_t *blk1, int stride, int h)           \
{                                                                       \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        "movq %0, %%mm5        \n\t"                                    \
        :: "m" (round_tab[1]));                                         \
                                                                        \
    sad8_x2a_ ## suf(blk1,     blk2,     stride, h);                    \
    sad8_x2a_ ## suf(blk1 + 8, blk2 + 8, stride, h);                    \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad16_y2_ ## suf(MpegEncContext *v, uint8_t *blk2,           \
                            uint8_t *blk1, int stride, int h)           \
{                                                                       \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        "movq %0, %%mm5        \n\t"                                    \
        :: "m" (round_tab[1]));                                         \
                                                                        \
    sad8_y2a_ ## suf(blk1,     blk2,     stride, h);                    \
    sad8_y2a_ ## suf(blk1 + 8, blk2 + 8, stride, h);                    \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad16_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2,          \
                             uint8_t *blk1, int stride, int h)          \
{                                                                       \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        ::);                                                            \
                                                                        \
    sad8_4_ ## suf(blk1,     blk2,     stride, h);                      \
    sad8_4_ ## suf(blk1 + 8, blk2 + 8, stride, h);                      \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \

PIX_SAD(mmx)

#endif /* HAVE_INLINE_ASM */

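/* The checks below run from least to most capable instruction set, so the
 * SSE2/SSSE3 blocks override function pointers already set by the MMX and
 * MMXEXT blocks when the CPU supports them. INLINE_* guards the inline
 * assembly above, EXTERNAL_* the yasm implementations declared at the top.
 *
 * A caller would then use the filled-in context roughly like this
 * (hypothetical sketch, not part of this file):
 *
 *     MECmpContext mecc;
 *     ff_me_cmp_init(&mecc, avctx);   // dispatches here on x86
 *     int cost = mecc.pix_abs[0][1](s, cur, ref, stride, 16);
 */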
av_cold void ff_me_cmp_init_x86(MECmpContext *c, AVCodecContext *avctx)
{
    int cpu_flags = av_get_cpu_flags();

#if HAVE_INLINE_ASM
    if (INLINE_MMX(cpu_flags)) {
        c->pix_abs[0][0] = sad16_mmx;
        c->pix_abs[0][1] = sad16_x2_mmx;
        c->pix_abs[0][2] = sad16_y2_mmx;
        c->pix_abs[0][3] = sad16_xy2_mmx;
        c->pix_abs[1][0] = sad8_mmx;
        c->pix_abs[1][1] = sad8_x2_mmx;
        c->pix_abs[1][2] = sad8_y2_mmx;
        c->pix_abs[1][3] = sad8_xy2_mmx;

        c->sad[0] = sad16_mmx;
        c->sad[1] = sad8_mmx;

        c->vsad[4] = vsad_intra16_mmx;

        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->vsad[0] = vsad16_mmx;
        }
    }

    if (INLINE_MMXEXT(cpu_flags)) {
        c->vsad[4] = vsad_intra16_mmxext;

        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->vsad[0] = vsad16_mmxext;
        }
    }

#endif /* HAVE_INLINE_ASM */

    if (EXTERNAL_MMX(cpu_flags)) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmx;
        c->sum_abs_dctelem   = ff_sum_abs_dctelem_mmx;
        c->sse[0]            = ff_sse16_mmx;
        c->sse[1]            = ff_sse8_mmx;
#if HAVE_YASM
        c->nsse[0]           = nsse16_mmx;
        c->nsse[1]           = nsse8_mmx;
#endif
    }

    if (EXTERNAL_MMXEXT(cpu_flags)) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmxext;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmxext;
        c->sum_abs_dctelem   = ff_sum_abs_dctelem_mmxext;

        c->sad[0] = ff_sad16_mmxext;
        c->sad[1] = ff_sad8_mmxext;

        c->pix_abs[0][0] = ff_sad16_mmxext;
        c->pix_abs[0][1] = ff_sad16_x2_mmxext;
        c->pix_abs[0][2] = ff_sad16_y2_mmxext;
        c->pix_abs[1][0] = ff_sad8_mmxext;
        c->pix_abs[1][1] = ff_sad8_x2_mmxext;
        c->pix_abs[1][2] = ff_sad8_y2_mmxext;

        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->pix_abs[0][3] = ff_sad16_approx_xy2_mmxext;
            c->pix_abs[1][3] = ff_sad8_approx_xy2_mmxext;
        }
    }

    if (EXTERNAL_SSE2(cpu_flags)) {
        c->sse[0]            = ff_sse16_sse2;
        c->sum_abs_dctelem   = ff_sum_abs_dctelem_sse2;

#if HAVE_ALIGNED_STACK
        c->hadamard8_diff[0] = ff_hadamard8_diff16_sse2;
        c->hadamard8_diff[1] = ff_hadamard8_diff_sse2;
#endif
        if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW) && avctx->codec_id != AV_CODEC_ID_SNOW) {
            c->sad[0]        = ff_sad16_sse2;
            c->pix_abs[0][0] = ff_sad16_sse2;
            c->pix_abs[0][1] = ff_sad16_x2_sse2;
            c->pix_abs[0][2] = ff_sad16_y2_sse2;

            if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
                c->pix_abs[0][3] = ff_sad16_approx_xy2_sse2;
            }
        }
    }

    if (EXTERNAL_SSSE3(cpu_flags)) {
        c->sum_abs_dctelem   = ff_sum_abs_dctelem_ssse3;
#if HAVE_ALIGNED_STACK
        c->hadamard8_diff[0] = ff_hadamard8_diff16_ssse3;
        c->hadamard8_diff[1] = ff_hadamard8_diff_ssse3;
#endif
    }
}