/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <assert.h>

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/dct.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
#include "dsputil_x86.h"

void ff_get_pixels_mmx(int16_t *block, const uint8_t *pixels, int line_size);
void ff_get_pixels_sse2(int16_t *block, const uint8_t *pixels, int line_size);
void ff_diff_pixels_mmx(int16_t *block, const uint8_t *s1, const uint8_t *s2,
                        int line_size);
int ff_pix_sum16_mmx(uint8_t *pix, int line_size);
int ff_pix_norm1_mmx(uint8_t *pix, int line_size);

#if HAVE_INLINE_ASM

static int sse8_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                    int line_size, int h)
{
    int tmp;

    __asm__ volatile (
        "movl %4, %%ecx \n"
        "shr $1, %%ecx \n"       /* two rows per iteration */
        "pxor %%mm0, %%mm0 \n"   /* mm0 = 0 */
        "pxor %%mm7, %%mm7 \n"   /* mm7 holds the sum */
        "1: \n"
        "movq (%0), %%mm1 \n"    /* mm1 = pix1[0][0 - 7] */
        "movq (%1), %%mm2 \n"    /* mm2 = pix2[0][0 - 7] */
        "movq (%0, %3), %%mm3 \n" /* mm3 = pix1[1][0 - 7] */
        "movq (%1, %3), %%mm4 \n" /* mm4 = pix2[1][0 - 7] */

        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /*       OR the results to get absolute difference */
        "movq %%mm1, %%mm5 \n"
        "movq %%mm3, %%mm6 \n"
        "psubusb %%mm2, %%mm1 \n"
        "psubusb %%mm4, %%mm3 \n"
        "psubusb %%mm5, %%mm2 \n"
        "psubusb %%mm6, %%mm4 \n"
        "por %%mm1, %%mm2 \n"
        "por %%mm3, %%mm4 \n"

        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2, %%mm1 \n"
        "movq %%mm4, %%mm3 \n"

        "punpckhbw %%mm0, %%mm2 \n"
        "punpckhbw %%mm0, %%mm4 \n"
        "punpcklbw %%mm0, %%mm1 \n" /* mm2 now spread over (mm1, mm2) */
        "punpcklbw %%mm0, %%mm3 \n" /* mm4 now spread over (mm3, mm4) */

        "pmaddwd %%mm2, %%mm2 \n"
        "pmaddwd %%mm4, %%mm4 \n"
        "pmaddwd %%mm1, %%mm1 \n"
        "pmaddwd %%mm3, %%mm3 \n"
86 "lea (%0, %3, 2), %0 \n" /* pix1 += 2 * line_size */
87 "lea (%1, %3, 2), %1 \n" /* pix2 += 2 * line_size */
89 "paddd %%mm2, %%mm1 \n"
90 "paddd %%mm4, %%mm3 \n"
91 "paddd %%mm1, %%mm7 \n"
92 "paddd %%mm3, %%mm7 \n"
97 "movq %%mm7, %%mm1 \n"
98 "psrlq $32, %%mm7 \n" /* shift hi dword to lo */
99 "paddd %%mm7, %%mm1 \n"
101 : "+r" (pix1), "+r" (pix2), "=r" (tmp)
102 : "r" ((x86_reg) line_size), "m" (h)
static int sse16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                     int line_size, int h)
{
    int tmp;

    __asm__ volatile (
        "movl %4, %%ecx\n"
        "pxor %%mm0, %%mm0\n"    /* mm0 = 0 */
        "pxor %%mm7, %%mm7\n"    /* mm7 holds the sum */
        "1:\n"
        "movq (%0), %%mm1\n"     /* mm1 = pix1[0 - 7] */
        "movq (%1), %%mm2\n"     /* mm2 = pix2[0 - 7] */
        "movq 8(%0), %%mm3\n"    /* mm3 = pix1[8 - 15] */
        "movq 8(%1), %%mm4\n"    /* mm4 = pix2[8 - 15] */

        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /*       OR the results to get absolute difference */
        "movq %%mm1, %%mm5\n"
        "movq %%mm3, %%mm6\n"
        "psubusb %%mm2, %%mm1\n"
        "psubusb %%mm4, %%mm3\n"
        "psubusb %%mm5, %%mm2\n"
        "psubusb %%mm6, %%mm4\n"
        "por %%mm1, %%mm2\n"
        "por %%mm3, %%mm4\n"

        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2, %%mm1\n"
        "movq %%mm4, %%mm3\n"

        "punpckhbw %%mm0, %%mm2\n"
        "punpckhbw %%mm0, %%mm4\n"
        "punpcklbw %%mm0, %%mm1\n" /* mm2 now spread over (mm1, mm2) */
        "punpcklbw %%mm0, %%mm3\n" /* mm4 now spread over (mm3, mm4) */

        "pmaddwd %%mm2, %%mm2\n"
        "pmaddwd %%mm4, %%mm4\n"
        "pmaddwd %%mm1, %%mm1\n"
        "pmaddwd %%mm3, %%mm3\n"

        "add %3, %0\n"
        "add %3, %1\n"

        "paddd %%mm2, %%mm1\n"
        "paddd %%mm4, %%mm3\n"
        "paddd %%mm1, %%mm7\n"
        "paddd %%mm3, %%mm7\n"

        "decl %%ecx\n"
        "jnz 1b\n"

        "movq %%mm7, %%mm1\n"
        "psrlq $32, %%mm7\n"     /* shift hi dword to lo */
        "paddd %%mm7, %%mm1\n"
        "movd %%mm1, %2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg) line_size), "m" (h)
        : "%ecx");

    return tmp;
}

static int hf_noise8_mmx(uint8_t *pix1, int line_size, int h)
{
    int tmp;

    __asm__ volatile (
        "movl %3, %%ecx\n"
        "pxor %%mm7, %%mm7\n"
        "pxor %%mm6, %%mm6\n"

        "movq (%0), %%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7, %%mm0\n"
        "punpcklbw %%mm7, %%mm1\n"
        "punpckhbw %%mm7, %%mm2\n"
        "punpckhbw %%mm7, %%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "add %2, %0\n"

        "movq (%0), %%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7, %%mm4\n"
        "punpcklbw %%mm7, %%mm1\n"
        "punpckhbw %%mm7, %%mm5\n"
        "punpckhbw %%mm7, %%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
227 "movq %%mm0, %%mm1\n"
231 "movq %%mm0, %%mm2\n"
232 "movq %%mm1, %%mm3\n"
233 "punpcklbw %%mm7, %%mm0\n"
234 "punpcklbw %%mm7, %%mm1\n"
235 "punpckhbw %%mm7, %%mm2\n"
236 "punpckhbw %%mm7, %%mm3\n"
237 "psubw %%mm1, %%mm0\n"
238 "psubw %%mm3, %%mm2\n"
239 "psubw %%mm0, %%mm4\n"
240 "psubw %%mm2, %%mm5\n"
241 "pxor %%mm3, %%mm3\n"
242 "pxor %%mm1, %%mm1\n"
243 "pcmpgtw %%mm4, %%mm3\n\t"
244 "pcmpgtw %%mm5, %%mm1\n\t"
245 "pxor %%mm3, %%mm4\n"
246 "pxor %%mm1, %%mm5\n"
247 "psubw %%mm3, %%mm4\n"
248 "psubw %%mm1, %%mm5\n"
249 "paddw %%mm4, %%mm5\n"
250 "paddw %%mm5, %%mm6\n"
255 "movq %%mm4, %%mm1\n"
259 "movq %%mm4, %%mm5\n"
260 "movq %%mm1, %%mm3\n"
261 "punpcklbw %%mm7, %%mm4\n"
262 "punpcklbw %%mm7, %%mm1\n"
263 "punpckhbw %%mm7, %%mm5\n"
264 "punpckhbw %%mm7, %%mm3\n"
265 "psubw %%mm1, %%mm4\n"
266 "psubw %%mm3, %%mm5\n"
267 "psubw %%mm4, %%mm0\n"
268 "psubw %%mm5, %%mm2\n"
269 "pxor %%mm3, %%mm3\n"
270 "pxor %%mm1, %%mm1\n"
271 "pcmpgtw %%mm0, %%mm3\n\t"
272 "pcmpgtw %%mm2, %%mm1\n\t"
273 "pxor %%mm3, %%mm0\n"
274 "pxor %%mm1, %%mm2\n"
275 "psubw %%mm3, %%mm0\n"
276 "psubw %%mm1, %%mm2\n"
277 "paddw %%mm0, %%mm2\n"
278 "paddw %%mm2, %%mm6\n"
284 "movq %%mm6, %%mm0\n"
285 "punpcklwd %%mm7, %%mm0\n"
286 "punpckhwd %%mm7, %%mm6\n"
287 "paddd %%mm0, %%mm6\n"
289 "movq %%mm6, %%mm0\n"
291 "paddd %%mm6, %%mm0\n"
293 : "+r" (pix1), "=r" (tmp)
294 : "r" ((x86_reg) line_size), "g" (h - 2)
static int hf_noise16_mmx(uint8_t *pix1, int line_size, int h)
{
    int tmp;
    uint8_t *pix = pix1;

    __asm__ volatile (
        "movl %3, %%ecx\n"
        "pxor %%mm7, %%mm7\n"
        "pxor %%mm6, %%mm6\n"

        "movq (%0), %%mm0\n"
        "movq 1(%0), %%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7, %%mm0\n"
        "punpcklbw %%mm7, %%mm1\n"
        "punpckhbw %%mm7, %%mm2\n"
        "punpckhbw %%mm7, %%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "add %2, %0\n"

        "movq (%0), %%mm4\n"
        "movq 1(%0), %%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7, %%mm4\n"
        "punpcklbw %%mm7, %%mm1\n"
        "punpckhbw %%mm7, %%mm5\n"
        "punpckhbw %%mm7, %%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
350 "movq 1(%0), %%mm1\n"
351 "movq %%mm0, %%mm2\n"
352 "movq %%mm1, %%mm3\n"
353 "punpcklbw %%mm7, %%mm0\n"
354 "punpcklbw %%mm7, %%mm1\n"
355 "punpckhbw %%mm7, %%mm2\n"
356 "punpckhbw %%mm7, %%mm3\n"
357 "psubw %%mm1, %%mm0\n"
358 "psubw %%mm3, %%mm2\n"
359 "psubw %%mm0, %%mm4\n"
360 "psubw %%mm2, %%mm5\n"
361 "pxor %%mm3, %%mm3\n"
362 "pxor %%mm1, %%mm1\n"
363 "pcmpgtw %%mm4, %%mm3\n\t"
364 "pcmpgtw %%mm5, %%mm1\n\t"
365 "pxor %%mm3, %%mm4\n"
366 "pxor %%mm1, %%mm5\n"
367 "psubw %%mm3, %%mm4\n"
368 "psubw %%mm1, %%mm5\n"
369 "paddw %%mm4, %%mm5\n"
370 "paddw %%mm5, %%mm6\n"
375 "movq 1(%0), %%mm1\n"
376 "movq %%mm4, %%mm5\n"
377 "movq %%mm1, %%mm3\n"
378 "punpcklbw %%mm7, %%mm4\n"
379 "punpcklbw %%mm7, %%mm1\n"
380 "punpckhbw %%mm7, %%mm5\n"
381 "punpckhbw %%mm7, %%mm3\n"
382 "psubw %%mm1, %%mm4\n"
383 "psubw %%mm3, %%mm5\n"
384 "psubw %%mm4, %%mm0\n"
385 "psubw %%mm5, %%mm2\n"
386 "pxor %%mm3, %%mm3\n"
387 "pxor %%mm1, %%mm1\n"
388 "pcmpgtw %%mm0, %%mm3\n\t"
389 "pcmpgtw %%mm2, %%mm1\n\t"
390 "pxor %%mm3, %%mm0\n"
391 "pxor %%mm1, %%mm2\n"
392 "psubw %%mm3, %%mm0\n"
393 "psubw %%mm1, %%mm2\n"
394 "paddw %%mm0, %%mm2\n"
395 "paddw %%mm2, %%mm6\n"
401 "movq %%mm6, %%mm0\n"
402 "punpcklwd %%mm7, %%mm0\n"
403 "punpckhwd %%mm7, %%mm6\n"
404 "paddd %%mm0, %%mm6\n"
406 "movq %%mm6, %%mm0\n"
408 "paddd %%mm6, %%mm0\n"
410 : "+r" (pix1), "=r" (tmp)
411 : "r" ((x86_reg) line_size), "g" (h - 2)
414 return tmp + hf_noise8_mmx(pix + 8, line_size, h);
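
/* Noise-preserving SSE: the plain SSE score is augmented with a penalty
 * proportional to how much the high-frequency "noise" (the neighboring-pixel
 * differences measured by hf_noise8/16 above) changes between the two
 * blocks, so the encoder does not favor modes that smooth noise away. */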
static int nsse16_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
                      int line_size, int h)
{
    int score1, score2;

    if (c)
        score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
    else
        score1 = sse16_mmx(c, pix1, pix2, line_size, h);
    score2 = hf_noise16_mmx(pix1, line_size, h) -
             hf_noise16_mmx(pix2, line_size, h);

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

static int nsse8_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
                     int line_size, int h)
{
    int score1 = sse8_mmx(c, pix1, pix2, line_size, h);
    int score2 = hf_noise8_mmx(pix1, line_size, h) -
                 hf_noise8_mmx(pix2, line_size, h);

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

static int vsad_intra16_mmx(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
                            int line_size, int h)
{
    int tmp;

    assert((((uintptr_t) pix) & 7) == 0);
    assert((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), %%mm2\n" \
    "movq 8(%0), %%mm3\n" \
    "add %2, %0\n" \
    "movq %%mm2, " #out0 "\n" \
    "movq %%mm3, " #out1 "\n" \
    "psubusb " #in0 ", %%mm2\n" \
    "psubusb " #in1 ", %%mm3\n" \
    "psubusb " #out0 ", " #in0 "\n" \
    "psubusb " #out1 ", " #in1 "\n" \
    "por %%mm2, " #in0 "\n" \
    "por %%mm3, " #in1 "\n" \
    "movq " #in0 ", %%mm2\n" \
    "movq " #in1 ", %%mm3\n" \
    "punpcklbw %%mm7, " #in0 "\n" \
    "punpcklbw %%mm7, " #in1 "\n" \
    "punpckhbw %%mm7, %%mm2\n" \
    "punpckhbw %%mm7, %%mm3\n" \
    "paddw " #in1 ", " #in0 "\n" \
    "paddw %%mm3, %%mm2\n" \
    "paddw %%mm2, " #in0 "\n" \
    "paddw " #in0 ", %%mm6\n"
482 "pxor %%mm6, %%mm6\n"
483 "pxor %%mm7, %%mm7\n"
485 "movq 8(%0), %%mm1\n"
490 SUM(%%mm4, %%mm5, %%mm0, %%mm1)
492 SUM(%%mm0, %%mm1, %%mm4, %%mm5)
497 "movq %%mm6, %%mm0\n"
499 "paddw %%mm6, %%mm0\n"
500 "movq %%mm0, %%mm6\n"
502 "paddw %%mm6, %%mm0\n"
504 : "+r" (pix), "=r" (tmp)
505 : "r" ((x86_reg) line_size), "m" (h)
static int vsad_intra16_mmxext(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
                               int line_size, int h)
{
    int tmp;

    assert((((uintptr_t) pix) & 7) == 0);
    assert((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), " #out0 "\n" \
    "movq 8(%0), " #out1 "\n" \
    "add %2, %0\n" \
    "psadbw " #out0 ", " #in0 "\n" \
    "psadbw " #out1 ", " #in1 "\n" \
    "paddw " #in1 ", " #in0 "\n" \
    "paddw " #in0 ", %%mm6\n"
531 "pxor %%mm6, %%mm6\n"
532 "pxor %%mm7, %%mm7\n"
534 "movq 8(%0), %%mm1\n"
539 SUM(%%mm4, %%mm5, %%mm0, %%mm1)
541 SUM(%%mm0, %%mm1, %%mm4, %%mm5)
547 : "+r" (pix), "=r" (tmp)
548 : "r" ((x86_reg) line_size), "m" (h)
static int vsad16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                      int line_size, int h)
{
    int tmp;

    assert((((uintptr_t) pix1) & 7) == 0);
    assert((((uintptr_t) pix2) & 7) == 0);
    assert((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), %%mm2\n" \
    "movq (%1), " #out0 "\n" \
    "movq 8(%0), %%mm3\n" \
    "movq 8(%1), " #out1 "\n" \
    "add %3, %0\n" \
    "add %3, %1\n" \
    "psubb " #out0 ", %%mm2\n" \
    "psubb " #out1 ", %%mm3\n" \
    "pxor %%mm7, %%mm2\n" \
    "pxor %%mm7, %%mm3\n" \
    "movq %%mm2, " #out0 "\n" \
    "movq %%mm3, " #out1 "\n" \
    "psubusb " #in0 ", %%mm2\n" \
    "psubusb " #in1 ", %%mm3\n" \
    "psubusb " #out0 ", " #in0 "\n" \
    "psubusb " #out1 ", " #in1 "\n" \
    "por %%mm2, " #in0 "\n" \
    "por %%mm3, " #in1 "\n" \
    "movq " #in0 ", %%mm2\n" \
    "movq " #in1 ", %%mm3\n" \
    "punpcklbw %%mm7, " #in0 "\n" \
    "punpcklbw %%mm7, " #in1 "\n" \
    "punpckhbw %%mm7, %%mm2\n" \
    "punpckhbw %%mm7, %%mm3\n" \
    "paddw " #in1 ", " #in0 "\n" \
    "paddw %%mm3, %%mm2\n" \
    "paddw %%mm2, " #in0 "\n" \
    "paddw " #in0 ", %%mm6\n"
597 "pxor %%mm6, %%mm6\n"
598 "pcmpeqw %%mm7, %%mm7\n"
600 "packsswb %%mm7, %%mm7\n"
603 "movq 8(%0), %%mm1\n"
604 "movq 8(%1), %%mm3\n"
607 "psubb %%mm2, %%mm0\n"
608 "psubb %%mm3, %%mm1\n"
609 "pxor %%mm7, %%mm0\n"
610 "pxor %%mm7, %%mm1\n"
614 SUM(%%mm4, %%mm5, %%mm0, %%mm1)
616 SUM(%%mm0, %%mm1, %%mm4, %%mm5)
621 "movq %%mm6, %%mm0\n"
623 "paddw %%mm6, %%mm0\n"
624 "movq %%mm0, %%mm6\n"
626 "paddw %%mm6, %%mm0\n"
628 : "+r" (pix1), "+r" (pix2), "=r" (tmp)
629 : "r" ((x86_reg) line_size), "m" (h)
static int vsad16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                         int line_size, int h)
{
    int tmp;

    assert((((uintptr_t) pix1) & 7) == 0);
    assert((((uintptr_t) pix2) & 7) == 0);
    assert((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), " #out0 "\n" \
    "movq (%1), %%mm2\n" \
    "movq 8(%0), " #out1 "\n" \
    "movq 8(%1), %%mm3\n" \
    "add %3, %0\n" \
    "add %3, %1\n" \
    "psubb %%mm2, " #out0 "\n" \
    "psubb %%mm3, " #out1 "\n" \
    "pxor %%mm7, " #out0 "\n" \
    "pxor %%mm7, " #out1 "\n" \
    "psadbw " #out0 ", " #in0 "\n" \
    "psadbw " #out1 ", " #in1 "\n" \
    "paddw " #in1 ", " #in0 "\n" \
    "paddw " #in0 ", %%mm6\n"
663 "pxor %%mm6, %%mm6\n"
664 "pcmpeqw %%mm7, %%mm7\n"
666 "packsswb %%mm7, %%mm7\n"
669 "movq 8(%0), %%mm1\n"
670 "movq 8(%1), %%mm3\n"
673 "psubb %%mm2, %%mm0\n"
674 "psubb %%mm3, %%mm1\n"
675 "pxor %%mm7, %%mm0\n"
676 "pxor %%mm7, %%mm1\n"
680 SUM(%%mm4, %%mm5, %%mm0, %%mm1)
682 SUM(%%mm0, %%mm1, %%mm4, %%mm5)
688 : "+r" (pix1), "+r" (pix2), "=r" (tmp)
689 : "r" ((x86_reg) line_size), "m" (h)
#define MMABS_MMX(a, z) \
    "pxor " #z ", " #z " \n\t" \
    "pcmpgtw " #a ", " #z " \n\t" \
    "pxor " #z ", " #a " \n\t" \
    "psubw " #z ", " #a " \n\t"

#define MMABS_MMXEXT(a, z) \
    "pxor " #z ", " #z " \n\t" \
    "psubw " #a ", " #z " \n\t" \
    "pmaxsw " #z ", " #a " \n\t"

#define MMABS_SSSE3(a, z) \
    "pabsw " #a ", " #a " \n\t"

#define MMABS_SUM(a, z, sum) \
    MMABS(a, z) \
    "paddusw " #a ", " #sum " \n\t"

/* FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can get
 * up to about 100k on extreme inputs. But that's very unlikely to occur in
 * natural video, and even then an alternative mv/mode with lower cost is
 * almost always available. */
#define HSUM_MMX(a, t, dst) \
    "movq " #a ", " #t " \n\t" \
    "psrlq $32, " #a " \n\t" \
    "paddusw " #t ", " #a " \n\t" \
    "movq " #a ", " #t " \n\t" \
    "psrlq $16, " #a " \n\t" \
    "paddusw " #t ", " #a " \n\t" \
    "movd " #a ", " #dst " \n\t"

#define HSUM_MMXEXT(a, t, dst) \
    "pshufw $0x0E, " #a ", " #t " \n\t" \
    "paddusw " #t ", " #a " \n\t" \
    "pshufw $0x01, " #a ", " #t " \n\t" \
    "paddusw " #t ", " #a " \n\t" \
    "movd " #a ", " #dst " \n\t"

#define HSUM_SSE2(a, t, dst) \
    "movhlps " #a ", " #t " \n\t" \
    "paddusw " #t ", " #a " \n\t" \
    "pshuflw $0x0E, " #a ", " #t " \n\t" \
    "paddusw " #t ", " #a " \n\t" \
    "pshuflw $0x01, " #a ", " #t " \n\t" \
    "paddusw " #t ", " #a " \n\t" \
    "movd " #a ", " #dst " \n\t"

#define DCT_SAD4(m, mm, o) \
    "mov"#m" "#o" + 0(%1), " #mm "2 \n\t" \
    "mov"#m" "#o" + 16(%1), " #mm "3 \n\t" \
    "mov"#m" "#o" + 32(%1), " #mm "4 \n\t" \
    "mov"#m" "#o" + 48(%1), " #mm "5 \n\t" \
    MMABS_SUM(mm ## 2, mm ## 6, mm ## 0) \
    MMABS_SUM(mm ## 3, mm ## 7, mm ## 1) \
    MMABS_SUM(mm ## 4, mm ## 6, mm ## 0) \
    MMABS_SUM(mm ## 5, mm ## 7, mm ## 1)

#define DCT_SAD_MMX \
    "pxor %%mm0, %%mm0 \n\t" \
    "pxor %%mm1, %%mm1 \n\t" \
    DCT_SAD4(q, %%mm, 0) \
    DCT_SAD4(q, %%mm, 8) \
    DCT_SAD4(q, %%mm, 64) \
    DCT_SAD4(q, %%mm, 72) \
    "paddusw %%mm1, %%mm0 \n\t" \
    HSUM(%%mm0, %%mm1, %0)
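
/* DCT_SAD4 accumulates |coeff| for four rows starting at byte offset o; the
 * 8x8 block of int16_t coefficients has 16-byte rows, so for MMX (8-byte
 * loads) offsets 0/8 cover the left/right halves of rows 0-3 and 64/72 those
 * of rows 4-7, while the SSE2 variant below needs only offsets 0 and 64. */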

#define DCT_SAD_SSE2 \
    "pxor %%xmm0, %%xmm0 \n\t" \
    "pxor %%xmm1, %%xmm1 \n\t" \
    DCT_SAD4(dqa, %%xmm, 0) \
    DCT_SAD4(dqa, %%xmm, 64) \
    "paddusw %%xmm1, %%xmm0 \n\t" \
    HSUM(%%xmm0, %%xmm1, %0)

#define DCT_SAD_FUNC(cpu) \
static int sum_abs_dctelem_ ## cpu(int16_t *block) \
{ \
    int sum; \
    __asm__ volatile ( \
        DCT_SAD \
        : "=r" (sum) \
        : "r" (block)); \
    return sum & 0xFFFF; \
}

#define DCT_SAD         DCT_SAD_MMX
#define HSUM(a, t, dst) HSUM_MMX(a, t, dst)
#define MMABS(a, z)     MMABS_MMX(a, z)
DCT_SAD_FUNC(mmx)
#undef MMABS
#undef HSUM

#define HSUM(a, t, dst) HSUM_MMXEXT(a, t, dst)
#define MMABS(a, z)     MMABS_MMXEXT(a, z)
DCT_SAD_FUNC(mmxext)
#undef HSUM
#undef DCT_SAD

#define DCT_SAD         DCT_SAD_SSE2
#define HSUM(a, t, dst) HSUM_SSE2(a, t, dst)
DCT_SAD_FUNC(sse2)
#undef MMABS
#undef HSUM
#undef DCT_SAD

#if HAVE_SSSE3_INLINE
#define MMABS(a, z)     MMABS_SSSE3(a, z)
#define DCT_SAD         DCT_SAD_SSE2
DCT_SAD_FUNC(ssse3)
#undef MMABS
#undef DCT_SAD
#endif

#define PHADDD(a, t) \
    "movq " #a ", " #t " \n\t" \
    "psrlq $32, " #a " \n\t" \
    "paddd " #t ", " #a " \n\t"

/*
 * pmulhw:   dst[0 - 15] = (src[0 - 15] * dst[0 - 15])[16 - 31]
 * pmulhrw:  dst[0 - 15] = (src[0 - 15] * dst[0 - 15] + 0x8000)[16 - 31]
 * pmulhrsw: dst[0 - 15] = (src[0 - 15] * dst[0 - 15] + 0x4000)[15 - 30]
 */
#define PMULHRW(x, y, s, o) \
    "pmulhw " #s ", " #x " \n\t" \
    "pmulhw " #s ", " #y " \n\t" \
    "paddw " #o ", " #x " \n\t" \
    "paddw " #o ", " #y " \n\t" \
    "psraw $1, " #x " \n\t" \
    "psraw $1, " #y " \n\t"
#define DEF(x) x ## _mmx
#define SET_RND MOVQ_WONE
#define SCALE_OFFSET 1

#include "dsputil_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW

#define DEF(x) x ## _3dnow
#define SET_RND(x)
#define SCALE_OFFSET 0
#define PMULHRW(x, y, s, o) \
    "pmulhrw " #s ", " #x " \n\t" \
    "pmulhrw " #s ", " #y " \n\t"

#include "dsputil_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW

#if HAVE_SSSE3_INLINE
#undef PHADDD
#define DEF(x) x ## _ssse3
#define SET_RND(x)
#define SCALE_OFFSET -1

#define PHADDD(a, t) \
    "pshufw $0x0E, " #a ", " #t " \n\t" \
    /* faster than phaddd on core2 */ \
    "paddd " #t ", " #a " \n\t"

#define PMULHRW(x, y, s, o) \
    "pmulhrsw " #s ", " #x " \n\t" \
    "pmulhrsw " #s ", " #y " \n\t"

#include "dsputil_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW
#undef PHADDD
#endif /* HAVE_SSSE3_INLINE */

#endif /* HAVE_INLINE_ASM */

int ff_sse16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                  int line_size, int h);

#define hadamard_func(cpu) \
    int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1, \
                                  uint8_t *src2, int stride, int h); \
    int ff_hadamard8_diff16_ ## cpu(MpegEncContext *s, uint8_t *src1, \
                                    uint8_t *src2, int stride, int h);

hadamard_func(mmx)
hadamard_func(mmxext)
hadamard_func(sse2)
hadamard_func(ssse3)

av_cold void ff_dsputilenc_init_mmx(DSPContext *c, AVCodecContext *avctx,
                                    unsigned high_bit_depth)
{
    int cpu_flags      = av_get_cpu_flags();
    const int dct_algo = avctx->dct_algo;

    if (EXTERNAL_MMX(cpu_flags)) {
        if (!high_bit_depth)
            c->get_pixels = ff_get_pixels_mmx;
        c->diff_pixels = ff_diff_pixels_mmx;
        c->pix_sum     = ff_pix_sum16_mmx;
        c->pix_norm1   = ff_pix_norm1_mmx;
    }

    if (EXTERNAL_SSE2(cpu_flags))
        if (!high_bit_depth)
            c->get_pixels = ff_get_pixels_sse2;

#if HAVE_INLINE_ASM
    if (INLINE_MMX(cpu_flags)) {
        if (!high_bit_depth &&
            (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX))
            c->fdct = ff_fdct_mmx;

        c->sum_abs_dctelem = sum_abs_dctelem_mmx;

        c->sse[0]  = sse16_mmx;
        c->sse[1]  = sse8_mmx;
        c->vsad[4] = vsad_intra16_mmx;

        c->nsse[0] = nsse16_mmx;
        c->nsse[1] = nsse8_mmx;
        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->vsad[0]      = vsad16_mmx;
            c->try_8x8basis = try_8x8basis_mmx;
        }
        c->add_8x8basis = add_8x8basis_mmx;
    }

    if (INLINE_AMD3DNOW(cpu_flags)) {
        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->try_8x8basis = try_8x8basis_3dnow;
        }
        c->add_8x8basis = add_8x8basis_3dnow;
    }

    if (INLINE_MMXEXT(cpu_flags)) {
        if (!high_bit_depth &&
            (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX))
            c->fdct = ff_fdct_mmxext;

        c->sum_abs_dctelem = sum_abs_dctelem_mmxext;
        c->vsad[4]         = vsad_intra16_mmxext;

        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->vsad[0] = vsad16_mmxext;
        }
    }

    if (INLINE_SSE2(cpu_flags)) {
        if (!high_bit_depth &&
            (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX))
            c->fdct = ff_fdct_sse2;

        c->sum_abs_dctelem = sum_abs_dctelem_sse2;
    }

#if HAVE_SSSE3_INLINE
    if (INLINE_SSSE3(cpu_flags)) {
        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->try_8x8basis = try_8x8basis_ssse3;
        }
        c->add_8x8basis    = add_8x8basis_ssse3;
        c->sum_abs_dctelem = sum_abs_dctelem_ssse3;
    }
#endif /* HAVE_SSSE3_INLINE */
#endif /* HAVE_INLINE_ASM */

    if (EXTERNAL_MMX(cpu_flags)) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmx;
    }

    if (EXTERNAL_MMXEXT(cpu_flags)) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmxext;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmxext;
    }

    if (EXTERNAL_SSE2(cpu_flags)) {
        c->sse[0] = ff_sse16_sse2;

#if HAVE_ALIGNED_STACK
        c->hadamard8_diff[0] = ff_hadamard8_diff16_sse2;
        c->hadamard8_diff[1] = ff_hadamard8_diff_sse2;
#endif
    }

    if (EXTERNAL_SSSE3(cpu_flags) && HAVE_ALIGNED_STACK) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_ssse3;
        c->hadamard8_diff[1] = ff_hadamard8_diff_ssse3;
    }

    ff_dsputil_init_pix_mmx(c, avctx);
}