/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"
/* pixel operations */
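/* Each ff_pw_* constant below replicates one 16-bit word across the full
 * MMX/SSE register and each ff_pb_* constant replicates one byte; the
 * assembly loads them as packed immediates (e.g. for pmullw/paddb). */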
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15)   = 0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_17)   = { 0x0011001100110011ULL, 0x0011001100110011ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20)   = 0x0014001400140014ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42)   = 0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53)   = 0x0035003500350035ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96)   = 0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128)  = 0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255)  = 0x00ff00ff00ff00ffULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_512)  = { 0x0200020002000200ULL, 0x0200020002000200ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL };

DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F)   = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC)   = 0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };
#if HAVE_YASM
void ff_put_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1,
                                     uint8_t *src2, int dstStride,
                                     int src1Stride, int h);
void ff_avg_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_avg_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                                      int dstStride, int src1Stride, int h);
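/* Note: the *_l2 primitives (implemented in yasm) store the rounded average
 * of src1 and src2 into dst; the qpel code below uses them to blend a
 * lowpass-filtered plane with the source. */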
static void ff_put_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
                                   ptrdiff_t line_size, int h)
{
    ff_put_pixels8_mmxext(block,     pixels,     line_size, h);
    ff_put_pixels8_mmxext(block + 8, pixels + 8, line_size, h);
}
void ff_put_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride,
                                                 int h);
void ff_put_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride,
                                                int h);
void ff_put_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride);
void ff_avg_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride);
void ff_put_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_avg_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride);
#define ff_put_no_rnd_pixels16_mmxext ff_put_pixels16_mmxext
#define ff_put_no_rnd_pixels8_mmxext  ff_put_pixels8_mmxext
#endif /* HAVE_YASM */
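/* The rounding variants below are generated by including
 * dsputil_rnd_template.c with DEF/SET_RND/PAVGBP/PAVGB bound to the
 * desired rounding behaviour of each primitive. */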
#if HAVE_INLINE_ASM

/***********************************/
/* MMX rounding */

#define DEF(x, y)                x ## _ ## y ## _mmx
#define SET_RND                  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)        PAVGB_MMX(a, b, c, e)
#define OP_AVG(a, b, c, e)       PAVGB_MMX(a, b, c, e)

#include "dsputil_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

#endif /* HAVE_INLINE_ASM */
/***********************************/
/* MMXEXT specific */

#if HAVE_YASM

//FIXME the following could be optimized too ...
static void ff_avg_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
                                   int line_size, int h)
{
    ff_avg_pixels8_mmxext(block,     pixels,     line_size, h);
    ff_avg_pixels8_mmxext(block + 8, pixels + 8, line_size, h);
}

#endif /* HAVE_YASM */
/***********************************/

#if HAVE_INLINE_ASM

void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;

    /* read the pixels */
    p   = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile (
        "movq      (%3), %%mm0          \n\t"
        "movq     8(%3), %%mm1          \n\t"
        "movq    16(%3), %%mm2          \n\t"
        "movq    24(%3), %%mm3          \n\t"
        "movq    32(%3), %%mm4          \n\t"
        "movq    40(%3), %%mm5          \n\t"
        "movq    48(%3), %%mm6          \n\t"
        "movq    56(%3), %%mm7          \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "packuswb %%mm3, %%mm2          \n\t"
        "packuswb %%mm5, %%mm4          \n\t"
        "packuswb %%mm7, %%mm6          \n\t"
        "movq     %%mm0, (%0)           \n\t"
        "movq     %%mm2, (%0, %1)       \n\t"
        "movq     %%mm4, (%0, %1, 2)    \n\t"
        "movq     %%mm6, (%0, %2)       \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
           "r"(p)
        : "memory");
    pix += line_size * 4;
    p   += 32;
    // If this were an exact copy of the code above, the compiler would
    // generate some very strange code, thus using "r".
    __asm__ volatile (
        "movq      (%3), %%mm0          \n\t"
        "movq     8(%3), %%mm1          \n\t"
        "movq    16(%3), %%mm2          \n\t"
        "movq    24(%3), %%mm3          \n\t"
        "movq    32(%3), %%mm4          \n\t"
        "movq    40(%3), %%mm5          \n\t"
        "movq    48(%3), %%mm6          \n\t"
        "movq    56(%3), %%mm7          \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "packuswb %%mm3, %%mm2          \n\t"
        "packuswb %%mm5, %%mm4          \n\t"
        "packuswb %%mm7, %%mm6          \n\t"
        "movq     %%mm0, (%0)           \n\t"
        "movq     %%mm2, (%0, %1)       \n\t"
        "movq     %%mm4, (%0, %1, 2)    \n\t"
        "movq     %%mm6, (%0, %2)       \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
        : "memory");
}
#define put_signed_pixels_clamped_mmx_half(off)             \
    "movq          "#off"(%2), %%mm1        \n\t"           \
    "movq     16 + "#off"(%2), %%mm2        \n\t"           \
    "movq     32 + "#off"(%2), %%mm3        \n\t"           \
    "movq     48 + "#off"(%2), %%mm4        \n\t"           \
    "packsswb  8 + "#off"(%2), %%mm1        \n\t"           \
    "packsswb 24 + "#off"(%2), %%mm2        \n\t"           \
    "packsswb 40 + "#off"(%2), %%mm3        \n\t"           \
    "packsswb 56 + "#off"(%2), %%mm4        \n\t"           \
    "paddb             %%mm0, %%mm1         \n\t"           \
    "paddb             %%mm0, %%mm2         \n\t"           \
    "paddb             %%mm0, %%mm3         \n\t"           \
    "paddb             %%mm0, %%mm4         \n\t"           \
    "movq              %%mm1, (%0)          \n\t"           \
    "movq              %%mm2, (%0, %3)      \n\t"           \
    "movq              %%mm3, (%0, %3, 2)   \n\t"           \
    "movq              %%mm4, (%0, %1)      \n\t"
void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                                      int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_pb_80)", %%mm0 \n\t"
        "lea         (%3, %3, 2), %1    \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea         (%0, %3, 4), %0    \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        : "+&r"(pixels), "=&r"(line_skip3)
        : "r"(block), "r"(line_skip)
        : "memory");
}
void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p   = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile (
            "movq        (%2), %%mm0    \n\t"
            "movq       8(%2), %%mm1    \n\t"
            "movq      16(%2), %%mm2    \n\t"
            "movq      24(%2), %%mm3    \n\t"
            "movq          %0, %%mm4    \n\t"
            "movq          %1, %%mm6    \n\t"
            "movq       %%mm4, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm4    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm4, %%mm0    \n\t"
            "paddsw     %%mm5, %%mm1    \n\t"
            "movq       %%mm6, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm6    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm6, %%mm2    \n\t"
            "paddsw     %%mm5, %%mm3    \n\t"
            "packuswb   %%mm1, %%mm0    \n\t"
            "packuswb   %%mm3, %%mm2    \n\t"
            "movq       %%mm0, %0       \n\t"
            "movq       %%mm2, %1       \n\t"
            : "+m"(*pix), "+m"(*(pix + line_size))
            : "r"(p)
            : "memory");
        pix += line_size * 2;
        p   += 16;
    } while (--i);
}
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
                            ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea   (%3, %3), %%"REG_a"      \n\t"
        ".p2align     3                 \n\t"
        "1:                             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq     %%mm0, (%2)           \n\t"
        "movq     %%mm1, (%2, %3)       \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq     %%mm0, (%2)           \n\t"
        "movq     %%mm1, (%2, %3)       \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "subl        $4, %0             \n\t"
        "jnz         1b                 \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory");
}
static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea   (%3, %3), %%"REG_a"      \n\t"
        ".p2align     3                 \n\t"
        "1:                             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq 8(%1    ), %%mm4          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq 8(%1, %3), %%mm5          \n\t"
        "movq     %%mm0,  (%2)          \n\t"
        "movq     %%mm4, 8(%2)          \n\t"
        "movq     %%mm1,  (%2, %3)      \n\t"
        "movq     %%mm5, 8(%2, %3)      \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq 8(%1    ), %%mm4          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq 8(%1, %3), %%mm5          \n\t"
        "movq     %%mm0,  (%2)          \n\t"
        "movq     %%mm4, 8(%2)          \n\t"
        "movq     %%mm1,  (%2, %3)      \n\t"
        "movq     %%mm5, 8(%2, %3)      \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "subl        $4, %0             \n\t"
        "jnz         1b                 \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory");
}
#define CLEAR_BLOCKS(name, n)                           \
static void name(int16_t *blocks)                       \
{                                                       \
    __asm__ volatile (                                  \
        "pxor %%mm7, %%mm7              \n\t"           \
        "mov     %1,        %%"REG_a"   \n\t"           \
        "1:                             \n\t"           \
        "movq %%mm7,   (%0, %%"REG_a")  \n\t"           \
        "movq %%mm7,  8(%0, %%"REG_a")  \n\t"           \
        "movq %%mm7, 16(%0, %%"REG_a")  \n\t"           \
        "movq %%mm7, 24(%0, %%"REG_a")  \n\t"           \
        "add    $32, %%"REG_a"          \n\t"           \
        "js      1b                     \n\t"           \
        :: "r"(((uint8_t *)blocks) + 128 * n),          \
           "i"(-128 * n)                                \
        : "%"REG_a);                                    \
}

CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)
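/* CLEAR_BLOCKS points %0 at the end of the buffer and counts %%REG_a up
 * from -128 * n towards zero, so "js 1b" doubles as both the loop counter
 * test and the branch. */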
static void clear_block_sse(int16_t *block)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0          \n"
        "movaps %%xmm0,    (%0)         \n"
        "movaps %%xmm0,  16(%0)         \n"
        "movaps %%xmm0,  32(%0)         \n"
        "movaps %%xmm0,  48(%0)         \n"
        "movaps %%xmm0,  64(%0)         \n"
        "movaps %%xmm0,  80(%0)         \n"
        "movaps %%xmm0,  96(%0)         \n"
        "movaps %%xmm0, 112(%0)         \n"
        :: "r"(block)
        : "memory");
}
static void clear_blocks_sse(int16_t *blocks)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0              \n"
        "mov        %1,         %%"REG_a"   \n"
        "1:                                 \n"
        "movaps %%xmm0,    (%0, %%"REG_a")  \n"
        "movaps %%xmm0,  16(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  32(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  48(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  64(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  80(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  96(%0, %%"REG_a")  \n"
        "movaps %%xmm0, 112(%0, %%"REG_a")  \n"
        "add      $128,         %%"REG_a"   \n"
        "js         1b                      \n"
        :: "r"(((uint8_t *)blocks) + 128 * 6),
           "i"(-128 * 6)
        : "%"REG_a);
}
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
{
    x86_reg i = 0;

    __asm__ volatile (
        "1:                             \n\t"
        "movq   (%1, %0), %%mm0         \n\t"
        "movq   (%2, %0), %%mm1         \n\t"
        "paddb     %%mm0, %%mm1         \n\t"
        "movq      %%mm1, (%2, %0)      \n\t"
        "movq  8(%1, %0), %%mm0         \n\t"
        "movq  8(%2, %0), %%mm1         \n\t"
        "paddb     %%mm0, %%mm1         \n\t"
        "movq      %%mm1, 8(%2, %0)     \n\t"
        "add         $16, %0            \n\t"
        "cmp          %3, %0            \n\t"
        "js           1b                \n\t"
        : "+r"(i)
        : "r"(src), "r"(dst), "r"((x86_reg)w - 15));

    for ( ; i < w; i++)
        dst[i + 0] += src[i + 0];
}
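/* add_bytes_mmx() above handles 16 bytes per asm iteration; the trailing
 * scalar loop picks up whatever remainder is left once i reaches w - 15. */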
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
                                            const uint8_t *diff, int w,
                                            int *left, int *left_top)
{
    x86_reg w2 = -w;
    x86_reg x;
    int l  = *left     & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile (
        "mov          %7, %3            \n"
        "1:                             \n"
        "movzbl (%3, %4), %2            \n"
        "mov          %2, %k3           \n"
        "sub         %b1, %b3           \n"
        "add         %b0, %b3           \n"
        "mov          %2, %1            \n"
        "cmp          %0, %2            \n"
        "cmovg        %0, %2            \n"
        "cmovg        %1, %0            \n"
        "cmp         %k3, %0            \n"
        "cmovg       %k3, %0            \n"
        "mov          %7, %3            \n"
        "cmp          %2, %0            \n"
        "cmovl        %2, %0            \n"
        "add    (%6, %4), %b0           \n"
        "mov         %b0, (%5, %4)      \n"
        "inc          %4                \n"
        "jl           1b                \n"
        : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        : "r"(dst + w), "r"(diff + w), "rm"(top + w)
        : "memory");
    *left     = l;
    *left_top = tl;
}
/* Draw the edges of width 'w' of an image of size width x height.
 * This MMX version can only handle w == 8 || w == 16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
                           int w, int h, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if (w == 8) {
        __asm__ volatile (
            "1:                             \n\t"
            "movd            (%0), %%mm0    \n\t"
            "punpcklbw      %%mm0, %%mm0    \n\t"
            "punpcklwd      %%mm0, %%mm0    \n\t"
            "punpckldq      %%mm0, %%mm0    \n\t"
            "movq           %%mm0, -8(%0)   \n\t"
            "movq      -8(%0, %2), %%mm1    \n\t"
            "punpckhbw      %%mm1, %%mm1    \n\t"
            "punpckhwd      %%mm1, %%mm1    \n\t"
            "punpckhdq      %%mm1, %%mm1    \n\t"
            "movq           %%mm1, (%0, %2) \n\t"
            "add               %1, %0       \n\t"
            "cmp               %3, %0       \n\t"
            "jb                1b           \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height));
    } else {
        __asm__ volatile (
            "1:                                 \n\t"
            "movd            (%0), %%mm0        \n\t"
            "punpcklbw      %%mm0, %%mm0        \n\t"
            "punpcklwd      %%mm0, %%mm0        \n\t"
            "punpckldq      %%mm0, %%mm0        \n\t"
            "movq           %%mm0, -8(%0)       \n\t"
            "movq           %%mm0, -16(%0)      \n\t"
            "movq      -8(%0, %2), %%mm1        \n\t"
            "punpckhbw      %%mm1, %%mm1        \n\t"
            "punpckhwd      %%mm1, %%mm1        \n\t"
            "punpckhdq      %%mm1, %%mm1        \n\t"
            "movq           %%mm1,  (%0, %2)    \n\t"
            "movq           %%mm1, 8(%0, %2)    \n\t"
            "add               %1, %0           \n\t"
            "cmp               %3, %0           \n\t"
            "jb                1b               \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height));
    }

    /* top and bottom (and hopefully also the corners) */
    if (sides & EDGE_TOP) {
        for (i = 0; i < h; i += 4) {
            ptr = buf - (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
                  "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w));
        }
    }

    if (sides & EDGE_BOTTOM) {
        for (i = 0; i < h; i += 4) {
            ptr = last_line + (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)last_line - (x86_reg)ptr - w),
                  "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
                  "r"(ptr + width + 2 * w));
        }
    }
}

#endif /* HAVE_INLINE_ASM */
#if HAVE_YASM

#define QPEL_OP(OPNAME, ROUNDER, RND, MMX)                              \
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);              \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
                                                   stride, 8);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
                                        stride, stride, 8);             \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride,    \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
                                                   stride, 8);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride,     \
                                        stride, 8);                     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
                                                   8, stride);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
                                        stride, stride, 8);             \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src,            \
                                                   stride, stride);     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
                                                   8, stride);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride,\
                                        stride, 8);                     \
}                                                                       \
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH,              \
                                        8, stride, 9);                  \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[9];                                                   \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src,  \
                                           ptrdiff_t stride)            \
{                                                                       \
    ff_ ## OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);            \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
                                                    stride, 16);        \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
                                         stride, 16);                   \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src,           \
                                                    stride, stride, 16);\
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
                                                    stride, 16);        \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half,            \
                                         stride, stride, 16);           \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
                                                    stride);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
                                         stride, 16);                   \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src,           \
                                                    stride, stride);    \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
                                                    stride);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + stride, half,       \
                                         stride, stride, 16);           \
}                                                                       \
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}
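/* QPEL_OP() expands to one function per quarter-pel position: mcXY means a
 * motion vector with fractional part (X/4, Y/4), so mc00 is the fullpel
 * copy and mc22 the half-pel/half-pel case. */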
QPEL_OP(put_,        ff_pw_16, _,        mmxext)
QPEL_OP(avg_,        ff_pw_16, _,        mmxext)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, mmxext)
#endif /* HAVE_YASM */
#if HAVE_INLINE_ASM
void ff_put_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels8_xy2_mmx(dst, src, stride, 8);
}
void ff_put_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels16_xy2_mmx(dst, src, stride, 16);
}
void ff_avg_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels8_xy2_mmx(dst, src, stride, 8);
}
void ff_avg_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels16_xy2_mmx(dst, src, stride, 16);
}
static void gmc_mmx(uint8_t *dst, uint8_t *src,
                    int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy,
                    int shift, int r, int width, int height)
{
    const int w    = 8;
    const int ix   = ox >> (16 + shift);
    const int iy   = oy >> (16 + shift);
    const int oxs  = ox >> 4;
    const int oys  = oy >> 4;
    const int dxxs = dxx >> 4;
    const int dxys = dxy >> 4;
    const int dyxs = dyx >> 4;
    const int dyys = dyy >> 4;
    const uint16_t r4[4]   = { r, r, r, r };
    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
    const uint64_t shift2  = 2 * shift;
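    /* Dropping 4 fractional bits (the >> 4 above) leaves 12-bit subpel
     * coordinates, so the 16-bit pmullw products of the bilinear weights
     * below fit before the final psrlw. */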
    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
    const int dxh = dxy * (h - 1);
    const int dyw = dyx * (w - 1);
    int x, y;

    if ( // non-constant fullpel offset (3% of blocks)
        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx | dxy | dyx | dyy) & 15 ||
        (unsigned)ix >= width  - w ||
        (unsigned)iy >= height - h) {
        // FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
                 shift, r, width, height);
        return;
    }

    src += ix + iy * stride;
1015 "movd %0, %%mm6 \n\t"
1016 "pxor %%mm7, %%mm7 \n\t"
1017 "punpcklwd %%mm6, %%mm6 \n\t"
1018 "punpcklwd %%mm6, %%mm6 \n\t"
    for (x = 0; x < w; x += 4) {
        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
                            oxs - dxys + dxxs * (x + 1),
                            oxs - dxys + dxxs * (x + 2),
                            oxs - dxys + dxxs * (x + 3) };
        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
                            oys - dyys + dyxs * (x + 1),
                            oys - dyys + dyxs * (x + 2),
                            oys - dyys + dyxs * (x + 3) };
        for (y = 0; y < h; y++) {
            __asm__ volatile (
                "movq      %0, %%mm4    \n\t"
                "movq      %1, %%mm5    \n\t"
                "paddw     %2, %%mm4    \n\t"
                "paddw     %3, %%mm5    \n\t"
                "movq   %%mm4, %0       \n\t"
                "movq   %%mm5, %1       \n\t"
                "psrlw    $12, %%mm4    \n\t"
                "psrlw    $12, %%mm5    \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4));
1047 "movq %%mm6, %%mm2 \n\t"
1048 "movq %%mm6, %%mm1 \n\t"
1049 "psubw %%mm4, %%mm2 \n\t"
1050 "psubw %%mm5, %%mm1 \n\t"
1051 "movq %%mm2, %%mm0 \n\t"
1052 "movq %%mm4, %%mm3 \n\t"
1053 "pmullw %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
1054 "pmullw %%mm5, %%mm3 \n\t" // dx * dy
1055 "pmullw %%mm5, %%mm2 \n\t" // (s - dx) * dy
1056 "pmullw %%mm4, %%mm1 \n\t" // dx * (s - dy)
1058 "movd %4, %%mm5 \n\t"
1059 "movd %3, %%mm4 \n\t"
1060 "punpcklbw %%mm7, %%mm5 \n\t"
1061 "punpcklbw %%mm7, %%mm4 \n\t"
1062 "pmullw %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
1063 "pmullw %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
1065 "movd %2, %%mm5 \n\t"
1066 "movd %1, %%mm4 \n\t"
1067 "punpcklbw %%mm7, %%mm5 \n\t"
1068 "punpcklbw %%mm7, %%mm4 \n\t"
1069 "pmullw %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
1070 "pmullw %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
1071 "paddw %5, %%mm1 \n\t"
1072 "paddw %%mm3, %%mm2 \n\t"
1073 "paddw %%mm1, %%mm0 \n\t"
1074 "paddw %%mm2, %%mm0 \n\t"
1076 "psrlw %6, %%mm0 \n\t"
1077 "packuswb %%mm0, %%mm0 \n\t"
1078 "movd %%mm0, %0 \n\t"
1080 : "=m"(dst[x + y * stride])
1081 : "m"(src[0]), "m"(src[1]),
1082 "m"(src[stride]), "m"(src[stride + 1]),
1083 "m"(*r4), "m"(shift2)
1087 src += 4 - h * stride;
void ff_put_cavs_qpel8_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels8_mmx(dst, src, stride, 8);
}

void ff_avg_cavs_qpel8_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels8_mmx(dst, src, stride, 8);
}

void ff_put_cavs_qpel16_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels16_mmx(dst, src, stride, 16);
}

void ff_avg_cavs_qpel16_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels16_mmx(dst, src, stride, 16);
}

void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src,
                               ptrdiff_t stride, int rnd)
{
    put_pixels8_mmx(dst, src, stride, 8);
}
static void vector_clipf_sse(float *dst, const float *src,
                             float min, float max, int len)
{
    x86_reg i = (len - 16) * 4;
    __asm__ volatile (
        "movss          %3, %%xmm4      \n\t"
        "movss          %4, %%xmm5      \n\t"
        "shufps $0, %%xmm4, %%xmm4      \n\t"
        "shufps $0, %%xmm5, %%xmm5      \n\t"
        "1:                             \n\t"
        "movaps   (%2, %0), %%xmm0      \n\t" // 3/1 on intel
        "movaps 16(%2, %0), %%xmm1      \n\t"
        "movaps 32(%2, %0), %%xmm2      \n\t"
        "movaps 48(%2, %0), %%xmm3      \n\t"
        "maxps      %%xmm4, %%xmm0      \n\t"
        "maxps      %%xmm4, %%xmm1      \n\t"
        "maxps      %%xmm4, %%xmm2      \n\t"
        "maxps      %%xmm4, %%xmm3      \n\t"
        "minps      %%xmm5, %%xmm0      \n\t"
        "minps      %%xmm5, %%xmm1      \n\t"
        "minps      %%xmm5, %%xmm2      \n\t"
        "minps      %%xmm5, %%xmm3      \n\t"
        "movaps     %%xmm0,   (%1, %0)  \n\t"
        "movaps     %%xmm1, 16(%1, %0)  \n\t"
        "movaps     %%xmm2, 32(%1, %0)  \n\t"
        "movaps     %%xmm3, 48(%1, %0)  \n\t"
        "sub           $64, %0          \n\t"
        "jge            1b              \n\t"
        : "+&r"(i)
        : "r"(dst), "r"(src), "m"(min), "m"(max));
}

#endif /* HAVE_INLINE_ASM */
void ff_h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale);
void ff_h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale);

int32_t ff_scalarproduct_int16_mmxext(const int16_t *v1, const int16_t *v2,
                                      int order);
int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2,
                                    int order);
int32_t ff_scalarproduct_and_madd_int16_mmxext(int16_t *v1, const int16_t *v2,
                                               const int16_t *v3,
                                               int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2,
                                             const int16_t *v3,
                                             int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
                                              const int16_t *v3,
                                              int order, int mul);

void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
                                        const int16_t *window, unsigned int len);
void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);
void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
                                  const int16_t *window, unsigned int len);
void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
                                const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
                                 const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);

void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w);
void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w);

void ff_add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top,
                                          const uint8_t *diff, int w,
                                          int *left, int *left_top);
int ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src,
                                      int w, int left);
int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src,
                                     int w, int left);

void ff_vector_clip_int32_mmx     (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse2    (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse4    (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX)                          \
    do {                                                                     \
        c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
    } while (0)
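/* _pixels_tab[IDX] selects the block size (0 = 16x16, 1 = 8x8); the second
 * index encodes the quarter-pel position as x + 4 * y. */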
static av_cold void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx,
                                     int mm_flags)
{
#if HAVE_INLINE_ASM
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    c->put_pixels_clamped        = ff_put_pixels_clamped_mmx;
    c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
    c->add_pixels_clamped        = ff_add_pixels_clamped_mmx;

    if (!high_bit_depth) {
        c->clear_block  = clear_block_mmx;
        c->clear_blocks = clear_blocks_mmx;
        c->draw_edges   = draw_edges_mmx;

        switch (avctx->idct_algo) {
        case FF_IDCT_AUTO:
        case FF_IDCT_SIMPLEMMX:
            c->idct_put              = ff_simple_idct_put_mmx;
            c->idct_add              = ff_simple_idct_add_mmx;
            c->idct                  = ff_simple_idct_mmx;
            c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
            break;
        case FF_IDCT_XVIDMMX:
            c->idct_put              = ff_idct_xvid_mmx_put;
            c->idct_add              = ff_idct_xvid_mmx_add;
            c->idct                  = ff_idct_xvid_mmx;
            break;
        }
    }

    c->add_bytes = add_bytes_mmx;
#endif /* HAVE_INLINE_ASM */

#if HAVE_YASM
    if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
        c->h263_v_loop_filter = ff_h263_v_loop_filter_mmx;
        c->h263_h_loop_filter = ff_h263_h_loop_filter_mmx;
    }

    c->vector_clip_int32 = ff_vector_clip_int32_mmx;
#endif /* HAVE_YASM */
}
static av_cold void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx,
                                        int mm_flags)
{
#if HAVE_INLINE_ASM
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
        c->idct_put = ff_idct_xvid_mmxext_put;
        c->idct_add = ff_idct_xvid_mmxext_add;
        c->idct     = ff_idct_xvid_mmxext;
    }
#endif /* HAVE_INLINE_ASM */

#if HAVE_MMXEXT_EXTERNAL
    SET_QPEL_FUNCS(avg_qpel,        0, 16, mmxext, );
    SET_QPEL_FUNCS(avg_qpel,        1,  8, mmxext, );

    SET_QPEL_FUNCS(put_qpel,        0, 16, mmxext, );
    SET_QPEL_FUNCS(put_qpel,        1,  8, mmxext, );
    SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmxext, );
    SET_QPEL_FUNCS(put_no_rnd_qpel, 1,  8, mmxext, );

    /* slower than cmov version on AMD */
    if (!(mm_flags & AV_CPU_FLAG_3DNOW))
        c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmxext;

    c->scalarproduct_int16          = ff_scalarproduct_int16_mmxext;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmxext;

    if (avctx->flags & CODEC_FLAG_BITEXACT) {
        c->apply_window_int16 = ff_apply_window_int16_mmxext;
    } else {
        c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
    }
#endif /* HAVE_MMXEXT_EXTERNAL */
}
static av_cold void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx,
                                     int mm_flags)
{
#if HAVE_INLINE_ASM
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (!high_bit_depth) {
        if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)) {
            /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
            c->clear_block  = clear_block_sse;
            c->clear_blocks = clear_blocks_sse;
        }
    }

    c->vector_clipf = vector_clipf_sse;
#endif /* HAVE_INLINE_ASM */
}
static av_cold void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
                                      int mm_flags)
{
#if HAVE_SSE2_INLINE
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
        c->idct_put              = ff_idct_xvid_sse2_put;
        c->idct_add              = ff_idct_xvid_sse2_add;
        c->idct                  = ff_idct_xvid_sse2;
        c->idct_permutation_type = FF_SSE2_IDCT_PERM;
    }
#endif /* HAVE_SSE2_INLINE */

#if HAVE_SSE2_EXTERNAL
    c->scalarproduct_int16          = ff_scalarproduct_int16_sse2;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
    if (mm_flags & AV_CPU_FLAG_ATOM) {
        c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
    } else {
        c->vector_clip_int32 = ff_vector_clip_int32_sse2;
    }
    if (avctx->flags & CODEC_FLAG_BITEXACT) {
        c->apply_window_int16 = ff_apply_window_int16_sse2;
    } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
        c->apply_window_int16 = ff_apply_window_int16_round_sse2;
    }
    c->bswap_buf = ff_bswap32_buf_sse2;
#endif /* HAVE_SSE2_EXTERNAL */
}
static av_cold void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
                                       int mm_flags)
{
#if HAVE_SSSE3_EXTERNAL
    c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
    if (mm_flags & AV_CPU_FLAG_SSE4) // not really SSE4, just slow on Conroe
        c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;

    if (mm_flags & AV_CPU_FLAG_ATOM)
        c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
    else
        c->apply_window_int16 = ff_apply_window_int16_ssse3;
    if (!(mm_flags & (AV_CPU_FLAG_SSE42 | AV_CPU_FLAG_3DNOW))) // cachesplit
        c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
    c->bswap_buf = ff_bswap32_buf_ssse3;
#endif /* HAVE_SSSE3_EXTERNAL */
}
static av_cold void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
                                      int mm_flags)
{
#if HAVE_SSE4_EXTERNAL
    c->vector_clip_int32 = ff_vector_clip_int32_sse4;
#endif /* HAVE_SSE4_EXTERNAL */
}
av_cold void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();

#if HAVE_7REGS && HAVE_INLINE_ASM
    if (mm_flags & AV_CPU_FLAG_CMOV)
        c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif

    if (mm_flags & AV_CPU_FLAG_MMX)
        dsputil_init_mmx(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_MMXEXT)
        dsputil_init_mmxext(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE)
        dsputil_init_sse(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE2)
        dsputil_init_sse2(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSSE3)
        dsputil_init_ssse3(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE4)
        dsputil_init_sse4(c, avctx, mm_flags);
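    /* Each init above runs only when its CPU flag is set and may override
     * function pointers installed by an earlier, more generic level, so
     * the most capable implementation wins. */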
    if (CONFIG_ENCODERS)
        ff_dsputilenc_init_mmx(c, avctx);
}