2 * MMX optimized DSP utils
3 * Copyright (c) 2000, 2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 #include "libavutil/cpu.h"
26 #include "libavutil/x86/asm.h"
27 #include "libavcodec/dsputil.h"
28 #include "libavcodec/h264dsp.h"
29 #include "libavcodec/mpegvideo.h"
30 #include "libavcodec/simple_idct.h"
31 #include "dsputil_mmx.h"
32 #include "idct_xvid.h"
37 /* pixel operations */
38 DECLARE_ALIGNED(8, const uint64_t, ff_bone) = 0x0101010101010101ULL;
39 DECLARE_ALIGNED(8, const uint64_t, ff_wtwo) = 0x0002000200020002ULL;
41 DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
42 { 0x8000000080000000ULL, 0x8000000080000000ULL };
44 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_1) = { 0x0001000100010001ULL, 0x0001000100010001ULL };
45 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_2) = { 0x0002000200020002ULL, 0x0002000200020002ULL };
46 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_3) = { 0x0003000300030003ULL, 0x0003000300030003ULL };
47 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_4) = { 0x0004000400040004ULL, 0x0004000400040004ULL };
48 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_5) = { 0x0005000500050005ULL, 0x0005000500050005ULL };
49 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_8) = { 0x0008000800080008ULL, 0x0008000800080008ULL };
50 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_9) = { 0x0009000900090009ULL, 0x0009000900090009ULL };
51 DECLARE_ALIGNED(8, const uint64_t, ff_pw_15) = 0x000F000F000F000FULL;
52 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_16) = { 0x0010001000100010ULL, 0x0010001000100010ULL };
53 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_17) = { 0x0011001100110011ULL, 0x0011001100110011ULL };
54 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_18) = { 0x0012001200120012ULL, 0x0012001200120012ULL };
55 DECLARE_ALIGNED(8, const uint64_t, ff_pw_20) = 0x0014001400140014ULL;
56 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_27) = { 0x001B001B001B001BULL, 0x001B001B001B001BULL };
57 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_28) = { 0x001C001C001C001CULL, 0x001C001C001C001CULL };
58 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_32) = { 0x0020002000200020ULL, 0x0020002000200020ULL };
59 DECLARE_ALIGNED(8, const uint64_t, ff_pw_42) = 0x002A002A002A002AULL;
60 DECLARE_ALIGNED(8, const uint64_t, ff_pw_53) = 0x0035003500350035ULL;
61 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_63) = { 0x003F003F003F003FULL, 0x003F003F003F003FULL };
62 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_64) = { 0x0040004000400040ULL, 0x0040004000400040ULL };
63 DECLARE_ALIGNED(8, const uint64_t, ff_pw_96) = 0x0060006000600060ULL;
64 DECLARE_ALIGNED(8, const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
65 DECLARE_ALIGNED(8, const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;
66 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_512) = { 0x0200020002000200ULL, 0x0200020002000200ULL };
67 DECLARE_ALIGNED(16, const xmm_reg, ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL };
69 DECLARE_ALIGNED(16, const xmm_reg, ff_pb_0) = { 0x0000000000000000ULL, 0x0000000000000000ULL };
70 DECLARE_ALIGNED(16, const xmm_reg, ff_pb_1) = { 0x0101010101010101ULL, 0x0101010101010101ULL };
71 DECLARE_ALIGNED(16, const xmm_reg, ff_pb_3) = { 0x0303030303030303ULL, 0x0303030303030303ULL };
72 DECLARE_ALIGNED(16, const xmm_reg, ff_pb_4) = { 0x0404040404040404ULL, 0x0404040404040404ULL };
73 DECLARE_ALIGNED(8, const uint64_t, ff_pb_7) = 0x0707070707070707ULL;
74 DECLARE_ALIGNED(8, const uint64_t, ff_pb_1F) = 0x1F1F1F1F1F1F1F1FULL;
75 DECLARE_ALIGNED(8, const uint64_t, ff_pb_3F) = 0x3F3F3F3F3F3F3F3FULL;
76 DECLARE_ALIGNED(16, const xmm_reg, ff_pb_80) = { 0x8080808080808080ULL, 0x8080808080808080ULL };
77 DECLARE_ALIGNED(8, const uint64_t, ff_pb_81) = 0x8181818181818181ULL;
78 DECLARE_ALIGNED(16, const xmm_reg, ff_pb_A1) = { 0xA1A1A1A1A1A1A1A1ULL, 0xA1A1A1A1A1A1A1A1ULL };
79 DECLARE_ALIGNED(16, const xmm_reg, ff_pb_F8) = { 0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL };
80 DECLARE_ALIGNED(8, const uint64_t, ff_pb_FC) = 0xFCFCFCFCFCFCFCFCULL;
81 DECLARE_ALIGNED(16, const xmm_reg, ff_pb_FE) = { 0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL };
83 DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
84 DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };
88 #define JUMPALIGN() __asm__ volatile (".p2align 3"::)
89 #define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%"#regd", %%"#regd ::)
91 #define MOVQ_BFE(regd) \
93 "pcmpeqd %%"#regd", %%"#regd" \n\t" \
94 "paddb %%"#regd", %%"#regd" \n\t" ::)
97 #define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_bone))
98 #define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_wtwo))
// for shared libraries it is better to generate these constants in registers
// than to access them through memory
102 #define MOVQ_BONE(regd) \
104 "pcmpeqd %%"#regd", %%"#regd" \n\t" \
105 "psrlw $15, %%"#regd" \n\t" \
106 "packuswb %%"#regd", %%"#regd" \n\t" ::)
108 #define MOVQ_WTWO(regd) \
110 "pcmpeqd %%"#regd", %%"#regd" \n\t" \
111 "psrlw $15, %%"#regd" \n\t" \
112 "psllw $1, %%"#regd" \n\t"::)
// regr is used as a temporary and holds the output result;
// the first argument is unmodified and the second is trashed;
// regfe is expected to contain 0xfefefefefefefefe
119 #define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
120 "movq "#rega", "#regr" \n\t" \
121 "pand "#regb", "#regr" \n\t" \
122 "pxor "#rega", "#regb" \n\t" \
123 "pand "#regfe", "#regb" \n\t" \
124 "psrlq $1, "#regb" \n\t" \
125 "paddb "#regb", "#regr" \n\t"
127 #define PAVGB_MMX(rega, regb, regr, regfe) \
128 "movq "#rega", "#regr" \n\t" \
129 "por "#regb", "#regr" \n\t" \
130 "pxor "#rega", "#regb" \n\t" \
131 "pand "#regfe", "#regb" \n\t" \
132 "psrlq $1, "#regb" \n\t" \
133 "psubb "#regb", "#regr" \n\t"
135 // mm6 is supposed to contain 0xfefefefefefefefe
136 #define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
137 "movq "#rega", "#regr" \n\t" \
138 "movq "#regc", "#regp" \n\t" \
139 "pand "#regb", "#regr" \n\t" \
140 "pand "#regd", "#regp" \n\t" \
141 "pxor "#rega", "#regb" \n\t" \
142 "pxor "#regc", "#regd" \n\t" \
143 "pand %%mm6, "#regb" \n\t" \
144 "pand %%mm6, "#regd" \n\t" \
145 "psrlq $1, "#regb" \n\t" \
146 "psrlq $1, "#regd" \n\t" \
147 "paddb "#regb", "#regr" \n\t" \
148 "paddb "#regd", "#regp" \n\t"
150 #define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
151 "movq "#rega", "#regr" \n\t" \
152 "movq "#regc", "#regp" \n\t" \
153 "por "#regb", "#regr" \n\t" \
154 "por "#regd", "#regp" \n\t" \
155 "pxor "#rega", "#regb" \n\t" \
156 "pxor "#regc", "#regd" \n\t" \
157 "pand %%mm6, "#regb" \n\t" \
158 "pand %%mm6, "#regd" \n\t" \
159 "psrlq $1, "#regd" \n\t" \
160 "psrlq $1, "#regb" \n\t" \
161 "psubb "#regb", "#regr" \n\t" \
162 "psubb "#regd", "#regp" \n\t"
164 /***********************************/
165 /* MMX no rounding */
166 #define DEF(x, y) x ## _no_rnd_ ## y ## _mmx
167 #define SET_RND MOVQ_WONE
168 #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
169 #define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
170 #define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e)
172 #include "dsputil_rnd_template.c"
178 /***********************************/
181 #define DEF(x, y) x ## _ ## y ## _mmx
182 #define SET_RND MOVQ_WTWO
183 #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
184 #define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)
186 #include "dsputil_rnd_template.c"
194 /***********************************/
197 #define DEF(x) x ## _3dnow
198 #define PAVGB "pavgusb"
200 #define SKIP_FOR_3DNOW
202 #include "dsputil_avg_template.c"
207 #undef SKIP_FOR_3DNOW
209 /***********************************/
210 /* MMXEXT specific */
212 #define DEF(x) x ## _mmxext
/* pavgb is available only in the MMXEXT instruction set */
215 #define PAVGB "pavgb"
218 #include "dsputil_avg_template.c"
224 #define put_no_rnd_pixels16_mmx put_pixels16_mmx
225 #define put_no_rnd_pixels8_mmx put_pixels8_mmx
226 #define put_pixels16_mmxext put_pixels16_mmx
227 #define put_pixels8_mmxext put_pixels8_mmx
228 #define put_pixels4_mmxext put_pixels4_mmx
229 #define put_no_rnd_pixels16_mmxext put_no_rnd_pixels16_mmx
230 #define put_no_rnd_pixels8_mmxext put_no_rnd_pixels8_mmx
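/* Added note: a full-pel copy involves no averaging, so the no-rounding and
 * MMXEXT flavours of put_pixels can simply alias the plain MMX versions. */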
232 /***********************************/
235 void ff_put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels,
241 /* read the pixels */
246 "movq (%3), %%mm0 \n\t"
247 "movq 8(%3), %%mm1 \n\t"
248 "movq 16(%3), %%mm2 \n\t"
249 "movq 24(%3), %%mm3 \n\t"
250 "movq 32(%3), %%mm4 \n\t"
251 "movq 40(%3), %%mm5 \n\t"
252 "movq 48(%3), %%mm6 \n\t"
253 "movq 56(%3), %%mm7 \n\t"
254 "packuswb %%mm1, %%mm0 \n\t"
255 "packuswb %%mm3, %%mm2 \n\t"
256 "packuswb %%mm5, %%mm4 \n\t"
257 "packuswb %%mm7, %%mm6 \n\t"
258 "movq %%mm0, (%0) \n\t"
259 "movq %%mm2, (%0, %1) \n\t"
260 "movq %%mm4, (%0, %1, 2) \n\t"
261 "movq %%mm6, (%0, %2) \n\t"
262 :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
265 pix += line_size * 4;
// if an exact copy of the code above were placed here, the compiler
// would generate some very strange code
272 "movq (%3), %%mm0 \n\t"
273 "movq 8(%3), %%mm1 \n\t"
274 "movq 16(%3), %%mm2 \n\t"
275 "movq 24(%3), %%mm3 \n\t"
276 "movq 32(%3), %%mm4 \n\t"
277 "movq 40(%3), %%mm5 \n\t"
278 "movq 48(%3), %%mm6 \n\t"
279 "movq 56(%3), %%mm7 \n\t"
280 "packuswb %%mm1, %%mm0 \n\t"
281 "packuswb %%mm3, %%mm2 \n\t"
282 "packuswb %%mm5, %%mm4 \n\t"
283 "packuswb %%mm7, %%mm6 \n\t"
284 "movq %%mm0, (%0) \n\t"
285 "movq %%mm2, (%0, %1) \n\t"
286 "movq %%mm4, (%0, %1, 2) \n\t"
287 "movq %%mm6, (%0, %2) \n\t"
288 :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
292 #define put_signed_pixels_clamped_mmx_half(off) \
293 "movq "#off"(%2), %%mm1 \n\t" \
294 "movq 16 + "#off"(%2), %%mm2 \n\t" \
295 "movq 32 + "#off"(%2), %%mm3 \n\t" \
296 "movq 48 + "#off"(%2), %%mm4 \n\t" \
297 "packsswb 8 + "#off"(%2), %%mm1 \n\t" \
298 "packsswb 24 + "#off"(%2), %%mm2 \n\t" \
299 "packsswb 40 + "#off"(%2), %%mm3 \n\t" \
300 "packsswb 56 + "#off"(%2), %%mm4 \n\t" \
301 "paddb %%mm0, %%mm1 \n\t" \
302 "paddb %%mm0, %%mm2 \n\t" \
303 "paddb %%mm0, %%mm3 \n\t" \
304 "paddb %%mm0, %%mm4 \n\t" \
305 "movq %%mm1, (%0) \n\t" \
306 "movq %%mm2, (%0, %3) \n\t" \
307 "movq %%mm3, (%0, %3, 2) \n\t" \
308 "movq %%mm4, (%0, %1) \n\t"
310 void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels,
313 x86_reg line_skip = line_size;
317 "movq "MANGLE(ff_pb_80)", %%mm0 \n\t"
318 "lea (%3, %3, 2), %1 \n\t"
319 put_signed_pixels_clamped_mmx_half(0)
320 "lea (%0, %3, 4), %0 \n\t"
321 put_signed_pixels_clamped_mmx_half(64)
322 : "+&r"(pixels), "=&r"(line_skip3)
323 : "r"(block), "r"(line_skip)
327 void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels,
334 /* read the pixels */
341 "movq (%2), %%mm0 \n\t"
342 "movq 8(%2), %%mm1 \n\t"
343 "movq 16(%2), %%mm2 \n\t"
344 "movq 24(%2), %%mm3 \n\t"
345 "movq %0, %%mm4 \n\t"
346 "movq %1, %%mm6 \n\t"
347 "movq %%mm4, %%mm5 \n\t"
348 "punpcklbw %%mm7, %%mm4 \n\t"
349 "punpckhbw %%mm7, %%mm5 \n\t"
350 "paddsw %%mm4, %%mm0 \n\t"
351 "paddsw %%mm5, %%mm1 \n\t"
352 "movq %%mm6, %%mm5 \n\t"
353 "punpcklbw %%mm7, %%mm6 \n\t"
354 "punpckhbw %%mm7, %%mm5 \n\t"
355 "paddsw %%mm6, %%mm2 \n\t"
356 "paddsw %%mm5, %%mm3 \n\t"
357 "packuswb %%mm1, %%mm0 \n\t"
358 "packuswb %%mm3, %%mm2 \n\t"
359 "movq %%mm0, %0 \n\t"
360 "movq %%mm2, %1 \n\t"
361 : "+m"(*pix), "+m"(*(pix + line_size))
364 pix += line_size * 2;
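/* Rough scalar form of the loop above (illustration only), two rows per pass:
 *     pixels[j] = av_clip_uint8(pixels[j] + block[j]);
 * with packuswb supplying the unsigned saturation. */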
369 static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
370 int line_size, int h)
373 "lea (%3, %3), %%"REG_a" \n\t"
376 "movq (%1 ), %%mm0 \n\t"
377 "movq (%1, %3), %%mm1 \n\t"
378 "movq %%mm0, (%2) \n\t"
379 "movq %%mm1, (%2, %3) \n\t"
380 "add %%"REG_a", %1 \n\t"
381 "add %%"REG_a", %2 \n\t"
382 "movq (%1 ), %%mm0 \n\t"
383 "movq (%1, %3), %%mm1 \n\t"
384 "movq %%mm0, (%2) \n\t"
385 "movq %%mm1, (%2, %3) \n\t"
386 "add %%"REG_a", %1 \n\t"
387 "add %%"REG_a", %2 \n\t"
390 : "+g"(h), "+r"(pixels), "+r"(block)
391 : "r"((x86_reg)line_size)
396 static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
397 int line_size, int h)
400 "lea (%3, %3), %%"REG_a" \n\t"
403 "movq (%1 ), %%mm0 \n\t"
404 "movq 8(%1 ), %%mm4 \n\t"
405 "movq (%1, %3), %%mm1 \n\t"
406 "movq 8(%1, %3), %%mm5 \n\t"
407 "movq %%mm0, (%2) \n\t"
408 "movq %%mm4, 8(%2) \n\t"
409 "movq %%mm1, (%2, %3) \n\t"
410 "movq %%mm5, 8(%2, %3) \n\t"
411 "add %%"REG_a", %1 \n\t"
412 "add %%"REG_a", %2 \n\t"
413 "movq (%1 ), %%mm0 \n\t"
414 "movq 8(%1 ), %%mm4 \n\t"
415 "movq (%1, %3), %%mm1 \n\t"
416 "movq 8(%1, %3), %%mm5 \n\t"
417 "movq %%mm0, (%2) \n\t"
418 "movq %%mm4, 8(%2) \n\t"
419 "movq %%mm1, (%2, %3) \n\t"
420 "movq %%mm5, 8(%2, %3) \n\t"
421 "add %%"REG_a", %1 \n\t"
422 "add %%"REG_a", %2 \n\t"
425 : "+g"(h), "+r"(pixels), "+r"(block)
426 : "r"((x86_reg)line_size)
431 #define CLEAR_BLOCKS(name, n) \
432 static void name(DCTELEM *blocks) \
435 "pxor %%mm7, %%mm7 \n\t" \
436 "mov %1, %%"REG_a" \n\t" \
438 "movq %%mm7, (%0, %%"REG_a") \n\t" \
439 "movq %%mm7, 8(%0, %%"REG_a") \n\t" \
440 "movq %%mm7, 16(%0, %%"REG_a") \n\t" \
441 "movq %%mm7, 24(%0, %%"REG_a") \n\t" \
442 "add $32, %%"REG_a" \n\t" \
444 :: "r"(((uint8_t *)blocks) + 128 * n), \
449 CLEAR_BLOCKS(clear_blocks_mmx, 6)
450 CLEAR_BLOCKS(clear_block_mmx, 1)
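/* Added note: these clear helpers and the SSE versions below are vectorized
 * memset (sketch only):
 *     clear_block(block)   ~ memset(block,  0,     64 * sizeof(DCTELEM));
 *     clear_blocks(blocks) ~ memset(blocks, 0, 6 * 64 * sizeof(DCTELEM));
 * using 8-byte (MMX) or 16-byte (SSE) stores. */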
452 static void clear_block_sse(DCTELEM *block)
455 "xorps %%xmm0, %%xmm0 \n"
456 "movaps %%xmm0, (%0) \n"
457 "movaps %%xmm0, 16(%0) \n"
458 "movaps %%xmm0, 32(%0) \n"
459 "movaps %%xmm0, 48(%0) \n"
460 "movaps %%xmm0, 64(%0) \n"
461 "movaps %%xmm0, 80(%0) \n"
462 "movaps %%xmm0, 96(%0) \n"
463 "movaps %%xmm0, 112(%0) \n"
469 static void clear_blocks_sse(DCTELEM *blocks)
472 "xorps %%xmm0, %%xmm0 \n"
473 "mov %1, %%"REG_a" \n"
475 "movaps %%xmm0, (%0, %%"REG_a") \n"
476 "movaps %%xmm0, 16(%0, %%"REG_a") \n"
477 "movaps %%xmm0, 32(%0, %%"REG_a") \n"
478 "movaps %%xmm0, 48(%0, %%"REG_a") \n"
479 "movaps %%xmm0, 64(%0, %%"REG_a") \n"
480 "movaps %%xmm0, 80(%0, %%"REG_a") \n"
481 "movaps %%xmm0, 96(%0, %%"REG_a") \n"
482 "movaps %%xmm0, 112(%0, %%"REG_a") \n"
483 "add $128, %%"REG_a" \n"
485 :: "r"(((uint8_t *)blocks) + 128 * 6),
491 static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
497 "movq (%1, %0), %%mm0 \n\t"
498 "movq (%2, %0), %%mm1 \n\t"
499 "paddb %%mm0, %%mm1 \n\t"
500 "movq %%mm1, (%2, %0) \n\t"
501 "movq 8(%1, %0), %%mm0 \n\t"
502 "movq 8(%2, %0), %%mm1 \n\t"
503 "paddb %%mm0, %%mm1 \n\t"
504 "movq %%mm1, 8(%2, %0) \n\t"
510 : "r"(src), "r"(dst), "r"((x86_reg)w - 15)
513 dst[i + 0] += src[i + 0];
517 static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
518 const uint8_t *diff, int w,
519 int *left, int *left_top)
523 int l = *left & 0xff;
524 int tl = *left_top & 0xff;
529 "movzbl (%3, %4), %2 \n"
542 "add (%6, %4), %b0 \n"
543 "mov %b0, (%5, %4) \n"
546 : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
547 : "r"(dst + w), "r"(diff + w), "rm"(top + w)
static inline void transpose4x4(uint8_t *dst, uint8_t *src,
                                x86_reg dst_stride, x86_reg src_stride)
{
    __asm__ volatile ( // FIXME could save 1 instruction if done as 8x4 ...
556 "movd (%1), %%mm0 \n\t"
558 "movd (%1), %%mm1 \n\t"
559 "movd (%1,%3,1), %%mm2 \n\t"
560 "movd (%1,%3,2), %%mm3 \n\t"
561 "punpcklbw %%mm1, %%mm0 \n\t"
562 "punpcklbw %%mm3, %%mm2 \n\t"
563 "movq %%mm0, %%mm1 \n\t"
564 "punpcklwd %%mm2, %%mm0 \n\t"
565 "punpckhwd %%mm2, %%mm1 \n\t"
566 "movd %%mm0, (%0) \n\t"
568 "punpckhdq %%mm0, %%mm0 \n\t"
569 "movd %%mm0, (%0) \n\t"
570 "movd %%mm1, (%0,%2,1) \n\t"
571 "punpckhdq %%mm1, %%mm1 \n\t"
572 "movd %%mm1, (%0,%2,2) \n\t"
582 #define H263_LOOP_FILTER \
583 "pxor %%mm7, %%mm7 \n\t" \
584 "movq %0, %%mm0 \n\t" \
585 "movq %0, %%mm1 \n\t" \
586 "movq %3, %%mm2 \n\t" \
587 "movq %3, %%mm3 \n\t" \
588 "punpcklbw %%mm7, %%mm0 \n\t" \
589 "punpckhbw %%mm7, %%mm1 \n\t" \
590 "punpcklbw %%mm7, %%mm2 \n\t" \
591 "punpckhbw %%mm7, %%mm3 \n\t" \
592 "psubw %%mm2, %%mm0 \n\t" \
593 "psubw %%mm3, %%mm1 \n\t" \
594 "movq %1, %%mm2 \n\t" \
595 "movq %1, %%mm3 \n\t" \
596 "movq %2, %%mm4 \n\t" \
597 "movq %2, %%mm5 \n\t" \
598 "punpcklbw %%mm7, %%mm2 \n\t" \
599 "punpckhbw %%mm7, %%mm3 \n\t" \
600 "punpcklbw %%mm7, %%mm4 \n\t" \
601 "punpckhbw %%mm7, %%mm5 \n\t" \
602 "psubw %%mm2, %%mm4 \n\t" \
603 "psubw %%mm3, %%mm5 \n\t" \
604 "psllw $2, %%mm4 \n\t" \
605 "psllw $2, %%mm5 \n\t" \
606 "paddw %%mm0, %%mm4 \n\t" \
607 "paddw %%mm1, %%mm5 \n\t" \
608 "pxor %%mm6, %%mm6 \n\t" \
609 "pcmpgtw %%mm4, %%mm6 \n\t" \
610 "pcmpgtw %%mm5, %%mm7 \n\t" \
611 "pxor %%mm6, %%mm4 \n\t" \
612 "pxor %%mm7, %%mm5 \n\t" \
613 "psubw %%mm6, %%mm4 \n\t" \
614 "psubw %%mm7, %%mm5 \n\t" \
615 "psrlw $3, %%mm4 \n\t" \
616 "psrlw $3, %%mm5 \n\t" \
617 "packuswb %%mm5, %%mm4 \n\t" \
618 "packsswb %%mm7, %%mm6 \n\t" \
619 "pxor %%mm7, %%mm7 \n\t" \
620 "movd %4, %%mm2 \n\t" \
621 "punpcklbw %%mm2, %%mm2 \n\t" \
622 "punpcklbw %%mm2, %%mm2 \n\t" \
623 "punpcklbw %%mm2, %%mm2 \n\t" \
624 "psubusb %%mm4, %%mm2 \n\t" \
625 "movq %%mm2, %%mm3 \n\t" \
626 "psubusb %%mm4, %%mm3 \n\t" \
627 "psubb %%mm3, %%mm2 \n\t" \
628 "movq %1, %%mm3 \n\t" \
629 "movq %2, %%mm4 \n\t" \
630 "pxor %%mm6, %%mm3 \n\t" \
631 "pxor %%mm6, %%mm4 \n\t" \
632 "paddusb %%mm2, %%mm3 \n\t" \
633 "psubusb %%mm2, %%mm4 \n\t" \
634 "pxor %%mm6, %%mm3 \n\t" \
635 "pxor %%mm6, %%mm4 \n\t" \
636 "paddusb %%mm2, %%mm2 \n\t" \
637 "packsswb %%mm1, %%mm0 \n\t" \
638 "pcmpgtb %%mm0, %%mm7 \n\t" \
639 "pxor %%mm7, %%mm0 \n\t" \
640 "psubb %%mm7, %%mm0 \n\t" \
641 "movq %%mm0, %%mm1 \n\t" \
642 "psubusb %%mm2, %%mm0 \n\t" \
643 "psubb %%mm0, %%mm1 \n\t" \
644 "pand %5, %%mm1 \n\t" \
645 "psrlw $2, %%mm1 \n\t" \
646 "pxor %%mm7, %%mm1 \n\t" \
647 "psubb %%mm7, %%mm1 \n\t" \
648 "movq %0, %%mm5 \n\t" \
649 "movq %3, %%mm6 \n\t" \
650 "psubb %%mm1, %%mm5 \n\t" \
651 "paddb %%mm1, %%mm6 \n\t"
653 static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale)
655 if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
656 const int strength = ff_h263_loop_filter_strength[qscale];
661 "movq %%mm3, %1 \n\t"
662 "movq %%mm4, %2 \n\t"
663 "movq %%mm5, %0 \n\t"
664 "movq %%mm6, %3 \n\t"
665 : "+m"(*(uint64_t*)(src - 2 * stride)),
666 "+m"(*(uint64_t*)(src - 1 * stride)),
667 "+m"(*(uint64_t*)(src + 0 * stride)),
668 "+m"(*(uint64_t*)(src + 1 * stride))
669 : "g"(2 * strength), "m"(ff_pb_FC)
674 static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale)
676 if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
677 const int strength = ff_h263_loop_filter_strength[qscale];
678 DECLARE_ALIGNED(8, uint64_t, temp)[4];
679 uint8_t *btemp = (uint8_t*)temp;
683 transpose4x4(btemp, src, 8, stride);
684 transpose4x4(btemp + 4, src + 4 * stride, 8, stride);
H263_LOOP_FILTER // output: %0 in mm5, %1 in mm3, %2 in mm4, %3 in mm6
692 : "g"(2 * strength), "m"(ff_pb_FC)
696 "movq %%mm5, %%mm1 \n\t"
697 "movq %%mm4, %%mm0 \n\t"
698 "punpcklbw %%mm3, %%mm5 \n\t"
699 "punpcklbw %%mm6, %%mm4 \n\t"
700 "punpckhbw %%mm3, %%mm1 \n\t"
701 "punpckhbw %%mm6, %%mm0 \n\t"
702 "movq %%mm5, %%mm3 \n\t"
703 "movq %%mm1, %%mm6 \n\t"
704 "punpcklwd %%mm4, %%mm5 \n\t"
705 "punpcklwd %%mm0, %%mm1 \n\t"
706 "punpckhwd %%mm4, %%mm3 \n\t"
707 "punpckhwd %%mm0, %%mm6 \n\t"
708 "movd %%mm5, (%0) \n\t"
709 "punpckhdq %%mm5, %%mm5 \n\t"
710 "movd %%mm5, (%0, %2) \n\t"
711 "movd %%mm3, (%0, %2, 2) \n\t"
712 "punpckhdq %%mm3, %%mm3 \n\t"
713 "movd %%mm3, (%0, %3) \n\t"
714 "movd %%mm1, (%1) \n\t"
715 "punpckhdq %%mm1, %%mm1 \n\t"
716 "movd %%mm1, (%1, %2) \n\t"
717 "movd %%mm6, (%1, %2, 2) \n\t"
718 "punpckhdq %%mm6, %%mm6 \n\t"
719 "movd %%mm6, (%1, %3) \n\t"
721 "r"(src + 4 * stride),
722 "r"((x86_reg)stride),
723 "r"((x86_reg)(3 * stride))
/* Draw an edge of width 'w' around an image of size width x height;
 * this MMX version can only handle w == 8 or w == 16. */
730 static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
731 int w, int h, int sides)
733 uint8_t *ptr, *last_line;
736 last_line = buf + (height - 1) * wrap;
742 "movd (%0), %%mm0 \n\t"
743 "punpcklbw %%mm0, %%mm0 \n\t"
744 "punpcklwd %%mm0, %%mm0 \n\t"
745 "punpckldq %%mm0, %%mm0 \n\t"
746 "movq %%mm0, -8(%0) \n\t"
747 "movq -8(%0, %2), %%mm1 \n\t"
748 "punpckhbw %%mm1, %%mm1 \n\t"
749 "punpckhwd %%mm1, %%mm1 \n\t"
750 "punpckhdq %%mm1, %%mm1 \n\t"
751 "movq %%mm1, (%0, %2) \n\t"
756 : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
761 "movd (%0), %%mm0 \n\t"
762 "punpcklbw %%mm0, %%mm0 \n\t"
763 "punpcklwd %%mm0, %%mm0 \n\t"
764 "punpckldq %%mm0, %%mm0 \n\t"
765 "movq %%mm0, -8(%0) \n\t"
766 "movq %%mm0, -16(%0) \n\t"
767 "movq -8(%0, %2), %%mm1 \n\t"
768 "punpckhbw %%mm1, %%mm1 \n\t"
769 "punpckhwd %%mm1, %%mm1 \n\t"
770 "punpckhdq %%mm1, %%mm1 \n\t"
771 "movq %%mm1, (%0, %2) \n\t"
772 "movq %%mm1, 8(%0, %2) \n\t"
777 : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
781 /* top and bottom (and hopefully also the corners) */
782 if (sides & EDGE_TOP) {
783 for (i = 0; i < h; i += 4) {
784 ptr = buf - (i + 1) * wrap - w;
787 "movq (%1, %0), %%mm0 \n\t"
788 "movq %%mm0, (%0) \n\t"
789 "movq %%mm0, (%0, %2) \n\t"
790 "movq %%mm0, (%0, %2, 2) \n\t"
791 "movq %%mm0, (%0, %3) \n\t"
796 : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
797 "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w)
802 if (sides & EDGE_BOTTOM) {
803 for (i = 0; i < h; i += 4) {
804 ptr = last_line + (i + 1) * wrap - w;
807 "movq (%1, %0), %%mm0 \n\t"
808 "movq %%mm0, (%0) \n\t"
809 "movq %%mm0, (%0, %2) \n\t"
810 "movq %%mm0, (%0, %2, 2) \n\t"
811 "movq %%mm0, (%0, %3) \n\t"
816 : "r"((x86_reg)last_line - (x86_reg)ptr - w),
817 "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
818 "r"(ptr + width + 2 * w)
824 #define QPEL_V_LOW(m3, m4, m5, m6, pw_20, pw_3, rnd, \
825 in0, in1, in2, in7, out, OP) \
826 "paddw "#m4", "#m3" \n\t" /* x1 */ \
827 "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */ \
828 "pmullw "#m3", %%mm4 \n\t" /* 20x1 */ \
829 "movq "#in7", "#m3" \n\t" /* d */ \
830 "movq "#in0", %%mm5 \n\t" /* D */ \
831 "paddw "#m3", %%mm5 \n\t" /* x4 */ \
832 "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */ \
833 "movq "#in1", %%mm5 \n\t" /* C */ \
834 "movq "#in2", %%mm6 \n\t" /* B */ \
835 "paddw "#m6", %%mm5 \n\t" /* x3 */ \
836 "paddw "#m5", %%mm6 \n\t" /* x2 */ \
837 "paddw %%mm6, %%mm6 \n\t" /* 2x2 */ \
838 "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */ \
839 "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */ \
840 "paddw "#rnd", %%mm4 \n\t" /* x2 */ \
841 "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */ \
842 "psraw $5, %%mm5 \n\t" \
843 "packuswb %%mm5, %%mm5 \n\t" \
844 OP(%%mm5, out, %%mm7, d)
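/* Added note: QPEL_V_LOW evaluates one output sample group of the MPEG-4
 * quarter-pel half-sample filter: with x1..x4 the symmetric tap sums it
 * computes (20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5, i.e. the 8-tap kernel
 * (-1, 3, -6, 20, 20, -6, 3, -1) / 32 used by the h/v lowpass routines below. */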
846 #define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMXEXT) \
847 static void OPNAME ## mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, \
856 "pxor %%mm7, %%mm7 \n\t" \
858 "movq (%0), %%mm0 \n\t" /* ABCDEFGH */ \
859 "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */ \
860 "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */ \
861 "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */ \
862 "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */ \
863 "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */ \
864 "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */ \
865 "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */ \
866 "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */ \
867 "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */ \
868 "psllq $16, %%mm3 \n\t" /* 00ABCDEF */ \
869 "psllq $24, %%mm4 \n\t" /* 000ABCDE */ \
870 "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */ \
871 "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */ \
872 "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */ \
873 "paddw %%mm3, %%mm5 \n\t" /* b */ \
874 "paddw %%mm2, %%mm6 \n\t" /* c */ \
875 "paddw %%mm5, %%mm5 \n\t" /* 2b */ \
876 "psubw %%mm5, %%mm6 \n\t" /* c - 2b */ \
877 "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */ \
878 "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */ \
879 "paddw %%mm4, %%mm0 \n\t" /* a */ \
880 "paddw %%mm1, %%mm5 \n\t" /* d */ \
881 "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */ \
882 "psubw %%mm5, %%mm0 \n\t" /* 20a - d */ \
883 "paddw %6, %%mm6 \n\t" \
884 "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */ \
885 "psraw $5, %%mm0 \n\t" \
886 "movq %%mm0, %5 \n\t" \
887 /* mm1 = EFGH, mm2 = DEFG, mm3 = CDEF, mm4 = BCDE, mm7 = 0 */ \
889 "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */ \
890 "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */ \
891 "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */ \
892 "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */ \
893 "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */ \
894 "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */ \
895 "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */ \
896 "paddw %%mm0, %%mm2 \n\t" /* b */ \
897 "paddw %%mm5, %%mm3 \n\t" /* c */ \
898 "paddw %%mm2, %%mm2 \n\t" /* 2b */ \
899 "psubw %%mm2, %%mm3 \n\t" /* c - 2b */ \
900 "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */ \
901 "psrlq $24, %%mm6 \n\t" /* IJKLM000 */ \
902 "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */ \
903 "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */ \
904 "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */ \
905 "paddw %%mm2, %%mm1 \n\t" /* a */ \
906 "paddw %%mm6, %%mm4 \n\t" /* d */ \
907 "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */ \
908 "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */ \
909 "paddw %6, %%mm1 \n\t" \
910 "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */ \
911 "psraw $5, %%mm3 \n\t" \
912 "movq %5, %%mm1 \n\t" \
913 "packuswb %%mm3, %%mm1 \n\t" \
914 OP_MMXEXT(%%mm1, (%1), %%mm4, q) \
915 /* mm0 = GHIJ, mm2 = FGHI, mm5 = HIJK, mm6 = IJKL, mm7 = 0 */ \
917 "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */ \
918 "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */ \
919 "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */ \
920 "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */ \
921 "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */ \
922 "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */ \
923 "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */ \
924 "paddw %%mm1, %%mm5 \n\t" /* b */ \
925 "paddw %%mm4, %%mm0 \n\t" /* c */ \
926 "paddw %%mm5, %%mm5 \n\t" /* 2b */ \
927 "psubw %%mm5, %%mm0 \n\t" /* c - 2b */ \
928 "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */ \
929 "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */ \
930 "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */ \
931 "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */ \
932 "paddw %%mm3, %%mm2 \n\t" /* d */ \
933 "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */ \
934 "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */ \
935 "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */ \
936 "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */ \
937 "paddw %%mm2, %%mm6 \n\t" /* a */ \
938 "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */ \
939 "paddw %6, %%mm0 \n\t" \
940 "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */ \
941 "psraw $5, %%mm0 \n\t" \
942 /* mm1 = KLMN, mm2 = JKLM, mm3 = MNOP, */ \
943 /* mm4 = LMNO, mm5 = NOPQ mm7 = 0 */ \
945 "paddw %%mm5, %%mm3 \n\t" /* a */ \
946 "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */ \
947 "paddw %%mm4, %%mm6 \n\t" /* b */ \
948 "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */ \
949 "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */ \
950 "paddw %%mm1, %%mm4 \n\t" /* c */ \
951 "paddw %%mm2, %%mm5 \n\t" /* d */ \
952 "paddw %%mm6, %%mm6 \n\t" /* 2b */ \
953 "psubw %%mm6, %%mm4 \n\t" /* c - 2b */ \
954 "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */ \
955 "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */ \
956 "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */ \
957 "paddw %6, %%mm4 \n\t" \
958 "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */ \
959 "psraw $5, %%mm4 \n\t" \
960 "packuswb %%mm4, %%mm0 \n\t" \
961 OP_MMXEXT(%%mm0, 8(%1), %%mm4, q) \
967 : "+a"(src), "+c"(dst), "+D"(h) \
968 : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), \
969 /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(temp), "m"(ROUNDER) \
974 static void OPNAME ## mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, \
981 "pxor %%mm7, %%mm7 \n\t" \
983 "movq (%0), %%mm0 \n\t" /* ABCDEFGH */ \
984 "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */ \
985 "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */ \
986 "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */ \
987 "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */ \
988 "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */ \
989 "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */ \
990 "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */ \
991 "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */ \
992 "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */ \
993 "psllq $16, %%mm3 \n\t" /* 00ABCDEF */ \
994 "psllq $24, %%mm4 \n\t" /* 000ABCDE */ \
995 "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */ \
996 "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */ \
997 "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */ \
998 "paddw %%mm3, %%mm5 \n\t" /* b */ \
999 "paddw %%mm2, %%mm6 \n\t" /* c */ \
1000 "paddw %%mm5, %%mm5 \n\t" /* 2b */ \
1001 "psubw %%mm5, %%mm6 \n\t" /* c - 2b */ \
1002 "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */ \
1003 "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */ \
1004 "paddw %%mm4, %%mm0 \n\t" /* a */ \
1005 "paddw %%mm1, %%mm5 \n\t" /* d */ \
1006 "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */ \
1007 "psubw %%mm5, %%mm0 \n\t" /* 20a - d */ \
1008 "paddw %5, %%mm6 \n\t" \
1009 "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */ \
1010 "psraw $5, %%mm0 \n\t" \
1011 /* mm1 = EFGH, mm2 = DEFG, mm3 = CDEF, mm4 = BCDE, mm7 = 0 */ \
1013 "movd 5(%0), %%mm5 \n\t" /* FGHI */ \
1014 "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */ \
1015 "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */ \
1016 "paddw %%mm5, %%mm1 \n\t" /* a */ \
1017 "paddw %%mm6, %%mm2 \n\t" /* b */ \
1018 "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */ \
1019 "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */ \
1020 "paddw %%mm6, %%mm3 \n\t" /* c */ \
1021 "paddw %%mm5, %%mm4 \n\t" /* d */ \
1022 "paddw %%mm2, %%mm2 \n\t" /* 2b */ \
1023 "psubw %%mm2, %%mm3 \n\t" /* c - 2b */ \
1024 "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */ \
1025 "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */ \
1026 "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */ \
1027 "paddw %5, %%mm1 \n\t" \
1028 "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */ \
1029 "psraw $5, %%mm3 \n\t" \
1030 "packuswb %%mm3, %%mm0 \n\t" \
1031 OP_MMXEXT(%%mm0, (%1), %%mm4, q) \
1037 : "+a"(src), "+c"(dst), "+d"(h) \
1038 : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), \
1039 /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(ROUNDER) \
1044 #define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX) \
1045 static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, \
1050 uint64_t temp[17 * 4]; \
1051 uint64_t *temp_ptr = temp; \
1054 /* FIXME unroll */ \
1055 __asm__ volatile ( \
1056 "pxor %%mm7, %%mm7 \n\t" \
1058 "movq (%0), %%mm0 \n\t" \
1059 "movq (%0), %%mm1 \n\t" \
1060 "movq 8(%0), %%mm2 \n\t" \
1061 "movq 8(%0), %%mm3 \n\t" \
1062 "punpcklbw %%mm7, %%mm0 \n\t" \
1063 "punpckhbw %%mm7, %%mm1 \n\t" \
1064 "punpcklbw %%mm7, %%mm2 \n\t" \
1065 "punpckhbw %%mm7, %%mm3 \n\t" \
1066 "movq %%mm0, (%1) \n\t" \
1067 "movq %%mm1, 17 * 8(%1) \n\t" \
1068 "movq %%mm2, 2 * 17 * 8(%1) \n\t" \
1069 "movq %%mm3, 3 * 17 * 8(%1) \n\t" \
1074 : "+r"(src), "+r"(temp_ptr), "+r"(count) \
1075 : "r"((x86_reg)srcStride) \
1082 /* FIXME reorder for speed */ \
1083 __asm__ volatile ( \
1084 /* "pxor %%mm7, %%mm7 \n\t" */ \
1086 "movq (%0), %%mm0 \n\t" \
1087 "movq 8(%0), %%mm1 \n\t" \
1088 "movq 16(%0), %%mm2 \n\t" \
1089 "movq 24(%0), %%mm3 \n\t" \
1090 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP) \
1091 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP) \
1093 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP) \
1095 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP) \
1097 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP) \
1098 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP) \
1100 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP) \
1101 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP) \
1103 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP) \
1104 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0), 104(%0), (%1, %3), OP) \
1106 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0), 112(%0), (%1), OP) \
1107 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0), 120(%0), (%1, %3), OP) \
1109 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0), 128(%0), (%1), OP) \
1111 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0), 128(%0), (%1, %3), OP) \
1113 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0), 104(%0), 120(%0), (%1), OP) \
1114 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0), 104(%0), 112(%0), 112(%0), (%1, %3), OP) \
1116 "add $136, %0 \n\t" \
1121 : "+r"(temp_ptr), "+r"(dst), "+g"(count) \
1122 : "r"((x86_reg)dstStride), "r"(2 * (x86_reg)dstStride), \
1123 /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(ROUNDER), \
1124 "g"(4 - 14 * (x86_reg)dstStride) \
1129 static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, \
1134 uint64_t temp[9 * 2]; \
1135 uint64_t *temp_ptr = temp; \
1138 /* FIXME unroll */ \
1139 __asm__ volatile ( \
1140 "pxor %%mm7, %%mm7 \n\t" \
1142 "movq (%0), %%mm0 \n\t" \
1143 "movq (%0), %%mm1 \n\t" \
1144 "punpcklbw %%mm7, %%mm0 \n\t" \
1145 "punpckhbw %%mm7, %%mm1 \n\t" \
1146 "movq %%mm0, (%1) \n\t" \
1147 "movq %%mm1, 9*8(%1) \n\t" \
1152 : "+r"(src), "+r"(temp_ptr), "+r"(count) \
1153 : "r"((x86_reg)srcStride) \
1160 /* FIXME reorder for speed */ \
1161 __asm__ volatile ( \
1162 /* "pxor %%mm7, %%mm7 \n\t" */ \
1164 "movq (%0), %%mm0 \n\t" \
1165 "movq 8(%0), %%mm1 \n\t" \
1166 "movq 16(%0), %%mm2 \n\t" \
1167 "movq 24(%0), %%mm3 \n\t" \
1168 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP) \
1169 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP) \
1171 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP) \
1173 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP) \
1175 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP) \
1177 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP) \
1179 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP) \
1180 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP) \
1182 "add $72, %0 \n\t" \
1187 : "+r"(temp_ptr), "+r"(dst), "+g"(count) \
1188 : "r"((x86_reg)dstStride), "r"(2 * (x86_reg)dstStride), \
1189 /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(ROUNDER), \
1190 "g"(4 - 6 * (x86_reg)dstStride) \
1195 static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, \
1198 OPNAME ## pixels8_ ## MMX(dst, src, stride, 8); \
1201 static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, \
1205 uint8_t * const half = (uint8_t*)temp; \
1206 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, \
1208 OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8); \
1211 static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, \
1214 OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, \
1218 static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, \
1222 uint8_t * const half = (uint8_t*)temp; \
1223 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, \
1225 OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride, \
1229 static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, \
1233 uint8_t * const half = (uint8_t*)temp; \
1234 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride); \
1235 OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8); \
1238 static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, \
1241 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride); \
1244 static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, \
1248 uint8_t * const half = (uint8_t*)temp; \
1249 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride); \
1250 OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride, \
1254 static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, \
1257 uint64_t half[8 + 9]; \
1258 uint8_t * const halfH = ((uint8_t*)half) + 64; \
1259 uint8_t * const halfHV = ((uint8_t*)half); \
1260 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
1262 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9); \
1263 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
1264 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8); \
1267 static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, \
1270 uint64_t half[8 + 9]; \
1271 uint8_t * const halfH = ((uint8_t*)half) + 64; \
1272 uint8_t * const halfHV = ((uint8_t*)half); \
1273 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
1275 put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \
1277 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
1278 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8); \
1281 static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, \
1284 uint64_t half[8 + 9]; \
1285 uint8_t * const halfH = ((uint8_t*)half) + 64; \
1286 uint8_t * const halfHV = ((uint8_t*)half); \
1287 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
1289 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9); \
1290 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
1291 OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, stride, 8, 8); \
1294 static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, \
1297 uint64_t half[8 + 9]; \
1298 uint8_t * const halfH = ((uint8_t*)half) + 64; \
1299 uint8_t * const halfHV = ((uint8_t*)half); \
1300 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
1302 put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \
1304 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
1305 OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, stride, 8, 8); \
1308 static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, \
1311 uint64_t half[8 + 9]; \
1312 uint8_t * const halfH = ((uint8_t*)half) + 64; \
1313 uint8_t * const halfHV = ((uint8_t*)half); \
1314 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
1316 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
1317 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8); \
1320 static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, \
1323 uint64_t half[8 + 9]; \
1324 uint8_t * const halfH = ((uint8_t*)half) + 64; \
1325 uint8_t * const halfHV = ((uint8_t*)half); \
1326 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
1328 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
1329 OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, stride, 8, 8); \
1332 static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, \
1335 uint64_t half[8 + 9]; \
1336 uint8_t * const halfH = ((uint8_t*)half); \
1337 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
1339 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9); \
1340 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8); \
1343 static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, \
1346 uint64_t half[8 + 9]; \
1347 uint8_t * const halfH = ((uint8_t*)half); \
1348 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
1350 put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \
1352 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8); \
1355 static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \
1359 uint8_t * const halfH = ((uint8_t*)half); \
1360 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
1362 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8); \
1365 static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, \
1368 OPNAME ## pixels16_ ## MMX(dst, src, stride, 16); \
1371 static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, \
1374 uint64_t temp[32]; \
1375 uint8_t * const half = (uint8_t*)temp; \
1376 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, \
1378 OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16); \
1381 static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, \
1384 OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, \
1385 stride, stride, 16); \
1388 static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, \
1391 uint64_t temp[32]; \
1392 uint8_t * const half = (uint8_t*)temp; \
1393 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, \
1395 OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half, \
1396 stride, stride, 16); \
1399 static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, \
1402 uint64_t temp[32]; \
1403 uint8_t * const half = (uint8_t*)temp; \
1404 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, \
1406 OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16); \
1409 static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, \
1412 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride); \
1415 static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, \
1418 uint64_t temp[32]; \
1419 uint8_t * const half = (uint8_t*)temp; \
1420 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, \
1422 OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, \
1423 stride, stride, 16); \
1426 static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, \
1429 uint64_t half[16 * 2 + 17 * 2]; \
1430 uint8_t * const halfH = ((uint8_t*)half) + 256; \
1431 uint8_t * const halfHV = ((uint8_t*)half); \
1432 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
1434 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \
1436 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
1438 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16); \
1441 static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, \
1444 uint64_t half[16 * 2 + 17 * 2]; \
1445 uint8_t * const halfH = ((uint8_t*)half) + 256; \
1446 uint8_t * const halfHV = ((uint8_t*)half); \
1447 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
1449 put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \
1451 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
1453 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16); \
1456 static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, \
1459 uint64_t half[16 * 2 + 17 * 2]; \
1460 uint8_t * const halfH = ((uint8_t*)half) + 256; \
1461 uint8_t * const halfHV = ((uint8_t*)half); \
1462 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
1464 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \
1466 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
1468 OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, stride, \
1472 static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, \
1475 uint64_t half[16 * 2 + 17 * 2]; \
1476 uint8_t * const halfH = ((uint8_t*)half) + 256; \
1477 uint8_t * const halfHV = ((uint8_t*)half); \
1478 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
1480 put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \
1482 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
1484 OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, stride, \
1488 static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, \
1491 uint64_t half[16 * 2 + 17 * 2]; \
1492 uint8_t * const halfH = ((uint8_t*)half) + 256; \
1493 uint8_t * const halfHV = ((uint8_t*)half); \
1494 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
1496 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
1498 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16); \
1501 static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, \
1504 uint64_t half[16 * 2 + 17 * 2]; \
1505 uint8_t * const halfH = ((uint8_t*)half) + 256; \
1506 uint8_t * const halfHV = ((uint8_t*)half); \
1507 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
1509 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
1511 OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, stride, \
1515 static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, \
1518 uint64_t half[17 * 2]; \
1519 uint8_t * const halfH = ((uint8_t*)half); \
1520 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
1522 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \
1524 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16); \
1527 static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, \
1530 uint64_t half[17 * 2]; \
1531 uint8_t * const halfH = ((uint8_t*)half); \
1532 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
1534 put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \
1536 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16); \
1539 static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \
1542 uint64_t half[17 * 2]; \
1543 uint8_t * const halfH = ((uint8_t*)half); \
1544 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
1546 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16); \
1549 #define PUT_OP(a, b, temp, size) \
1550 "mov"#size" "#a", "#b" \n\t"
1552 #define AVG_MMXEXT_OP(a, b, temp, size) \
1553 "mov"#size" "#b", "#temp" \n\t" \
1554 "pavgb "#temp", "#a" \n\t" \
1555 "mov"#size" "#a", "#b" \n\t"
1557 QPEL_BASE(put_, ff_pw_16, _, PUT_OP)
1558 QPEL_BASE(avg_, ff_pw_16, _, AVG_MMXEXT_OP)
1559 QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP)
1560 QPEL_OP(put_, ff_pw_16, _, PUT_OP, mmxext)
1561 QPEL_OP(avg_, ff_pw_16, _, AVG_MMXEXT_OP, mmxext)
1562 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmxext)
1564 /***********************************/
/* bilinear qpel: not compliant with any spec, used only for -lavdopts fast */
1567 #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL) \
1568 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, \
1572 OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE); \
1575 #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2) \
1576 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, \
1580 OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src + S0, stride, SIZE, \
1584 #define QPEL_2TAP(OPNAME, SIZE, MMX) \
1585 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX) \
1586 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX) \
1587 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx) \
1588 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX = \
1589 OPNAME ## qpel ## SIZE ## _mc00_ ## MMX; \
1590 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX = \
1591 OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX; \
1592 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX = \
1593 OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX; \
1594 static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, \
1598 OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src + 1, stride, SIZE); \
1600 static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, \
1604 OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src + stride, \
1607 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0) \
1608 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0) \
1609 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0) \
1610 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0) \
1611 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1) \
1612 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1) \
1613 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1) \
1614 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride + 1, -stride, -1) \
1616 QPEL_2TAP(put_, 16, mmxext)
1617 QPEL_2TAP(avg_, 16, mmxext)
1618 QPEL_2TAP(put_, 8, mmxext)
1619 QPEL_2TAP(avg_, 8, mmxext)
1621 void ff_put_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
1623 put_pixels8_xy2_mmx(dst, src, stride, 8);
1625 void ff_put_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
1627 put_pixels16_xy2_mmx(dst, src, stride, 16);
1629 void ff_avg_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
1631 avg_pixels8_xy2_mmx(dst, src, stride, 8);
1633 void ff_avg_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
1635 avg_pixels16_xy2_mmx(dst, src, stride, 16);
1638 #endif /* HAVE_INLINE_ASM */
1641 typedef void emu_edge_core_func(uint8_t *buf, const uint8_t *src,
1642 x86_reg linesize, x86_reg start_y,
1643 x86_reg end_y, x86_reg block_h,
1644 x86_reg start_x, x86_reg end_x,
1646 extern emu_edge_core_func ff_emu_edge_core_mmx;
1647 extern emu_edge_core_func ff_emu_edge_core_sse;
1649 static av_always_inline void emulated_edge_mc(uint8_t *buf, const uint8_t *src,
1651 int block_w, int block_h,
1652 int src_x, int src_y,
1654 emu_edge_core_func *core_fn)
1656 int start_y, start_x, end_y, end_x, src_y_add = 0;
1659 src_y_add = h - 1 - src_y;
1661 } else if (src_y <= -block_h) {
1662 src_y_add = 1 - block_h - src_y;
1663 src_y = 1 - block_h;
1666 src += w - 1 - src_x;
1668 } else if (src_x <= -block_w) {
1669 src += 1 - block_w - src_x;
1670 src_x = 1 - block_w;
1673 start_y = FFMAX(0, -src_y);
1674 start_x = FFMAX(0, -src_x);
1675 end_y = FFMIN(block_h, h-src_y);
1676 end_x = FFMIN(block_w, w-src_x);
1677 assert(start_x < end_x && block_w > 0);
1678 assert(start_y < end_y && block_h > 0);
// fill in the part that is to be copied, plus everything above/below it
1681 src += (src_y_add + start_y) * linesize + start_x;
1683 core_fn(buf, src, linesize, start_y, end_y,
1684 block_h, start_x, end_x, block_w);
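    /* Added note: this wrapper only clamps src_x/src_y and derives the readable
     * region; the actual replication of edge pixels into 'buf' is done by the
     * external yasm routines ff_emu_edge_core_mmx/_sse declared above. */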
1688 static av_noinline void emulated_edge_mc_mmx(uint8_t *buf, const uint8_t *src,
1690 int block_w, int block_h,
1691 int src_x, int src_y, int w, int h)
1693 emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
1694 w, h, &ff_emu_edge_core_mmx);
1698 static av_noinline void emulated_edge_mc_sse(uint8_t *buf, const uint8_t *src,
1700 int block_w, int block_h,
1701 int src_x, int src_y, int w, int h)
1703 emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
1704 w, h, &ff_emu_edge_core_sse);
1706 #endif /* HAVE_YASM */
1710 static void gmc_mmx(uint8_t *dst, uint8_t *src,
1711 int stride, int h, int ox, int oy,
1712 int dxx, int dxy, int dyx, int dyy,
1713 int shift, int r, int width, int height)
1716 const int ix = ox >> (16 + shift);
1717 const int iy = oy >> (16 + shift);
1718 const int oxs = ox >> 4;
1719 const int oys = oy >> 4;
1720 const int dxxs = dxx >> 4;
1721 const int dxys = dxy >> 4;
1722 const int dyxs = dyx >> 4;
1723 const int dyys = dyy >> 4;
1724 const uint16_t r4[4] = { r, r, r, r };
1725 const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
1726 const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
1727 const uint64_t shift2 = 2 * shift;
1730 const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
1731 const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
1732 const int dxh = dxy * (h - 1);
1733 const int dyw = dyx * (w - 1);
1734 if ( // non-constant fullpel offset (3% of blocks)
1735 ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
1736 (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
1737 // uses more than 16 bits of subpel mv (only at huge resolution)
1738 || (dxx | dxy | dyx | dyy) & 15 ||
1739 (unsigned)ix >= width - w ||
1740 (unsigned)iy >= height - h) {
1741 // FIXME could still use mmx for some of the rows
1742 ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
1743 shift, r, width, height);
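    /* Added note: the MMX path below handles the common case (constant fullpel
     * offset over the block, small subpel deltas); each output pixel is the
     * usual bilinear blend
     *     ( (s-dx)*(s-dy)*src[0,0] + dx*(s-dy)*src[1,0]
     *     + (s-dx)*  dy  *src[0,1] + dx*  dy  *src[1,1] + r ) >> (2*shift)
     * with s the fullpel unit and dx/dy the per-pixel fractional offsets, as
     * the instruction comments below spell out. */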
1747 src += ix + iy * stride;
1750 "movd %0, %%mm6 \n\t"
1751 "pxor %%mm7, %%mm7 \n\t"
1752 "punpcklwd %%mm6, %%mm6 \n\t"
1753 "punpcklwd %%mm6, %%mm6 \n\t"
1757 for (x = 0; x < w; x += 4) {
1758 uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
1759 oxs - dxys + dxxs * (x + 1),
1760 oxs - dxys + dxxs * (x + 2),
1761 oxs - dxys + dxxs * (x + 3) };
1762 uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
1763 oys - dyys + dyxs * (x + 1),
1764 oys - dyys + dyxs * (x + 2),
1765 oys - dyys + dyxs * (x + 3) };
1767 for (y = 0; y < h; y++) {
1769 "movq %0, %%mm4 \n\t"
1770 "movq %1, %%mm5 \n\t"
1771 "paddw %2, %%mm4 \n\t"
1772 "paddw %3, %%mm5 \n\t"
1773 "movq %%mm4, %0 \n\t"
1774 "movq %%mm5, %1 \n\t"
1775 "psrlw $12, %%mm4 \n\t"
1776 "psrlw $12, %%mm5 \n\t"
1777 : "+m"(*dx4), "+m"(*dy4)
1778 : "m"(*dxy4), "m"(*dyy4)
1782 "movq %%mm6, %%mm2 \n\t"
1783 "movq %%mm6, %%mm1 \n\t"
1784 "psubw %%mm4, %%mm2 \n\t"
1785 "psubw %%mm5, %%mm1 \n\t"
1786 "movq %%mm2, %%mm0 \n\t"
1787 "movq %%mm4, %%mm3 \n\t"
1788 "pmullw %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
1789 "pmullw %%mm5, %%mm3 \n\t" // dx * dy
1790 "pmullw %%mm5, %%mm2 \n\t" // (s - dx) * dy
1791 "pmullw %%mm4, %%mm1 \n\t" // dx * (s - dy)
1793 "movd %4, %%mm5 \n\t"
1794 "movd %3, %%mm4 \n\t"
1795 "punpcklbw %%mm7, %%mm5 \n\t"
1796 "punpcklbw %%mm7, %%mm4 \n\t"
1797 "pmullw %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
1798 "pmullw %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
1800 "movd %2, %%mm5 \n\t"
1801 "movd %1, %%mm4 \n\t"
1802 "punpcklbw %%mm7, %%mm5 \n\t"
1803 "punpcklbw %%mm7, %%mm4 \n\t"
1804 "pmullw %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
1805 "pmullw %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
1806 "paddw %5, %%mm1 \n\t"
1807 "paddw %%mm3, %%mm2 \n\t"
1808 "paddw %%mm1, %%mm0 \n\t"
1809 "paddw %%mm2, %%mm0 \n\t"
1811 "psrlw %6, %%mm0 \n\t"
1812 "packuswb %%mm0, %%mm0 \n\t"
1813 "movd %%mm0, %0 \n\t"
1815 : "=m"(dst[x + y * stride])
1816 : "m"(src[0]), "m"(src[1]),
1817 "m"(src[stride]), "m"(src[stride + 1]),
1818 "m"(*r4), "m"(shift2)
1822 src += 4 - h * stride;
1826 #define PREFETCH(name, op) \
1827 static void name(void *mem, int stride, int h) \
1829 const uint8_t *p = mem; \
1831 __asm__ volatile (#op" %0" :: "m"(*p)); \
1836 PREFETCH(prefetch_mmxext, prefetcht0)
1837 PREFETCH(prefetch_3dnow, prefetch)
1840 #endif /* HAVE_INLINE_ASM */
1842 #include "h264_qpel.c"
1844 void ff_put_h264_chroma_mc8_rnd_mmx (uint8_t *dst, uint8_t *src,
1845 int stride, int h, int x, int y);
1846 void ff_avg_h264_chroma_mc8_rnd_mmxext(uint8_t *dst, uint8_t *src,
1847 int stride, int h, int x, int y);
1848 void ff_avg_h264_chroma_mc8_rnd_3dnow(uint8_t *dst, uint8_t *src,
1849 int stride, int h, int x, int y);
1851 void ff_put_h264_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
1852 int stride, int h, int x, int y);
1853 void ff_avg_h264_chroma_mc4_mmxext (uint8_t *dst, uint8_t *src,
1854 int stride, int h, int x, int y);
1855 void ff_avg_h264_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
1856 int stride, int h, int x, int y);
1858 void ff_put_h264_chroma_mc2_mmxext (uint8_t *dst, uint8_t *src,
1859 int stride, int h, int x, int y);
1860 void ff_avg_h264_chroma_mc2_mmxext (uint8_t *dst, uint8_t *src,
1861 int stride, int h, int x, int y);
1863 void ff_put_h264_chroma_mc8_rnd_ssse3(uint8_t *dst, uint8_t *src,
1864 int stride, int h, int x, int y);
1865 void ff_put_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
1866 int stride, int h, int x, int y);
1868 void ff_avg_h264_chroma_mc8_rnd_ssse3(uint8_t *dst, uint8_t *src,
1869 int stride, int h, int x, int y);
1870 void ff_avg_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
1871 int stride, int h, int x, int y);
1873 #define CHROMA_MC(OP, NUM, DEPTH, OPT) \
1874 void ff_ ## OP ## _h264_chroma_mc ## NUM ## _ ## DEPTH ## _ ## OPT \
1875 (uint8_t *dst, uint8_t *src, \
1876 int stride, int h, int x, int y);
1878 CHROMA_MC(put, 2, 10, mmxext)
1879 CHROMA_MC(avg, 2, 10, mmxext)
1880 CHROMA_MC(put, 4, 10, mmxext)
1881 CHROMA_MC(avg, 4, 10, mmxext)
1882 CHROMA_MC(put, 8, 10, sse2)
1883 CHROMA_MC(avg, 8, 10, sse2)
1884 CHROMA_MC(put, 8, 10, avx)
1885 CHROMA_MC(avg, 8, 10, avx)
#if HAVE_INLINE_ASM

/* CAVS-specific */
void ff_put_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
{
    put_pixels8_mmx(dst, src, stride, 8);
}

void ff_avg_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
{
    avg_pixels8_mmx(dst, src, stride, 8);
}

void ff_put_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
{
    put_pixels16_mmx(dst, src, stride, 16);
}

void ff_avg_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
{
    avg_pixels16_mmx(dst, src, stride, 16);
}

/* VC-1-specific */
void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src,
                               int stride, int rnd)
{
    put_pixels8_mmx(dst, src, stride, 8);
}

void ff_avg_vc1_mspel_mc00_mmxext(uint8_t *dst, const uint8_t *src,
                                  int stride, int rnd)
{
    avg_pixels8_mmxext(dst, src, stride, 8);
}
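
/* Vorbis inverse coupling, done branchlessly: the compare results are used
 * as bit masks to conditionally negate and swap each magnitude/angle pair,
 * as annotated on the individual instructions below.  The 3DNow! version
 * handles two floats per iteration, the SSE version four. */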
static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
{
    int i;

    __asm__ volatile ("pxor %%mm7, %%mm7":);
    for (i = 0; i < blocksize; i += 2) {
        __asm__ volatile (
            "movq       %0, %%mm0   \n\t"
            "movq       %1, %%mm1   \n\t"
            "movq    %%mm0, %%mm2   \n\t"
            "movq    %%mm1, %%mm3   \n\t"
            "pfcmpge %%mm7, %%mm2   \n\t" // m <= 0.0
            "pfcmpge %%mm7, %%mm3   \n\t" // a <= 0.0
            "pslld     $31, %%mm2   \n\t" // keep only the sign bit
            "pxor    %%mm2, %%mm1   \n\t"
            "movq    %%mm3, %%mm4   \n\t"
            "pand    %%mm1, %%mm3   \n\t"
            "pandn   %%mm1, %%mm4   \n\t"
            "pfadd   %%mm0, %%mm3   \n\t" // a = m + ((a < 0) & (a ^ sign(m)))
            "pfsub   %%mm4, %%mm0   \n\t" // m = m + ((a > 0) & (a ^ sign(m)))
            "movq    %%mm3, %1      \n\t"
            "movq    %%mm0, %0      \n\t"
            : "+m"(mag[i]), "+m"(ang[i])
            :: "memory"
        );
    }
    __asm__ volatile ("femms");
}

static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
{
    int i;

    __asm__ volatile (
        "movaps %0, %%xmm5 \n\t"
        :: "m"(ff_pdw_80000000[0])
    );
    for (i = 0; i < blocksize; i += 4) {
        __asm__ volatile (
            "movaps      %0, %%xmm0 \n\t"
            "movaps      %1, %%xmm1 \n\t"
            "xorps   %%xmm2, %%xmm2 \n\t"
            "xorps   %%xmm3, %%xmm3 \n\t"
            "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
            "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
            "andps   %%xmm5, %%xmm2 \n\t" // keep only the sign bit
            "xorps   %%xmm2, %%xmm1 \n\t"
            "movaps  %%xmm3, %%xmm4 \n\t"
            "andps   %%xmm1, %%xmm3 \n\t"
            "andnps  %%xmm1, %%xmm4 \n\t"
            "addps   %%xmm0, %%xmm3 \n\t" // a = m + ((a < 0) & (a ^ sign(m)))
            "subps   %%xmm4, %%xmm0 \n\t" // m = m + ((a > 0) & (a ^ sign(m)))
            "movaps  %%xmm3, %1     \n\t"
            "movaps  %%xmm0, %0     \n\t"
            : "+m"(mag[i]), "+m"(ang[i])
            :: "memory"
        );
    }
}

#if HAVE_6REGS
static void vector_fmul_window_3dnowext(float *dst, const float *src0,
                                        const float *src1, const float *win,
                                        int len)
{
    x86_reg i = -len * 4;
    x86_reg j =  len * 4 - 8;
    __asm__ volatile (
        "1:                             \n"
        "pswapd (%5, %1), %%mm1         \n"
        "movq   (%5, %0), %%mm0         \n"
        "pswapd (%4, %1), %%mm5         \n"
        "movq   (%3, %0), %%mm4         \n"
        "movq      %%mm0, %%mm2         \n"
        "movq      %%mm1, %%mm3         \n"
        "pfmul     %%mm4, %%mm2         \n" // src0[len + i] * win[len + i]
        "pfmul     %%mm5, %%mm3         \n" // src1[j]       * win[len + j]
        "pfmul     %%mm4, %%mm1         \n" // src0[len + i] * win[len + j]
        "pfmul     %%mm5, %%mm0         \n" // src1[j]       * win[len + i]
        "pfadd     %%mm3, %%mm2         \n"
        "pfsub     %%mm0, %%mm1         \n"
        "pswapd    %%mm2, %%mm2         \n"
        "movq      %%mm1, (%2, %0)      \n"
        "movq      %%mm2, (%2, %1)      \n"
        "sub          $8, %1            \n"
        "add          $8, %0            \n"
        "jl 1b                          \n"
        "femms                          \n"
        : "+r"(i), "+r"(j)
        : "r"(dst + len), "r"(src0 + len), "r"(src1), "r"(win + len)
    );
}

static void vector_fmul_window_sse(float *dst, const float *src0,
                                   const float *src1, const float *win, int len)
{
    x86_reg i = -len * 4;
    x86_reg j =  len * 4 - 16;
    __asm__ volatile (
        "1:                             \n"
        "movaps      (%5, %1), %%xmm1   \n"
        "movaps      (%5, %0), %%xmm0   \n"
        "movaps      (%4, %1), %%xmm5   \n"
        "movaps      (%3, %0), %%xmm4   \n"
        "shufps $0x1b, %%xmm1, %%xmm1   \n"
        "shufps $0x1b, %%xmm5, %%xmm5   \n"
        "movaps        %%xmm0, %%xmm2   \n"
        "movaps        %%xmm1, %%xmm3   \n"
        "mulps         %%xmm4, %%xmm2   \n" // src0[len + i] * win[len + i]
        "mulps         %%xmm5, %%xmm3   \n" // src1[j]       * win[len + j]
        "mulps         %%xmm4, %%xmm1   \n" // src0[len + i] * win[len + j]
        "mulps         %%xmm5, %%xmm0   \n" // src1[j]       * win[len + i]
        "addps         %%xmm3, %%xmm2   \n"
        "subps         %%xmm0, %%xmm1   \n"
        "shufps $0x1b, %%xmm2, %%xmm2   \n"
        "movaps        %%xmm1, (%2, %0) \n"
        "movaps        %%xmm2, (%2, %1) \n"
        "sub              $16, %1       \n"
        "add              $16, %0       \n"
        "jl 1b                          \n"
        : "+r"(i), "+r"(j)
        : "r"(dst + len), "r"(src0 + len), "r"(src1), "r"(win + len)
    );
}
#endif /* HAVE_6REGS */

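/* Clamp every float in src to [min, max]: min and max are splatted into
 * xmm4/xmm5, then the loop clips 64 bytes (16 floats) per iteration,
 * walking the buffer from the end towards the start. */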
static void vector_clipf_sse(float *dst, const float *src,
                             float min, float max, int len)
{
    x86_reg i = (len - 16) * 4;
    __asm__ volatile (
        "movss          %3, %%xmm4      \n\t"
        "movss          %4, %%xmm5      \n\t"
        "shufps $0, %%xmm4, %%xmm4      \n\t"
        "shufps $0, %%xmm5, %%xmm5      \n\t"
        "1:                             \n\t"
        "movaps   (%2, %0), %%xmm0      \n\t" // 3/1 on intel
        "movaps 16(%2, %0), %%xmm1      \n\t"
        "movaps 32(%2, %0), %%xmm2      \n\t"
        "movaps 48(%2, %0), %%xmm3      \n\t"
        "maxps      %%xmm4, %%xmm0      \n\t"
        "maxps      %%xmm4, %%xmm1      \n\t"
        "maxps      %%xmm4, %%xmm2      \n\t"
        "maxps      %%xmm4, %%xmm3      \n\t"
        "minps      %%xmm5, %%xmm0      \n\t"
        "minps      %%xmm5, %%xmm1      \n\t"
        "minps      %%xmm5, %%xmm2      \n\t"
        "minps      %%xmm5, %%xmm3      \n\t"
        "movaps     %%xmm0,   (%1, %0)  \n\t"
        "movaps     %%xmm1, 16(%1, %0)  \n\t"
        "movaps     %%xmm2, 32(%1, %0)  \n\t"
        "movaps     %%xmm3, 48(%1, %0)  \n\t"
        "sub           $64, %0          \n\t"
        "jge 1b                         \n\t"
        : "+&r"(i)
        : "r"(dst), "r"(src), "m"(min), "m"(max)
        : "memory"
    );
}

#endif /* HAVE_INLINE_ASM */

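/* Prototypes for integer and float vector primitives implemented in
 * external (yasm) assembly; they are hooked up by the per-CPU init
 * functions below. */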
int32_t ff_scalarproduct_int16_mmxext(const int16_t *v1, const int16_t *v2,
                                      int order);
int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2,
                                    int order);
int32_t ff_scalarproduct_and_madd_int16_mmxext(int16_t *v1, const int16_t *v2,
                                               const int16_t *v3,
                                               int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2,
                                             const int16_t *v3,
                                             int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
                                              const int16_t *v3,
                                              int order, int mul);

void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
                                        const int16_t *window, unsigned int len);
void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);
void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
                                  const int16_t *window, unsigned int len);
void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
                                const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
                                 const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);

void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w);
void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w);

void ff_add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top,
                                          const uint8_t *diff, int w,
                                          int *left, int *left_top);
int  ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src,
                                       int w, int left);
int  ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src,
                                      int w, int left);

float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);

void ff_vector_fmul_reverse_sse(float *dst, const float *src0,
                                const float *src1, int len);
void ff_vector_fmul_reverse_avx(float *dst, const float *src0,
                                const float *src1, int len);

void ff_vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
                            const float *src2, int len);
void ff_vector_fmul_add_avx(float *dst, const float *src0, const float *src1,
                            const float *src2, int len);

void ff_vector_clip_int32_mmx     (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse2    (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse4    (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);

extern void ff_butterflies_float_interleave_sse(float *dst, const float *src0,
                                                const float *src1, int len);
extern void ff_butterflies_float_interleave_avx(float *dst, const float *src0,
                                                const float *src1, int len);

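/* Helper macros for filling the function-pointer tables.  The qpel tables
 * hold 16 entries indexed by x + y * 4, where x/y are the quarter-pel MV
 * offsets (mc00 being the full-pel copy); the hpel tables hold the four
 * half-pel cases (copy, x2, y2, xy2). */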
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX)                          \
    do {                                                                     \
    c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
    } while (0)

#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU)                                 \
    do {                                                                    \
    c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU;     \
    c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU;  \
    c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU;  \
    c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU; \
    } while (0)

#define H264_QPEL_FUNCS(x, y, CPU)                                                        \
    do {                                                                                  \
    c->put_h264_qpel_pixels_tab[0][x + y * 4] = put_h264_qpel16_mc ## x ## y ## _ ## CPU; \
    c->put_h264_qpel_pixels_tab[1][x + y * 4] = put_h264_qpel8_mc ## x ## y ## _ ## CPU;  \
    c->avg_h264_qpel_pixels_tab[0][x + y * 4] = avg_h264_qpel16_mc ## x ## y ## _ ## CPU; \
    c->avg_h264_qpel_pixels_tab[1][x + y * 4] = avg_h264_qpel8_mc ## x ## y ## _ ## CPU;  \
    } while (0)

#define H264_QPEL_FUNCS_10(x, y, CPU)                                                           \
    do {                                                                                        \
    c->put_h264_qpel_pixels_tab[0][x + y * 4] = ff_put_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \
    c->put_h264_qpel_pixels_tab[1][x + y * 4] = ff_put_h264_qpel8_mc ## x ## y ## _10_ ## CPU;  \
    c->avg_h264_qpel_pixels_tab[0][x + y * 4] = ff_avg_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \
    c->avg_h264_qpel_pixels_tab[1][x + y * 4] = ff_avg_h264_qpel8_mc ## x ## y ## _10_ ## CPU;  \
    } while (0)

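/* One initializer per CPU feature level; ff_dsputil_init_mmx() at the end of
 * this file calls them in increasing feature order, so later (faster)
 * versions overwrite the function pointers installed by earlier ones. */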
static void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
{
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

#if HAVE_INLINE_ASM
    c->put_pixels_clamped        = ff_put_pixels_clamped_mmx;
    c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
    c->add_pixels_clamped        = ff_add_pixels_clamped_mmx;

    if (!high_bit_depth) {
        c->clear_block  = clear_block_mmx;
        c->clear_blocks = clear_blocks_mmx;
        c->draw_edges   = draw_edges_mmx;

        SET_HPEL_FUNCS(put,        0, 16, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(avg,        0, 16, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(put,        1,  8, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 1,  8, mmx);
        SET_HPEL_FUNCS(avg,        1,  8, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 1,  8, mmx);

        switch (avctx->idct_algo) {
        case FF_IDCT_AUTO:
        case FF_IDCT_SIMPLEMMX:
            c->idct_put              = ff_simple_idct_put_mmx;
            c->idct_add              = ff_simple_idct_add_mmx;
            c->idct                  = ff_simple_idct_mmx;
            c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
            break;
        case FF_IDCT_XVIDMMX:
            c->idct_put = ff_idct_xvid_mmx_put;
            c->idct_add = ff_idct_xvid_mmx_add;
            c->idct     = ff_idct_xvid_mmx;
            break;
        }
    }

    c->gmc = gmc_mmx;

    c->add_bytes = add_bytes_mmx;

    if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
        c->h263_v_loop_filter = h263_v_loop_filter_mmx;
        c->h263_h_loop_filter = h263_h_loop_filter_mmx;
    }
#endif /* HAVE_INLINE_ASM */

#if HAVE_YASM
    if (!high_bit_depth)
        c->emulated_edge_mc = emulated_edge_mc_mmx;

    if (!high_bit_depth && CONFIG_H264CHROMA) {
        c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_rnd_mmx;
        c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_mmx;
    }

    c->vector_clip_int32 = ff_vector_clip_int32_mmx;
#endif /* HAVE_YASM */
}

static void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx,
                                int mm_flags)
{
    const int bit_depth      = avctx->bits_per_raw_sample;
    const int high_bit_depth = bit_depth > 8;

#if HAVE_INLINE_ASM
    c->prefetch = prefetch_mmxext;

    if (!high_bit_depth) {
        c->put_pixels_tab[0][1] = put_pixels16_x2_mmxext;
        c->put_pixels_tab[0][2] = put_pixels16_y2_mmxext;

        c->avg_pixels_tab[0][0] = avg_pixels16_mmxext;
        c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmxext;
        c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmxext;

        c->put_pixels_tab[1][1] = put_pixels8_x2_mmxext;
        c->put_pixels_tab[1][2] = put_pixels8_y2_mmxext;

        c->avg_pixels_tab[1][0] = avg_pixels8_mmxext;
        c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmxext;
        c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmxext;
    }

    if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
        if (!high_bit_depth) {
            c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmxext;
            c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmxext;
            c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmxext;
            c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmxext;

            c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmxext;
            c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmxext;
        }
    }

    if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
        c->idct_put = ff_idct_xvid_mmxext_put;
        c->idct_add = ff_idct_xvid_mmxext_add;
        c->idct     = ff_idct_xvid_mmxext;
    }

    if (CONFIG_VP3_DECODER && (avctx->codec_id == AV_CODEC_ID_VP3 ||
                               avctx->codec_id == AV_CODEC_ID_THEORA)) {
        c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmxext;
        c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmxext;
    }
#endif /* HAVE_INLINE_ASM */

#if HAVE_MMXEXT_EXTERNAL
    if (CONFIG_H264QPEL) {
        SET_QPEL_FUNCS(put_qpel,        0, 16, mmxext, );
        SET_QPEL_FUNCS(put_qpel,        1,  8, mmxext, );
        SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmxext, );
        SET_QPEL_FUNCS(put_no_rnd_qpel, 1,  8, mmxext, );
        SET_QPEL_FUNCS(avg_qpel,        0, 16, mmxext, );
        SET_QPEL_FUNCS(avg_qpel,        1,  8, mmxext, );

        if (!high_bit_depth) {
            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmxext, );
            SET_QPEL_FUNCS(put_h264_qpel, 1,  8, mmxext, );
            SET_QPEL_FUNCS(put_h264_qpel, 2,  4, mmxext, );
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmxext, );
            SET_QPEL_FUNCS(avg_h264_qpel, 1,  8, mmxext, );
            SET_QPEL_FUNCS(avg_h264_qpel, 2,  4, mmxext, );
        } else if (bit_depth == 10) {
#if !ARCH_X86_64
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_mmxext, ff_);
            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_mmxext, ff_);
            SET_QPEL_FUNCS(put_h264_qpel, 1,  8, 10_mmxext, ff_);
            SET_QPEL_FUNCS(avg_h264_qpel, 1,  8, 10_mmxext, ff_);
#endif
            SET_QPEL_FUNCS(put_h264_qpel, 2,  4, 10_mmxext, ff_);
            SET_QPEL_FUNCS(avg_h264_qpel, 2,  4, 10_mmxext, ff_);
        }

        SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmxext, );
        SET_QPEL_FUNCS(put_2tap_qpel, 1,  8, mmxext, );
        SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmxext, );
        SET_QPEL_FUNCS(avg_2tap_qpel, 1,  8, mmxext, );
    }

    if (!high_bit_depth && CONFIG_H264CHROMA) {
        c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_mmxext;
        c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_mmxext;
        c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_mmxext;
        c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_mmxext;
    }
    if (bit_depth == 10 && CONFIG_H264CHROMA) {
        c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_10_mmxext;
        c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_10_mmxext;
        c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_10_mmxext;
        c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_10_mmxext;
    }

    /* slower than cmov version on AMD */
    if (!(mm_flags & AV_CPU_FLAG_3DNOW))
        c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmxext;

    c->scalarproduct_int16          = ff_scalarproduct_int16_mmxext;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmxext;

    if (avctx->flags & CODEC_FLAG_BITEXACT) {
        c->apply_window_int16 = ff_apply_window_int16_mmxext;
    } else {
        c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
    }
#endif /* HAVE_MMXEXT_EXTERNAL */
}

static void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx,
                               int mm_flags)
{
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

#if HAVE_INLINE_ASM
    c->prefetch = prefetch_3dnow;

    if (!high_bit_depth) {
        c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
        c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

        c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
        c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
        c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

        c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
        c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

        c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
        c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
        c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
            c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
            c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
            c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;

            c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
            c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
        }
    }

    if (CONFIG_VP3_DECODER && (avctx->codec_id == AV_CODEC_ID_VP3 ||
                               avctx->codec_id == AV_CODEC_ID_THEORA)) {
        c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
        c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
    }

    c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
#endif /* HAVE_INLINE_ASM */

#if HAVE_YASM
    if (!high_bit_depth && CONFIG_H264CHROMA) {
        c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_3dnow;
        c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_3dnow;
    }
#endif /* HAVE_YASM */
}

static void dsputil_init_3dnowext(DSPContext *c, AVCodecContext *avctx,
                                  int mm_flags)
{
#if HAVE_AMD3DNOWEXT_INLINE && HAVE_6REGS
    c->vector_fmul_window = vector_fmul_window_3dnowext;
#endif
}

static void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx, int mm_flags)
{
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

#if HAVE_INLINE_ASM
    if (!high_bit_depth) {
        if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)) {
            /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
            c->clear_block  = clear_block_sse;
            c->clear_blocks = clear_blocks_sse;
        }
    }

    c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;

#if HAVE_6REGS
    c->vector_fmul_window = vector_fmul_window_sse;
#endif

    c->vector_clipf = vector_clipf_sse;
#endif /* HAVE_INLINE_ASM */

#if HAVE_YASM
    c->vector_fmul_reverse = ff_vector_fmul_reverse_sse;
    c->vector_fmul_add     = ff_vector_fmul_add_sse;

    c->scalarproduct_float          = ff_scalarproduct_float_sse;
    c->butterflies_float_interleave = ff_butterflies_float_interleave_sse;

    if (!high_bit_depth)
        c->emulated_edge_mc = emulated_edge_mc_sse;
#endif /* HAVE_YASM */
}

static void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
                              int mm_flags)
{
#if HAVE_SSE2_EXTERNAL
    const int bit_depth      = avctx->bits_per_raw_sample;
    const int high_bit_depth = bit_depth > 8;

    if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
        // these functions are slower than mmx on AMD, but faster on Intel
        if (!high_bit_depth) {
            c->put_pixels_tab[0][0]        = ff_put_pixels16_sse2;
            c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_sse2;
            c->avg_pixels_tab[0][0]        = ff_avg_pixels16_sse2;
            if (CONFIG_H264QPEL)
                H264_QPEL_FUNCS(0, 0, sse2);
        }
    }

    if (!high_bit_depth && CONFIG_H264QPEL) {
        H264_QPEL_FUNCS(0, 1, sse2);
        H264_QPEL_FUNCS(0, 2, sse2);
        H264_QPEL_FUNCS(0, 3, sse2);
        H264_QPEL_FUNCS(1, 1, sse2);
        H264_QPEL_FUNCS(1, 2, sse2);
        H264_QPEL_FUNCS(1, 3, sse2);
        H264_QPEL_FUNCS(2, 1, sse2);
        H264_QPEL_FUNCS(2, 2, sse2);
        H264_QPEL_FUNCS(2, 3, sse2);
        H264_QPEL_FUNCS(3, 1, sse2);
        H264_QPEL_FUNCS(3, 2, sse2);
        H264_QPEL_FUNCS(3, 3, sse2);
    }

    if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
        c->idct_put              = ff_idct_xvid_sse2_put;
        c->idct_add              = ff_idct_xvid_sse2_add;
        c->idct                  = ff_idct_xvid_sse2;
        c->idct_permutation_type = FF_SSE2_IDCT_PERM;
    }

    if (bit_depth == 10) {
        if (CONFIG_H264QPEL) {
            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_sse2, ff_);
            SET_QPEL_FUNCS(put_h264_qpel, 1,  8, 10_sse2, ff_);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_sse2, ff_);
            SET_QPEL_FUNCS(avg_h264_qpel, 1,  8, 10_sse2, ff_);
            H264_QPEL_FUNCS_10(1, 0, sse2_cache64);
            H264_QPEL_FUNCS_10(2, 0, sse2_cache64);
            H264_QPEL_FUNCS_10(3, 0, sse2_cache64);
        }
        if (CONFIG_H264CHROMA) {
            c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_sse2;
            c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_sse2;
        }
    }

    c->scalarproduct_int16          = ff_scalarproduct_int16_sse2;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;

    if (mm_flags & AV_CPU_FLAG_ATOM) {
        c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
    } else {
        c->vector_clip_int32 = ff_vector_clip_int32_sse2;
    }

    if (avctx->flags & CODEC_FLAG_BITEXACT) {
        c->apply_window_int16 = ff_apply_window_int16_sse2;
    } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
        c->apply_window_int16 = ff_apply_window_int16_round_sse2;
    }

    c->bswap_buf = ff_bswap32_buf_sse2;
#endif /* HAVE_SSE2_EXTERNAL */
}

static void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
                               int mm_flags)
{
#if HAVE_SSSE3_EXTERNAL
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;
    const int bit_depth      = avctx->bits_per_raw_sample;

    if (!high_bit_depth && CONFIG_H264QPEL) {
        H264_QPEL_FUNCS(1, 0, ssse3);
        H264_QPEL_FUNCS(1, 1, ssse3);
        H264_QPEL_FUNCS(1, 2, ssse3);
        H264_QPEL_FUNCS(1, 3, ssse3);
        H264_QPEL_FUNCS(2, 0, ssse3);
        H264_QPEL_FUNCS(2, 1, ssse3);
        H264_QPEL_FUNCS(2, 2, ssse3);
        H264_QPEL_FUNCS(2, 3, ssse3);
        H264_QPEL_FUNCS(3, 0, ssse3);
        H264_QPEL_FUNCS(3, 1, ssse3);
        H264_QPEL_FUNCS(3, 2, ssse3);
        H264_QPEL_FUNCS(3, 3, ssse3);
    }
    if (bit_depth == 10 && CONFIG_H264QPEL) {
        H264_QPEL_FUNCS_10(1, 0, ssse3_cache64);
        H264_QPEL_FUNCS_10(2, 0, ssse3_cache64);
        H264_QPEL_FUNCS_10(3, 0, ssse3_cache64);
    }
    if (!high_bit_depth && CONFIG_H264CHROMA) {
        c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_rnd_ssse3;
        c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_ssse3;
        c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_ssse3;
        c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_ssse3;
    }

    c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
    if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
        c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;

    if (mm_flags & AV_CPU_FLAG_ATOM)
        c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
    else
        c->apply_window_int16 = ff_apply_window_int16_ssse3;
    if (!(mm_flags & (AV_CPU_FLAG_SSE42 | AV_CPU_FLAG_3DNOW))) // cachesplit
        c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
    c->bswap_buf = ff_bswap32_buf_ssse3;
#endif /* HAVE_SSSE3_EXTERNAL */
}

static void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
                              int mm_flags)
{
#if HAVE_SSE4_EXTERNAL
    c->vector_clip_int32 = ff_vector_clip_int32_sse4;
#endif /* HAVE_SSE4_EXTERNAL */
}

static void dsputil_init_avx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
{
#if HAVE_AVX_EXTERNAL
    const int bit_depth = avctx->bits_per_raw_sample;

    if (bit_depth == 10) {
        // AVX implies !cache64.
        // TODO: Port cache(32|64) detection from x264.
        if (CONFIG_H264QPEL) {
            H264_QPEL_FUNCS_10(1, 0, sse2);
            H264_QPEL_FUNCS_10(2, 0, sse2);
            H264_QPEL_FUNCS_10(3, 0, sse2);
        }

        if (CONFIG_H264CHROMA) {
            c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_avx;
            c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_avx;
        }
    }

    c->butterflies_float_interleave = ff_butterflies_float_interleave_avx;
    c->vector_fmul_reverse          = ff_vector_fmul_reverse_avx;
    c->vector_fmul_add              = ff_vector_fmul_add_avx;
#endif /* HAVE_AVX_EXTERNAL */
}

void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();

#if HAVE_7REGS && HAVE_INLINE_ASM
    if (mm_flags & AV_CPU_FLAG_CMOV)
        c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif

    if (mm_flags & AV_CPU_FLAG_MMX)
        dsputil_init_mmx(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_MMXEXT)
        dsputil_init_mmxext(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_3DNOW)
        dsputil_init_3dnow(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_3DNOWEXT)
        dsputil_init_3dnowext(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE)
        dsputil_init_sse(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE2)
        dsputil_init_sse2(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSSE3)
        dsputil_init_ssse3(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE4)
        dsputil_init_sse4(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_AVX)
        dsputil_init_avx(c, avctx, mm_flags);

    if (CONFIG_ENCODERS)
        ff_dsputilenc_init_mmx(c, avctx);
}