2 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
4 * This file is part of Libav.
6 * Libav is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * Libav is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with Libav; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 #if COMPILE_TEMPLATE_MMX2
26 #define PREFETCH "prefetchnta"
28 #define PREFETCH " # nop"
31 #if COMPILE_TEMPLATE_MMX2
32 #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
34 #define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
36 #define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
38 #define YSCALEYUV2YV12X(offset, dest, end, pos) \
40 "movq "DITHER16"+0(%0), %%mm3 \n\t"\
41 "movq "DITHER16"+8(%0), %%mm4 \n\t"\
42 "lea " offset "(%0), %%"REG_d" \n\t"\
43 "mov (%%"REG_d"), %%"REG_S" \n\t"\
44 ".p2align 4 \n\t" /* FIXME Unroll? */\
46 "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
47 "movq (%%"REG_S", %3, 2), %%mm2 \n\t" /* srcData */\
48 "movq 8(%%"REG_S", %3, 2), %%mm5 \n\t" /* srcData */\
49 "add $16, %%"REG_d" \n\t"\
50 "mov (%%"REG_d"), %%"REG_S" \n\t"\
51 "test %%"REG_S", %%"REG_S" \n\t"\
52 "pmulhw %%mm0, %%mm2 \n\t"\
53 "pmulhw %%mm0, %%mm5 \n\t"\
54 "paddw %%mm2, %%mm3 \n\t"\
55 "paddw %%mm5, %%mm4 \n\t"\
57 "psraw $3, %%mm3 \n\t"\
58 "psraw $3, %%mm4 \n\t"\
59 "packuswb %%mm4, %%mm3 \n\t"\
60 MOVNTQ(%%mm3, (%1, %3))\
63 "movq "DITHER16"+0(%0), %%mm3 \n\t"\
64 "movq "DITHER16"+8(%0), %%mm4 \n\t"\
65 "lea " offset "(%0), %%"REG_d" \n\t"\
66 "mov (%%"REG_d"), %%"REG_S" \n\t"\
68 :: "r" (&c->redDither),\
69 "r" (dest), "g" ((x86_reg)(end)), "r"((x86_reg)(pos))\
73 #if !COMPILE_TEMPLATE_MMX2
74 static av_always_inline void
75 dither_8to16(SwsContext *c, const uint8_t *srcDither, int rot)
78 __asm__ volatile("pxor %%mm0, %%mm0\n\t"
79 "movq (%0), %%mm3\n\t"
80 "movq %%mm3, %%mm4\n\t"
81 "psrlq $24, %%mm3\n\t"
82 "psllq $40, %%mm4\n\t"
83 "por %%mm4, %%mm3\n\t"
84 "movq %%mm3, %%mm4\n\t"
85 "punpcklbw %%mm0, %%mm3\n\t"
86 "punpckhbw %%mm0, %%mm4\n\t"
89 "movq %%mm3, "DITHER16"+0(%1)\n\t"
90 "movq %%mm4, "DITHER16"+8(%1)\n\t"
91 :: "r"(srcDither), "r"(&c->redDither)
94 __asm__ volatile("pxor %%mm0, %%mm0\n\t"
95 "movq (%0), %%mm3\n\t"
96 "movq %%mm3, %%mm4\n\t"
97 "punpcklbw %%mm0, %%mm3\n\t"
98 "punpckhbw %%mm0, %%mm4\n\t"
100 "psraw $4, %%mm4\n\t"
101 "movq %%mm3, "DITHER16"+0(%1)\n\t"
102 "movq %%mm4, "DITHER16"+8(%1)\n\t"
103 :: "r"(srcDither), "r"(&c->redDither)
109 static void RENAME(yuv2yuvX)(SwsContext *c, const int16_t *lumFilter,
110 const int16_t **lumSrc, int lumFilterSize,
111 const int16_t *chrFilter, const int16_t **chrUSrc,
112 const int16_t **chrVSrc,
113 int chrFilterSize, const int16_t **alpSrc,
114 uint8_t *dest[4], int dstW, int chrDstW)
116 uint8_t *yDest = dest[0], *uDest = dest[1], *vDest = dest[2],
117 *aDest = CONFIG_SWSCALE_ALPHA ? dest[3] : NULL;
118 const uint8_t *lumDither = c->lumDither8, *chrDither = c->chrDither8;
121 x86_reg uv_off = c->uv_off_byte >> 1;
122 dither_8to16(c, chrDither, 0);
123 YSCALEYUV2YV12X(CHR_MMX_FILTER_OFFSET, uDest, chrDstW, 0)
124 dither_8to16(c, chrDither, 1);
125 YSCALEYUV2YV12X(CHR_MMX_FILTER_OFFSET, vDest - uv_off, chrDstW + uv_off, uv_off)
127 dither_8to16(c, lumDither, 0);
128 if (CONFIG_SWSCALE_ALPHA && aDest) {
129 YSCALEYUV2YV12X(ALP_MMX_FILTER_OFFSET, aDest, dstW, 0)
132 YSCALEYUV2YV12X(LUM_MMX_FILTER_OFFSET, yDest, dstW, 0)
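/* A rough scalar picture of one output byte of the fast path above
 * (illustrative sketch; dither16[] is the row prepared by dither_8to16(),
 * i.e. the 8-bit dither values widened and shifted right by 4, and
 * av_clip_uint8() stands in for the packuswb saturation):
 *
 *     int val = dither16[i & 7];
 *     for (j = 0; j < filterSize; j++)
 *         val += (src[j][i] * filter[j]) >> 16;   // pmulhw, truncated per tap
 *     dest[i] = av_clip_uint8(val >> 3);
 *
 * The per-tap truncation is what the "_ar" (accurate rounding) variants
 * below avoid.
 */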
135 #define YSCALEYUV2YV12X_ACCURATE(offset, dest, end, pos) \
137 "lea " offset "(%0), %%"REG_d" \n\t"\
138 "movq "DITHER32"+0(%0), %%mm4 \n\t"\
139 "movq "DITHER32"+8(%0), %%mm5 \n\t"\
140 "movq "DITHER32"+16(%0), %%mm6 \n\t"\
141 "movq "DITHER32"+24(%0), %%mm7 \n\t"\
142 "mov (%%"REG_d"), %%"REG_S" \n\t"\
145 "movq (%%"REG_S", %3, 2), %%mm0 \n\t" /* srcData */\
146 "movq 8(%%"REG_S", %3, 2), %%mm2 \n\t" /* srcData */\
147 "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
148 "movq (%%"REG_S", %3, 2), %%mm1 \n\t" /* srcData */\
149 "movq %%mm0, %%mm3 \n\t"\
150 "punpcklwd %%mm1, %%mm0 \n\t"\
151 "punpckhwd %%mm1, %%mm3 \n\t"\
152 "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
153 "pmaddwd %%mm1, %%mm0 \n\t"\
154 "pmaddwd %%mm1, %%mm3 \n\t"\
155 "paddd %%mm0, %%mm4 \n\t"\
156 "paddd %%mm3, %%mm5 \n\t"\
157 "movq 8(%%"REG_S", %3, 2), %%mm3 \n\t" /* srcData */\
158 "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
159 "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
160 "test %%"REG_S", %%"REG_S" \n\t"\
161 "movq %%mm2, %%mm0 \n\t"\
162 "punpcklwd %%mm3, %%mm2 \n\t"\
163 "punpckhwd %%mm3, %%mm0 \n\t"\
164 "pmaddwd %%mm1, %%mm2 \n\t"\
165 "pmaddwd %%mm1, %%mm0 \n\t"\
166 "paddd %%mm2, %%mm6 \n\t"\
167 "paddd %%mm0, %%mm7 \n\t"\
169 "psrad $16, %%mm4 \n\t"\
170 "psrad $16, %%mm5 \n\t"\
171 "psrad $16, %%mm6 \n\t"\
172 "psrad $16, %%mm7 \n\t"\
173 "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
174 "packssdw %%mm5, %%mm4 \n\t"\
175 "packssdw %%mm7, %%mm6 \n\t"\
176 "paddw %%mm0, %%mm4 \n\t"\
177 "paddw %%mm0, %%mm6 \n\t"\
178 "psraw $3, %%mm4 \n\t"\
179 "psraw $3, %%mm6 \n\t"\
180 "packuswb %%mm6, %%mm4 \n\t"\
181 MOVNTQ(%%mm4, (%1, %3))\
184 "lea " offset "(%0), %%"REG_d" \n\t"\
185 "movq "DITHER32"+0(%0), %%mm4 \n\t"\
186 "movq "DITHER32"+8(%0), %%mm5 \n\t"\
187 "movq "DITHER32"+16(%0), %%mm6 \n\t"\
188 "movq "DITHER32"+24(%0), %%mm7 \n\t"\
189 "mov (%%"REG_d"), %%"REG_S" \n\t"\
191 :: "r" (&c->redDither),\
192 "r" (dest), "g" ((x86_reg)(end)), "r"((x86_reg)(pos))\
193 : "%"REG_a, "%"REG_d, "%"REG_S\
196 #if !COMPILE_TEMPLATE_MMX2
197 static av_always_inline void
198 dither_8to32(SwsContext *c, const uint8_t *srcDither, int rot)
201 __asm__ volatile("pxor %%mm0, %%mm0\n\t"
202 "movq (%0), %%mm4\n\t"
203 "movq %%mm4, %%mm5\n\t"
204 "psrlq $24, %%mm4\n\t"
205 "psllq $40, %%mm5\n\t"
206 "por %%mm5, %%mm4\n\t"
207 "movq %%mm4, %%mm6\n\t"
208 "punpcklbw %%mm0, %%mm4\n\t"
209 "punpckhbw %%mm0, %%mm6\n\t"
210 "movq %%mm4, %%mm5\n\t"
211 "movq %%mm6, %%mm7\n\t"
212 "punpcklwd %%mm0, %%mm4\n\t"
213 "punpckhwd %%mm0, %%mm5\n\t"
214 "punpcklwd %%mm0, %%mm6\n\t"
215 "punpckhwd %%mm0, %%mm7\n\t"
216 "pslld $12, %%mm4\n\t"
217 "pslld $12, %%mm5\n\t"
218 "pslld $12, %%mm6\n\t"
219 "pslld $12, %%mm7\n\t"
220 "movq %%mm4, "DITHER32"+0(%1)\n\t"
221 "movq %%mm5, "DITHER32"+8(%1)\n\t"
222 "movq %%mm6, "DITHER32"+16(%1)\n\t"
223 "movq %%mm7, "DITHER32"+24(%1)\n\t"
224 :: "r"(srcDither), "r"(&c->redDither)
227 __asm__ volatile("pxor %%mm0, %%mm0\n\t"
228 "movq (%0), %%mm4\n\t"
229 "movq %%mm4, %%mm6\n\t"
230 "punpcklbw %%mm0, %%mm4\n\t"
231 "punpckhbw %%mm0, %%mm6\n\t"
232 "movq %%mm4, %%mm5\n\t"
233 "movq %%mm6, %%mm7\n\t"
234 "punpcklwd %%mm0, %%mm4\n\t"
235 "punpckhwd %%mm0, %%mm5\n\t"
236 "punpcklwd %%mm0, %%mm6\n\t"
237 "punpckhwd %%mm0, %%mm7\n\t"
238 "pslld $12, %%mm4\n\t"
239 "pslld $12, %%mm5\n\t"
240 "pslld $12, %%mm6\n\t"
241 "pslld $12, %%mm7\n\t"
242 "movq %%mm4, "DITHER32"+0(%1)\n\t"
243 "movq %%mm5, "DITHER32"+8(%1)\n\t"
244 "movq %%mm6, "DITHER32"+16(%1)\n\t"
245 "movq %%mm7, "DITHER32"+24(%1)\n\t"
246 :: "r"(srcDither), "r"(&c->redDither)
252 static void RENAME(yuv2yuvX_ar)(SwsContext *c, const int16_t *lumFilter,
253 const int16_t **lumSrc, int lumFilterSize,
254 const int16_t *chrFilter, const int16_t **chrUSrc,
255 const int16_t **chrVSrc,
256 int chrFilterSize, const int16_t **alpSrc,
257 uint8_t *dest[4], int dstW, int chrDstW)
259 uint8_t *yDest = dest[0], *uDest = dest[1], *vDest = dest[2],
260 *aDest = CONFIG_SWSCALE_ALPHA ? dest[3] : NULL;
261 const uint8_t *lumDither = c->lumDither8, *chrDither = c->chrDither8;
264 x86_reg uv_off = c->uv_off_byte >> 1;
265 dither_8to32(c, chrDither, 0);
266 YSCALEYUV2YV12X_ACCURATE(CHR_MMX_FILTER_OFFSET, uDest, chrDstW, 0)
267 dither_8to32(c, chrDither, 1);
268 YSCALEYUV2YV12X_ACCURATE(CHR_MMX_FILTER_OFFSET, vDest - uv_off, chrDstW + uv_off, uv_off)
270 dither_8to32(c, lumDither, 0);
271 if (CONFIG_SWSCALE_ALPHA && aDest) {
272 YSCALEYUV2YV12X_ACCURATE(ALP_MMX_FILTER_OFFSET, aDest, dstW, 0)
275 YSCALEYUV2YV12X_ACCURATE(LUM_MMX_FILTER_OFFSET, yDest, dstW, 0)
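/* The "_ar" path keeps full precision: dither_8to32() preloads the dither as
 * 32-bit values shifted left by 12, the taps are accumulated with pmaddwd,
 * and only the final sum is shifted down. Per output byte this is roughly
 * (a sketch; the VROUNDER_OFFSET rounding constant is folded into the shift):
 *
 *     int val = dither[i & 7] << 12;
 *     for (j = 0; j < filterSize; j++)
 *         val += src[j][i] * filter[j];
 *     dest[i] = av_clip_uint8(val >> 19);
 */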
278 static void RENAME(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc,
279 const int16_t *chrUSrc, const int16_t *chrVSrc,
280 const int16_t *alpSrc,
281 uint8_t *dst[4], int dstW, int chrDstW)
284 const int16_t *src[4]= {
285 lumSrc + dstW, chrUSrc + chrDstW,
286 chrVSrc + chrDstW, alpSrc + dstW
288 x86_reg counter[4]= { dstW, chrDstW, chrDstW, dstW };
293 "mov %2, %%"REG_a" \n\t"
294 ".p2align 4 \n\t" /* FIXME Unroll? */
296 "movq (%0, %%"REG_a", 2), %%mm0 \n\t"
297 "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"
298 "psraw $7, %%mm0 \n\t"
299 "psraw $7, %%mm1 \n\t"
300 "packuswb %%mm1, %%mm0 \n\t"
301 MOVNTQ(%%mm0, (%1, %%REGa))
302 "add $8, %%"REG_a" \n\t"
304 :: "r" (src[p]), "r" (dst[p] + counter[p]),
312 static void RENAME(yuv2yuv1_ar)(SwsContext *c, const int16_t *lumSrc,
313 const int16_t *chrUSrc, const int16_t *chrVSrc,
314 const int16_t *alpSrc,
315 uint8_t *dst[4], int dstW, int chrDstW)
318 const int16_t *src[4]= {
319 lumSrc + dstW, chrUSrc + chrDstW,
320 chrVSrc + chrDstW, alpSrc + dstW
322 x86_reg counter[4]= { dstW, chrDstW, chrDstW, dstW };
323 const uint8_t *lumDither = c->lumDither8, *chrDither = c->chrDither8;
327 dither_8to16(c, (p == 2 || p == 3) ? chrDither : lumDither, p == 2);
329 "mov %2, %%"REG_a" \n\t"
330 "movq "DITHER16"+0(%3), %%mm6 \n\t"
331 "movq "DITHER16"+8(%3), %%mm7 \n\t"
332 ".p2align 4 \n\t" /* FIXME Unroll? */
334 "movq (%0, %%"REG_a", 2), %%mm0 \n\t"
335 "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"
336 "paddsw %%mm6, %%mm0 \n\t"
337 "paddsw %%mm7, %%mm1 \n\t"
338 "psraw $7, %%mm0 \n\t"
339 "psraw $7, %%mm1 \n\t"
340 "packuswb %%mm1, %%mm0 \n\t"
341 MOVNTQ(%%mm0, (%1, %%REGa))
342 "add $8, %%"REG_a" \n\t"
344 :: "r" (src[p]), "r" (dst[p] + counter[p]),
345 "g" (-counter[p]), "r"(&c->redDither)
352 #define YSCALEYUV2PACKEDX_UV \
354 "xor %%"REG_a", %%"REG_a" \n\t"\
358 "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
359 "mov (%%"REG_d"), %%"REG_S" \n\t"\
360 "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
361 "movq %%mm3, %%mm4 \n\t"\
364 "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
365 "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
366 "add %6, %%"REG_S" \n\t" \
367 "movq (%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
368 "add $16, %%"REG_d" \n\t"\
369 "mov (%%"REG_d"), %%"REG_S" \n\t"\
370 "pmulhw %%mm0, %%mm2 \n\t"\
371 "pmulhw %%mm0, %%mm5 \n\t"\
372 "paddw %%mm2, %%mm3 \n\t"\
373 "paddw %%mm5, %%mm4 \n\t"\
374 "test %%"REG_S", %%"REG_S" \n\t"\
377 #define YSCALEYUV2PACKEDX_YA(offset,coeff,src1,src2,dst1,dst2) \
378 "lea "offset"(%0), %%"REG_d" \n\t"\
379 "mov (%%"REG_d"), %%"REG_S" \n\t"\
380 "movq "VROUNDER_OFFSET"(%0), "#dst1" \n\t"\
381 "movq "#dst1", "#dst2" \n\t"\
384 "movq 8(%%"REG_d"), "#coeff" \n\t" /* filterCoeff */\
385 "movq (%%"REG_S", %%"REG_a", 2), "#src1" \n\t" /* Y1srcData */\
386 "movq 8(%%"REG_S", %%"REG_a", 2), "#src2" \n\t" /* Y2srcData */\
387 "add $16, %%"REG_d" \n\t"\
388 "mov (%%"REG_d"), %%"REG_S" \n\t"\
389 "pmulhw "#coeff", "#src1" \n\t"\
390 "pmulhw "#coeff", "#src2" \n\t"\
391 "paddw "#src1", "#dst1" \n\t"\
392 "paddw "#src2", "#dst2" \n\t"\
393 "test %%"REG_S", %%"REG_S" \n\t"\
396 #define YSCALEYUV2PACKEDX \
397 YSCALEYUV2PACKEDX_UV \
398 YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET,%%mm0,%%mm2,%%mm5,%%mm1,%%mm7) \
400 #define YSCALEYUV2PACKEDX_END \
401 :: "r" (&c->redDither), \
402 "m" (dummy), "m" (dummy), "m" (dummy),\
403 "r" (dest), "m" (dstW_reg), "m"(uv_off) \
404 : "%"REG_a, "%"REG_d, "%"REG_S \
407 #define YSCALEYUV2PACKEDX_ACCURATE_UV \
409 "xor %%"REG_a", %%"REG_a" \n\t"\
413 "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
414 "mov (%%"REG_d"), %%"REG_S" \n\t"\
415 "pxor %%mm4, %%mm4 \n\t"\
416 "pxor %%mm5, %%mm5 \n\t"\
417 "pxor %%mm6, %%mm6 \n\t"\
418 "pxor %%mm7, %%mm7 \n\t"\
421 "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
422 "add %6, %%"REG_S" \n\t" \
423 "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
424 "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
425 "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
426 "movq %%mm0, %%mm3 \n\t"\
427 "punpcklwd %%mm1, %%mm0 \n\t"\
428 "punpckhwd %%mm1, %%mm3 \n\t"\
429 "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1 \n\t" /* filterCoeff */\
430 "pmaddwd %%mm1, %%mm0 \n\t"\
431 "pmaddwd %%mm1, %%mm3 \n\t"\
432 "paddd %%mm0, %%mm4 \n\t"\
433 "paddd %%mm3, %%mm5 \n\t"\
434 "add %6, %%"REG_S" \n\t" \
435 "movq (%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
436 "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
437 "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
438 "test %%"REG_S", %%"REG_S" \n\t"\
439 "movq %%mm2, %%mm0 \n\t"\
440 "punpcklwd %%mm3, %%mm2 \n\t"\
441 "punpckhwd %%mm3, %%mm0 \n\t"\
442 "pmaddwd %%mm1, %%mm2 \n\t"\
443 "pmaddwd %%mm1, %%mm0 \n\t"\
444 "paddd %%mm2, %%mm6 \n\t"\
445 "paddd %%mm0, %%mm7 \n\t"\
447 "psrad $16, %%mm4 \n\t"\
448 "psrad $16, %%mm5 \n\t"\
449 "psrad $16, %%mm6 \n\t"\
450 "psrad $16, %%mm7 \n\t"\
451 "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
452 "packssdw %%mm5, %%mm4 \n\t"\
453 "packssdw %%mm7, %%mm6 \n\t"\
454 "paddw %%mm0, %%mm4 \n\t"\
455 "paddw %%mm0, %%mm6 \n\t"\
456 "movq %%mm4, "U_TEMP"(%0) \n\t"\
457 "movq %%mm6, "V_TEMP"(%0) \n\t"\
459 #define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \
460 "lea "offset"(%0), %%"REG_d" \n\t"\
461 "mov (%%"REG_d"), %%"REG_S" \n\t"\
462 "pxor %%mm1, %%mm1 \n\t"\
463 "pxor %%mm5, %%mm5 \n\t"\
464 "pxor %%mm7, %%mm7 \n\t"\
465 "pxor %%mm6, %%mm6 \n\t"\
468 "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
469 "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
470 "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
471 "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
472 "movq %%mm0, %%mm3 \n\t"\
473 "punpcklwd %%mm4, %%mm0 \n\t"\
474 "punpckhwd %%mm4, %%mm3 \n\t"\
475 "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
476 "pmaddwd %%mm4, %%mm0 \n\t"\
477 "pmaddwd %%mm4, %%mm3 \n\t"\
478 "paddd %%mm0, %%mm1 \n\t"\
479 "paddd %%mm3, %%mm5 \n\t"\
480 "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
481 "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
482 "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
483 "test %%"REG_S", %%"REG_S" \n\t"\
484 "movq %%mm2, %%mm0 \n\t"\
485 "punpcklwd %%mm3, %%mm2 \n\t"\
486 "punpckhwd %%mm3, %%mm0 \n\t"\
487 "pmaddwd %%mm4, %%mm2 \n\t"\
488 "pmaddwd %%mm4, %%mm0 \n\t"\
489 "paddd %%mm2, %%mm7 \n\t"\
490 "paddd %%mm0, %%mm6 \n\t"\
492 "psrad $16, %%mm1 \n\t"\
493 "psrad $16, %%mm5 \n\t"\
494 "psrad $16, %%mm7 \n\t"\
495 "psrad $16, %%mm6 \n\t"\
496 "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
497 "packssdw %%mm5, %%mm1 \n\t"\
498 "packssdw %%mm6, %%mm7 \n\t"\
499 "paddw %%mm0, %%mm1 \n\t"\
500 "paddw %%mm0, %%mm7 \n\t"\
501 "movq "U_TEMP"(%0), %%mm3 \n\t"\
502 "movq "V_TEMP"(%0), %%mm4 \n\t"\
504 #define YSCALEYUV2PACKEDX_ACCURATE \
505 YSCALEYUV2PACKEDX_ACCURATE_UV \
506 YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET)
508 #define YSCALEYUV2RGBX \
509 "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
510 "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
511 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
512 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
513 "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
514 "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
515 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
516 "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
517 "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
518 "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
519 "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
520 "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
521 "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
522 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
523 "paddw %%mm3, %%mm4 \n\t"\
524 "movq %%mm2, %%mm0 \n\t"\
525 "movq %%mm5, %%mm6 \n\t"\
526 "movq %%mm4, %%mm3 \n\t"\
527 "punpcklwd %%mm2, %%mm2 \n\t"\
528 "punpcklwd %%mm5, %%mm5 \n\t"\
529 "punpcklwd %%mm4, %%mm4 \n\t"\
530 "paddw %%mm1, %%mm2 \n\t"\
531 "paddw %%mm1, %%mm5 \n\t"\
532 "paddw %%mm1, %%mm4 \n\t"\
533 "punpckhwd %%mm0, %%mm0 \n\t"\
534 "punpckhwd %%mm6, %%mm6 \n\t"\
535 "punpckhwd %%mm3, %%mm3 \n\t"\
536 "paddw %%mm7, %%mm0 \n\t"\
537 "paddw %%mm7, %%mm6 \n\t"\
538 "paddw %%mm7, %%mm3 \n\t"\
539 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
540 "packuswb %%mm0, %%mm2 \n\t"\
541 "packuswb %%mm6, %%mm5 \n\t"\
542 "packuswb %%mm3, %%mm4 \n\t"\
544 #define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \
545 "movq "#b", "#q2" \n\t" /* B */\
546 "movq "#r", "#t" \n\t" /* R */\
547 "punpcklbw "#g", "#b" \n\t" /* GBGBGBGB 0 */\
548 "punpcklbw "#a", "#r" \n\t" /* ARARARAR 0 */\
549 "punpckhbw "#g", "#q2" \n\t" /* GBGBGBGB 2 */\
550 "punpckhbw "#a", "#t" \n\t" /* ARARARAR 2 */\
551 "movq "#b", "#q0" \n\t" /* GBGBGBGB 0 */\
552 "movq "#q2", "#q3" \n\t" /* GBGBGBGB 2 */\
553 "punpcklwd "#r", "#q0" \n\t" /* ARGBARGB 0 */\
554 "punpckhwd "#r", "#b" \n\t" /* ARGBARGB 1 */\
555 "punpcklwd "#t", "#q2" \n\t" /* ARGBARGB 2 */\
556 "punpckhwd "#t", "#q3" \n\t" /* ARGBARGB 3 */\
558 MOVNTQ( q0, (dst, index, 4))\
559 MOVNTQ( b, 8(dst, index, 4))\
560 MOVNTQ( q2, 16(dst, index, 4))\
561 MOVNTQ( q3, 24(dst, index, 4))\
563 "add $8, "#index" \n\t"\
564 "cmp "#dstw", "#index" \n\t"\
566 #define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)
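/* Per group of 8 pixels, REAL_WRITEBGR32 amounts to the byte interleave below
 * (sketch; callers pass either a real alpha row or all-ones via pcmpeqd):
 *
 *     for (i = 0; i < 8; i++) {
 *         dst[4 * i + 0] = b[i];
 *         dst[4 * i + 1] = g[i];
 *         dst[4 * i + 2] = r[i];
 *         dst[4 * i + 3] = a[i];
 *     }
 */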
568 static void RENAME(yuv2rgb32_X_ar)(SwsContext *c, const int16_t *lumFilter,
569 const int16_t **lumSrc, int lumFilterSize,
570 const int16_t *chrFilter, const int16_t **chrUSrc,
571 const int16_t **chrVSrc,
572 int chrFilterSize, const int16_t **alpSrc,
573 uint8_t *dest, int dstW, int dstY)
576 x86_reg dstW_reg = dstW;
577 x86_reg uv_off = c->uv_off_byte;
579 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
580 YSCALEYUV2PACKEDX_ACCURATE
582 "movq %%mm2, "U_TEMP"(%0) \n\t"
583 "movq %%mm4, "V_TEMP"(%0) \n\t"
584 "movq %%mm5, "Y_TEMP"(%0) \n\t"
585 YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET)
586 "movq "Y_TEMP"(%0), %%mm5 \n\t"
587 "psraw $3, %%mm1 \n\t"
588 "psraw $3, %%mm7 \n\t"
589 "packuswb %%mm7, %%mm1 \n\t"
590 WRITEBGR32(%4, %5, %%REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6)
591 YSCALEYUV2PACKEDX_END
593 YSCALEYUV2PACKEDX_ACCURATE
595 "pcmpeqd %%mm7, %%mm7 \n\t"
596 WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
597 YSCALEYUV2PACKEDX_END
601 static void RENAME(yuv2rgb32_X)(SwsContext *c, const int16_t *lumFilter,
602 const int16_t **lumSrc, int lumFilterSize,
603 const int16_t *chrFilter, const int16_t **chrUSrc,
604 const int16_t **chrVSrc,
605 int chrFilterSize, const int16_t **alpSrc,
606 uint8_t *dest, int dstW, int dstY)
609 x86_reg dstW_reg = dstW;
610 x86_reg uv_off = c->uv_off_byte;
612 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
615 YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
616 "psraw $3, %%mm1 \n\t"
617 "psraw $3, %%mm7 \n\t"
618 "packuswb %%mm7, %%mm1 \n\t"
619 WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
620 YSCALEYUV2PACKEDX_END
624 "pcmpeqd %%mm7, %%mm7 \n\t"
625 WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
626 YSCALEYUV2PACKEDX_END
630 #define REAL_WRITERGB16(dst, dstw, index) \
631 "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
632 "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
633 "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
634 "psrlq $3, %%mm2 \n\t"\
636 "movq %%mm2, %%mm1 \n\t"\
637 "movq %%mm4, %%mm3 \n\t"\
639 "punpcklbw %%mm7, %%mm3 \n\t"\
640 "punpcklbw %%mm5, %%mm2 \n\t"\
641 "punpckhbw %%mm7, %%mm4 \n\t"\
642 "punpckhbw %%mm5, %%mm1 \n\t"\
644 "psllq $3, %%mm3 \n\t"\
645 "psllq $3, %%mm4 \n\t"\
647 "por %%mm3, %%mm2 \n\t"\
648 "por %%mm4, %%mm1 \n\t"\
650 MOVNTQ(%%mm2, (dst, index, 2))\
651 MOVNTQ(%%mm1, 8(dst, index, 2))\
653 "add $8, "#index" \n\t"\
654 "cmp "#dstw", "#index" \n\t"\
656 #define WRITERGB16(dst, dstw, index) REAL_WRITERGB16(dst, dstw, index)
658 static void RENAME(yuv2rgb565_X_ar)(SwsContext *c, const int16_t *lumFilter,
659 const int16_t **lumSrc, int lumFilterSize,
660 const int16_t *chrFilter, const int16_t **chrUSrc,
661 const int16_t **chrVSrc,
662 int chrFilterSize, const int16_t **alpSrc,
663 uint8_t *dest, int dstW, int dstY)
666 x86_reg dstW_reg = dstW;
667 x86_reg uv_off = c->uv_off_byte;
669 YSCALEYUV2PACKEDX_ACCURATE
671 "pxor %%mm7, %%mm7 \n\t"
672 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
674 "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
675 "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
676 "paddusb "RED_DITHER"(%0), %%mm5\n\t"
678 WRITERGB16(%4, %5, %%REGa)
679 YSCALEYUV2PACKEDX_END
682 static void RENAME(yuv2rgb565_X)(SwsContext *c, const int16_t *lumFilter,
683 const int16_t **lumSrc, int lumFilterSize,
684 const int16_t *chrFilter, const int16_t **chrUSrc,
685 const int16_t **chrVSrc,
686 int chrFilterSize, const int16_t **alpSrc,
687 uint8_t *dest, int dstW, int dstY)
690 x86_reg dstW_reg = dstW;
691 x86_reg uv_off = c->uv_off_byte;
695 "pxor %%mm7, %%mm7 \n\t"
696 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
698 "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
699 "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
700 "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
702 WRITERGB16(%4, %5, %%REGa)
703 YSCALEYUV2PACKEDX_END
706 #define REAL_WRITERGB15(dst, dstw, index) \
707 "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
708 "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
709 "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
710 "psrlq $3, %%mm2 \n\t"\
711 "psrlq $1, %%mm5 \n\t"\
713 "movq %%mm2, %%mm1 \n\t"\
714 "movq %%mm4, %%mm3 \n\t"\
716 "punpcklbw %%mm7, %%mm3 \n\t"\
717 "punpcklbw %%mm5, %%mm2 \n\t"\
718 "punpckhbw %%mm7, %%mm4 \n\t"\
719 "punpckhbw %%mm5, %%mm1 \n\t"\
721 "psllq $2, %%mm3 \n\t"\
722 "psllq $2, %%mm4 \n\t"\
724 "por %%mm3, %%mm2 \n\t"\
725 "por %%mm4, %%mm1 \n\t"\
727 MOVNTQ(%%mm2, (dst, index, 2))\
728 MOVNTQ(%%mm1, 8(dst, index, 2))\
730 "add $8, "#index" \n\t"\
731 "cmp "#dstw", "#index" \n\t"\
733 #define WRITERGB15(dst, dstw, index) REAL_WRITERGB15(dst, dstw, index)
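/* Both 16-bit writers mask, shift and recombine the byte-sized channels into
 * one halfword per pixel; per pixel they are equivalent to (sketch):
 *
 *     rgb565 = ( b >> 3) | ((g >> 2) << 5) | ((r >> 3) << 11);
 *     rgb555 = ( b >> 3) | ((g >> 3) << 5) | ((r >> 3) << 10);
 *
 * The callers add the BLUE/GREEN/RED_DITHER rows with paddusb beforehand, so
 * the discarded low bits are dithered rather than simply truncated.
 */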
735 static void RENAME(yuv2rgb555_X_ar)(SwsContext *c, const int16_t *lumFilter,
736 const int16_t **lumSrc, int lumFilterSize,
737 const int16_t *chrFilter, const int16_t **chrUSrc,
738 const int16_t **chrVSrc,
739 int chrFilterSize, const int16_t **alpSrc,
740 uint8_t *dest, int dstW, int dstY)
743 x86_reg dstW_reg = dstW;
744 x86_reg uv_off = c->uv_off_byte;
746 YSCALEYUV2PACKEDX_ACCURATE
748 "pxor %%mm7, %%mm7 \n\t"
749 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
751 "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
752 "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
753 "paddusb "RED_DITHER"(%0), %%mm5\n\t"
755 WRITERGB15(%4, %5, %%REGa)
756 YSCALEYUV2PACKEDX_END
759 static void RENAME(yuv2rgb555_X)(SwsContext *c, const int16_t *lumFilter,
760 const int16_t **lumSrc, int lumFilterSize,
761 const int16_t *chrFilter, const int16_t **chrUSrc,
762 const int16_t **chrVSrc,
763 int chrFilterSize, const int16_t **alpSrc,
764 uint8_t *dest, int dstW, int dstY)
767 x86_reg dstW_reg = dstW;
768 x86_reg uv_off = c->uv_off_byte;
772 "pxor %%mm7, %%mm7 \n\t"
773 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
775 "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
776 "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
777 "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
779 WRITERGB15(%4, %5, %%REGa)
780 YSCALEYUV2PACKEDX_END
783 #define WRITEBGR24MMX(dst, dstw, index) \
784 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
785 "movq %%mm2, %%mm1 \n\t" /* B */\
786 "movq %%mm5, %%mm6 \n\t" /* R */\
787 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
788 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
789 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
790 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
791 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
792 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
793 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
794 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
795 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
796 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
798 "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
799 "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
800 "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
801 "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
803 "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
804 "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
805 "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
806 "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
808 "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
809 "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
810 "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
811 "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
813 "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
814 "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
815 "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
816 "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
817 MOVNTQ(%%mm0, (dst))\
819 "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
820 "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
821 "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
822 "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
823 MOVNTQ(%%mm6, 8(dst))\
825 "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
826 "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
827 "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
828 MOVNTQ(%%mm5, 16(dst))\
830 "add $24, "#dst" \n\t"\
832 "add $8, "#index" \n\t"\
833 "cmp "#dstw", "#index" \n\t"\
836 #define WRITEBGR24MMX2(dst, dstw, index) \
837 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
838 "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
839 "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
840 "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
841 "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
842 "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
844 "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
845 "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
846 "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
848 "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
849 "por %%mm1, %%mm6 \n\t"\
850 "por %%mm3, %%mm6 \n\t"\
851 MOVNTQ(%%mm6, (dst))\
853 "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
854 "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
855 "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
856 "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
858 "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
859 "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
860 "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
862 "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
863 "por %%mm3, %%mm6 \n\t"\
864 MOVNTQ(%%mm6, 8(dst))\
866 "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
867 "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
868 "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
870 "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
871 "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
872 "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
874 "por %%mm1, %%mm3 \n\t"\
875 "por %%mm3, %%mm6 \n\t"\
876 MOVNTQ(%%mm6, 16(dst))\
878 "add $24, "#dst" \n\t"\
880 "add $8, "#index" \n\t"\
881 "cmp "#dstw", "#index" \n\t"\
884 #if COMPILE_TEMPLATE_MMX2
886 #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
889 #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
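/* Whichever variant is selected, the result is plain 3-bytes-per-pixel
 * packing; per group of 8 pixels it is equivalent to (sketch):
 *
 *     for (i = 0; i < 8; i++) {
 *         dst[3 * i + 0] = b[i];
 *         dst[3 * i + 1] = g[i];
 *         dst[3 * i + 2] = r[i];
 *     }
 *     dst += 24;
 *
 * The MMX2 version uses pshufw and the ff_M24* masks, the plain MMX version
 * shifts the padding bytes out; the memory layout they produce is the same.
 */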
892 static void RENAME(yuv2bgr24_X_ar)(SwsContext *c, const int16_t *lumFilter,
893 const int16_t **lumSrc, int lumFilterSize,
894 const int16_t *chrFilter, const int16_t **chrUSrc,
895 const int16_t **chrVSrc,
896 int chrFilterSize, const int16_t **alpSrc,
897 uint8_t *dest, int dstW, int dstY)
900 x86_reg dstW_reg = dstW;
901 x86_reg uv_off = c->uv_off_byte;
903 YSCALEYUV2PACKEDX_ACCURATE
905 "pxor %%mm7, %%mm7 \n\t"
906 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
907 "add %4, %%"REG_c" \n\t"
908 WRITEBGR24(%%REGc, %5, %%REGa)
909 :: "r" (&c->redDither),
910 "m" (dummy), "m" (dummy), "m" (dummy),
911 "r" (dest), "m" (dstW_reg), "m"(uv_off)
912 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
916 static void RENAME(yuv2bgr24_X)(SwsContext *c, const int16_t *lumFilter,
917 const int16_t **lumSrc, int lumFilterSize,
918 const int16_t *chrFilter, const int16_t **chrUSrc,
919 const int16_t **chrVSrc,
920 int chrFilterSize, const int16_t **alpSrc,
921 uint8_t *dest, int dstW, int dstY)
924 x86_reg dstW_reg = dstW;
925 x86_reg uv_off = c->uv_off_byte;
929 "pxor %%mm7, %%mm7 \n\t"
930 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c" \n\t" //FIXME optimize
931 "add %4, %%"REG_c" \n\t"
932 WRITEBGR24(%%REGc, %5, %%REGa)
933 :: "r" (&c->redDither),
934 "m" (dummy), "m" (dummy), "m" (dummy),
935 "r" (dest), "m" (dstW_reg), "m"(uv_off)
936 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
940 #define REAL_WRITEYUY2(dst, dstw, index) \
941 "packuswb %%mm3, %%mm3 \n\t"\
942 "packuswb %%mm4, %%mm4 \n\t"\
943 "packuswb %%mm7, %%mm1 \n\t"\
944 "punpcklbw %%mm4, %%mm3 \n\t"\
945 "movq %%mm1, %%mm7 \n\t"\
946 "punpcklbw %%mm3, %%mm1 \n\t"\
947 "punpckhbw %%mm3, %%mm7 \n\t"\
949 MOVNTQ(%%mm1, (dst, index, 2))\
950 MOVNTQ(%%mm7, 8(dst, index, 2))\
952 "add $8, "#index" \n\t"\
953 "cmp "#dstw", "#index" \n\t"\
955 #define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
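/* WRITEYUY2 interleaves 8 luma and 4+4 chroma samples into YUYV order;
 * per group it is equivalent to (sketch):
 *
 *     for (i = 0; i < 4; i++) {
 *         dst[4 * i + 0] = y[2 * i + 0];
 *         dst[4 * i + 1] = u[i];
 *         dst[4 * i + 2] = y[2 * i + 1];
 *         dst[4 * i + 3] = v[i];
 *     }
 */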
957 static void RENAME(yuv2yuyv422_X_ar)(SwsContext *c, const int16_t *lumFilter,
958 const int16_t **lumSrc, int lumFilterSize,
959 const int16_t *chrFilter, const int16_t **chrUSrc,
960 const int16_t **chrVSrc,
961 int chrFilterSize, const int16_t **alpSrc,
962 uint8_t *dest, int dstW, int dstY)
965 x86_reg dstW_reg = dstW;
966 x86_reg uv_off = c->uv_off_byte;
968 YSCALEYUV2PACKEDX_ACCURATE
969 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
970 "psraw $3, %%mm3 \n\t"
971 "psraw $3, %%mm4 \n\t"
972 "psraw $3, %%mm1 \n\t"
973 "psraw $3, %%mm7 \n\t"
974 WRITEYUY2(%4, %5, %%REGa)
975 YSCALEYUV2PACKEDX_END
978 static void RENAME(yuv2yuyv422_X)(SwsContext *c, const int16_t *lumFilter,
979 const int16_t **lumSrc, int lumFilterSize,
980 const int16_t *chrFilter, const int16_t **chrUSrc,
981 const int16_t **chrVSrc,
982 int chrFilterSize, const int16_t **alpSrc,
983 uint8_t *dest, int dstW, int dstY)
986 x86_reg dstW_reg = dstW;
987 x86_reg uv_off = c->uv_off_byte;
990 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
991 "psraw $3, %%mm3 \n\t"
992 "psraw $3, %%mm4 \n\t"
993 "psraw $3, %%mm1 \n\t"
994 "psraw $3, %%mm7 \n\t"
995 WRITEYUY2(%4, %5, %%REGa)
996 YSCALEYUV2PACKEDX_END
999 #define REAL_YSCALEYUV2RGB_UV(index, c) \
1000 "xor "#index", "#index" \n\t"\
1003 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
1004 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
1005 "add "UV_OFF_PX"("#c"), "#index" \n\t" \
1006 "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
1007 "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
1008 "sub "UV_OFF_PX"("#c"), "#index" \n\t" \
1009 "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
1010 "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
1011 "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
1012 "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
1013 "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
1014 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
1015 "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
1016 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
1017 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
1018 "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
1019 "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
1020 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
1021 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
1022 "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
1023 "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
1024 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
1026 #define REAL_YSCALEYUV2RGB_YA(index, c, b1, b2) \
1027 "movq ("#b1", "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
1028 "movq ("#b2", "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
1029 "movq 8("#b1", "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
1030 "movq 8("#b2", "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
1031 "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
1032 "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
1033 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
1034 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
1035 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
1036 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
1037 "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
1038 "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
1040 #define REAL_YSCALEYUV2RGB_COEFF(c) \
1041 "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
1042 "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
1043 "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
1044 "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
1045 "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
1046 "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
1047 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
1048 "paddw %%mm3, %%mm4 \n\t"\
1049 "movq %%mm2, %%mm0 \n\t"\
1050 "movq %%mm5, %%mm6 \n\t"\
1051 "movq %%mm4, %%mm3 \n\t"\
1052 "punpcklwd %%mm2, %%mm2 \n\t"\
1053 "punpcklwd %%mm5, %%mm5 \n\t"\
1054 "punpcklwd %%mm4, %%mm4 \n\t"\
1055 "paddw %%mm1, %%mm2 \n\t"\
1056 "paddw %%mm1, %%mm5 \n\t"\
1057 "paddw %%mm1, %%mm4 \n\t"\
1058 "punpckhwd %%mm0, %%mm0 \n\t"\
1059 "punpckhwd %%mm6, %%mm6 \n\t"\
1060 "punpckhwd %%mm3, %%mm3 \n\t"\
1061 "paddw %%mm7, %%mm0 \n\t"\
1062 "paddw %%mm7, %%mm6 \n\t"\
1063 "paddw %%mm7, %%mm3 \n\t"\
1064 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
1065 "packuswb %%mm0, %%mm2 \n\t"\
1066 "packuswb %%mm6, %%mm5 \n\t"\
1067 "packuswb %%mm3, %%mm4 \n\t"\
1069 #define YSCALEYUV2RGB_YA(index, c, b1, b2) REAL_YSCALEYUV2RGB_YA(index, c, b1, b2)
1071 #define YSCALEYUV2RGB(index, c) \
1072 REAL_YSCALEYUV2RGB_UV(index, c) \
1073 REAL_YSCALEYUV2RGB_YA(index, c, %0, %1) \
1074 REAL_YSCALEYUV2RGB_COEFF(c)
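/* The "_2" output functions below blend two source rows before the colour
 * conversion. CHR_MMX_FILTER_OFFSET+8 and LUM_MMX_FILTER_OFFSET+8 hold the
 * chroma and luma blend factors, so per sample the UV/YA parts compute
 * roughly (sketch):
 *
 *     int uv = (uvbuf1[i] >> 4) + (((uvbuf0[i] - uvbuf1[i]) * uvalpha1) >> 16);
 *     int y  = (buf1[i]  >> 4) + (((buf0[i]  - buf1[i])  * yalpha1)  >> 16);
 *
 * which is then fed into the same coefficient step as YSCALEYUV2RGBX.
 */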
1077 * vertical bilinear scale YV12 to RGB
1079 static void RENAME(yuv2rgb32_2)(SwsContext *c, const int16_t *buf[2],
1080 const int16_t *ubuf[2], const int16_t *vbuf[2],
1081 const int16_t *abuf[2], uint8_t *dest,
1082 int dstW, int yalpha, int uvalpha, int y)
1084 const int16_t *buf0 = buf[0], *buf1 = buf[1],
1085 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1087 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
1088 const int16_t *abuf0 = abuf[0], *abuf1 = abuf[1];
1091 YSCALEYUV2RGB(%%r8, %5)
1092 YSCALEYUV2RGB_YA(%%r8, %5, %6, %7)
1093 "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1094 "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1095 "packuswb %%mm7, %%mm1 \n\t"
1096 WRITEBGR32(%4, 8280(%5), %%r8, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
1097 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "r" (dest),
1098 "a" (&c->redDither),
1099 "r" (abuf0), "r" (abuf1)
1103 *(const uint16_t **)(&c->u_temp)=abuf0;
1104 *(const uint16_t **)(&c->v_temp)=abuf1;
1106 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1107 "mov %4, %%"REG_b" \n\t"
1108 "push %%"REG_BP" \n\t"
1109 YSCALEYUV2RGB(%%REGBP, %5)
1112 "mov "U_TEMP"(%5), %0 \n\t"
1113 "mov "V_TEMP"(%5), %1 \n\t"
1114 YSCALEYUV2RGB_YA(%%REGBP, %5, %0, %1)
1115 "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1116 "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1117 "packuswb %%mm7, %%mm1 \n\t"
1120 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
1121 "pop %%"REG_BP" \n\t"
1122 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1123 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1129 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1130 "mov %4, %%"REG_b" \n\t"
1131 "push %%"REG_BP" \n\t"
1132 YSCALEYUV2RGB(%%REGBP, %5)
1133 "pcmpeqd %%mm7, %%mm7 \n\t"
1134 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1135 "pop %%"REG_BP" \n\t"
1136 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1137 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1143 static void RENAME(yuv2bgr24_2)(SwsContext *c, const int16_t *buf[2],
1144 const int16_t *ubuf[2], const int16_t *vbuf[2],
1145 const int16_t *abuf[2], uint8_t *dest,
1146 int dstW, int yalpha, int uvalpha, int y)
1148 const int16_t *buf0 = buf[0], *buf1 = buf[1],
1149 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1151 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1153 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1154 "mov %4, %%"REG_b" \n\t"
1155 "push %%"REG_BP" \n\t"
1156 YSCALEYUV2RGB(%%REGBP, %5)
1157 "pxor %%mm7, %%mm7 \n\t"
1158 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1159 "pop %%"REG_BP" \n\t"
1160 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1161 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1166 static void RENAME(yuv2rgb555_2)(SwsContext *c, const int16_t *buf[2],
1167 const int16_t *ubuf[2], const int16_t *vbuf[2],
1168 const int16_t *abuf[2], uint8_t *dest,
1169 int dstW, int yalpha, int uvalpha, int y)
1171 const int16_t *buf0 = buf[0], *buf1 = buf[1],
1172 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1174 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1176 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1177 "mov %4, %%"REG_b" \n\t"
1178 "push %%"REG_BP" \n\t"
1179 YSCALEYUV2RGB(%%REGBP, %5)
1180 "pxor %%mm7, %%mm7 \n\t"
1181 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1183 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1184 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1185 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1187 WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1188 "pop %%"REG_BP" \n\t"
1189 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1190 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1195 static void RENAME(yuv2rgb565_2)(SwsContext *c, const int16_t *buf[2],
1196 const int16_t *ubuf[2], const int16_t *vbuf[2],
1197 const int16_t *abuf[2], uint8_t *dest,
1198 int dstW, int yalpha, int uvalpha, int y)
1200 const int16_t *buf0 = buf[0], *buf1 = buf[1],
1201 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1203 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1205 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1206 "mov %4, %%"REG_b" \n\t"
1207 "push %%"REG_BP" \n\t"
1208 YSCALEYUV2RGB(%%REGBP, %5)
1209 "pxor %%mm7, %%mm7 \n\t"
1210 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1212 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1213 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1214 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1216 WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1217 "pop %%"REG_BP" \n\t"
1218 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1219 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1224 #define REAL_YSCALEYUV2PACKED(index, c) \
1225 "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
1226 "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\
1227 "psraw $3, %%mm0 \n\t"\
1228 "psraw $3, %%mm1 \n\t"\
1229 "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
1230 "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
1231 "xor "#index", "#index" \n\t"\
1234 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
1235 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
1236 "add "UV_OFF_PX"("#c"), "#index" \n\t" \
1237 "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
1238 "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
1239 "sub "UV_OFF_PX"("#c"), "#index" \n\t" \
1240 "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
1241 "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
1242 "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
1243 "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
1244 "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
1245 "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
1246 "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
1247 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
1248 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
1249 "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
1250 "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
1251 "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
1252 "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
1253 "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
1254 "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
1255 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
1256 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
1257 "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
1258 "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
1259 "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
1260 "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
1262 #define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
1264 static void RENAME(yuv2yuyv422_2)(SwsContext *c, const int16_t *buf[2],
1265 const int16_t *ubuf[2], const int16_t *vbuf[2],
1266 const int16_t *abuf[2], uint8_t *dest,
1267 int dstW, int yalpha, int uvalpha, int y)
1269 const int16_t *buf0 = buf[0], *buf1 = buf[1],
1270 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1272 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1274 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1275 "mov %4, %%"REG_b" \n\t"
1276 "push %%"REG_BP" \n\t"
1277 YSCALEYUV2PACKED(%%REGBP, %5)
1278 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1279 "pop %%"REG_BP" \n\t"
1280 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1281 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1286 #define REAL_YSCALEYUV2RGB1(index, c) \
1287 "xor "#index", "#index" \n\t"\
1290 "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
1291 "add "UV_OFF_PX"("#c"), "#index" \n\t" \
1292 "movq (%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
1293 "sub "UV_OFF_PX"("#c"), "#index" \n\t" \
1294 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
1295 "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
1296 "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
1297 "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
1298 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
1299 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
1300 "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
1301 "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
1302 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
1303 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
1304 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
1305 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
1306 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
1307 "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
1308 "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
1309 "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
1310 "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
1311 "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
1312 "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
1313 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
1314 "paddw %%mm3, %%mm4 \n\t"\
1315 "movq %%mm2, %%mm0 \n\t"\
1316 "movq %%mm5, %%mm6 \n\t"\
1317 "movq %%mm4, %%mm3 \n\t"\
1318 "punpcklwd %%mm2, %%mm2 \n\t"\
1319 "punpcklwd %%mm5, %%mm5 \n\t"\
1320 "punpcklwd %%mm4, %%mm4 \n\t"\
1321 "paddw %%mm1, %%mm2 \n\t"\
1322 "paddw %%mm1, %%mm5 \n\t"\
1323 "paddw %%mm1, %%mm4 \n\t"\
1324 "punpckhwd %%mm0, %%mm0 \n\t"\
1325 "punpckhwd %%mm6, %%mm6 \n\t"\
1326 "punpckhwd %%mm3, %%mm3 \n\t"\
1327 "paddw %%mm7, %%mm0 \n\t"\
1328 "paddw %%mm7, %%mm6 \n\t"\
1329 "paddw %%mm7, %%mm3 \n\t"\
1330 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
1331 "packuswb %%mm0, %%mm2 \n\t"\
1332 "packuswb %%mm6, %%mm5 \n\t"\
1333 "packuswb %%mm3, %%mm4 \n\t"\
1335 #define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
1337 // do vertical chrominance interpolation
1338 #define REAL_YSCALEYUV2RGB1b(index, c) \
1339 "xor "#index", "#index" \n\t"\
1342 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
1343 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
1344 "add "UV_OFF_PX"("#c"), "#index" \n\t" \
1345 "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
1346 "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
1347 "sub "UV_OFF_PX"("#c"), "#index" \n\t" \
1348 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
1349 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
1350 "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
1351 "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
1352 "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
1353 "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
1354 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
1355 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
1356 "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
1357 "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
1358 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
1359 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
1360 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
1361 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
1362 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
1363 "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
1364 "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
1365 "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
1366 "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
1367 "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
1368 "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
1369 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
1370 "paddw %%mm3, %%mm4 \n\t"\
1371 "movq %%mm2, %%mm0 \n\t"\
1372 "movq %%mm5, %%mm6 \n\t"\
1373 "movq %%mm4, %%mm3 \n\t"\
1374 "punpcklwd %%mm2, %%mm2 \n\t"\
1375 "punpcklwd %%mm5, %%mm5 \n\t"\
1376 "punpcklwd %%mm4, %%mm4 \n\t"\
1377 "paddw %%mm1, %%mm2 \n\t"\
1378 "paddw %%mm1, %%mm5 \n\t"\
1379 "paddw %%mm1, %%mm4 \n\t"\
1380 "punpckhwd %%mm0, %%mm0 \n\t"\
1381 "punpckhwd %%mm6, %%mm6 \n\t"\
1382 "punpckhwd %%mm3, %%mm3 \n\t"\
1383 "paddw %%mm7, %%mm0 \n\t"\
1384 "paddw %%mm7, %%mm6 \n\t"\
1385 "paddw %%mm7, %%mm3 \n\t"\
1386 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
1387 "packuswb %%mm0, %%mm2 \n\t"\
1388 "packuswb %%mm6, %%mm5 \n\t"\
1389 "packuswb %%mm3, %%mm4 \n\t"\
1391 #define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
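/* The two single-row chroma variants differ only in how the chroma input is
 * formed; roughly (sketch):
 *
 *     YSCALEYUV2RGB1  (uvalpha <  2048):  uv = uvbuf0[i] >> 4;
 *     YSCALEYUV2RGB1b (uvalpha >= 2048):  uv = (uvbuf0[i] + uvbuf1[i]) >> 5;
 *
 * Luma is buf0[i] >> 4 in both cases; see the uvalpha test in the callers
 * below.
 */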
1393 #define REAL_YSCALEYUV2RGB1_ALPHA(index) \
1394 "movq (%1, "#index", 2), %%mm7 \n\t" /* abuf0[index ] */\
1395 "movq 8(%1, "#index", 2), %%mm1 \n\t" /* abuf0[index+4] */\
1396 "psraw $7, %%mm7 \n\t" /* abuf0[index ] >>7 */\
1397 "psraw $7, %%mm1 \n\t" /* abuf0[index+4] >>7 */\
1398 "packuswb %%mm1, %%mm7 \n\t"
1399 #define YSCALEYUV2RGB1_ALPHA(index) REAL_YSCALEYUV2RGB1_ALPHA(index)
1402 * YV12 to RGB without scaling or interpolating
1404 static void RENAME(yuv2rgb32_1)(SwsContext *c, const int16_t *buf0,
1405 const int16_t *ubuf[2], const int16_t *bguf[2],
1406 const int16_t *abuf0, uint8_t *dest,
1407 int dstW, int uvalpha, int y)
1409 const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1410 const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1412 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1413 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
1415 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1416 "mov %4, %%"REG_b" \n\t"
1417 "push %%"REG_BP" \n\t"
1418 YSCALEYUV2RGB1(%%REGBP, %5)
1419 YSCALEYUV2RGB1_ALPHA(%%REGBP)
1420 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1421 "pop %%"REG_BP" \n\t"
1422 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1423 :: "c" (buf0), "d" (abuf0), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1428 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1429 "mov %4, %%"REG_b" \n\t"
1430 "push %%"REG_BP" \n\t"
1431 YSCALEYUV2RGB1(%%REGBP, %5)
1432 "pcmpeqd %%mm7, %%mm7 \n\t"
1433 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1434 "pop %%"REG_BP" \n\t"
1435 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1436 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1441 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
1443 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1444 "mov %4, %%"REG_b" \n\t"
1445 "push %%"REG_BP" \n\t"
1446 YSCALEYUV2RGB1b(%%REGBP, %5)
1447 YSCALEYUV2RGB1_ALPHA(%%REGBP)
1448 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1449 "pop %%"REG_BP" \n\t"
1450 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1451 :: "c" (buf0), "d" (abuf0), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1456 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1457 "mov %4, %%"REG_b" \n\t"
1458 "push %%"REG_BP" \n\t"
1459 YSCALEYUV2RGB1b(%%REGBP, %5)
1460 "pcmpeqd %%mm7, %%mm7 \n\t"
1461 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1462 "pop %%"REG_BP" \n\t"
1463 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1464 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1471 static void RENAME(yuv2bgr24_1)(SwsContext *c, const int16_t *buf0,
1472 const int16_t *ubuf[2], const int16_t *bguf[2],
1473 const int16_t *abuf0, uint8_t *dest,
1474 int dstW, int uvalpha, int y)
1476 const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1477 const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1479 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1481 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1482 "mov %4, %%"REG_b" \n\t"
1483 "push %%"REG_BP" \n\t"
1484 YSCALEYUV2RGB1(%%REGBP, %5)
1485 "pxor %%mm7, %%mm7 \n\t"
1486 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1487 "pop %%"REG_BP" \n\t"
1488 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1489 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1494 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1495 "mov %4, %%"REG_b" \n\t"
1496 "push %%"REG_BP" \n\t"
1497 YSCALEYUV2RGB1b(%%REGBP, %5)
1498 "pxor %%mm7, %%mm7 \n\t"
1499 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1500 "pop %%"REG_BP" \n\t"
1501 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1502 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1508 static void RENAME(yuv2rgb555_1)(SwsContext *c, const int16_t *buf0,
1509 const int16_t *ubuf[2], const int16_t *bguf[2],
1510 const int16_t *abuf0, uint8_t *dest,
1511 int dstW, int uvalpha, int y)
1513 const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1514 const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1516 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1518 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1519 "mov %4, %%"REG_b" \n\t"
1520 "push %%"REG_BP" \n\t"
1521 YSCALEYUV2RGB1(%%REGBP, %5)
1522 "pxor %%mm7, %%mm7 \n\t"
1523 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1525 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1526 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1527 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1529 WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1530 "pop %%"REG_BP" \n\t"
1531 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1532 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1537 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1538 "mov %4, %%"REG_b" \n\t"
1539 "push %%"REG_BP" \n\t"
1540 YSCALEYUV2RGB1b(%%REGBP, %5)
1541 "pxor %%mm7, %%mm7 \n\t"
1542 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1544 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1545 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1546 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1548 WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1549 "pop %%"REG_BP" \n\t"
1550 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1551 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1557 static void RENAME(yuv2rgb565_1)(SwsContext *c, const int16_t *buf0,
1558 const int16_t *ubuf[2], const int16_t *bguf[2],
1559 const int16_t *abuf0, uint8_t *dest,
1560 int dstW, int uvalpha, int y)
1562 const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1563 const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1565 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1567 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1568 "mov %4, %%"REG_b" \n\t"
1569 "push %%"REG_BP" \n\t"
1570 YSCALEYUV2RGB1(%%REGBP, %5)
1571 "pxor %%mm7, %%mm7 \n\t"
1572 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1574 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1575 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1576 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1578 WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1579 "pop %%"REG_BP" \n\t"
1580 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1581 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1586 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1587 "mov %4, %%"REG_b" \n\t"
1588 "push %%"REG_BP" \n\t"
1589 YSCALEYUV2RGB1b(%%REGBP, %5)
1590 "pxor %%mm7, %%mm7 \n\t"
1591 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1593 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1594 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1595 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1597 WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1598 "pop %%"REG_BP" \n\t"
1599 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1600 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1606 #define REAL_YSCALEYUV2PACKED1(index, c) \
1607 "xor "#index", "#index" \n\t"\
1610 "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
1611 "add "UV_OFF_PX"("#c"), "#index" \n\t" \
1612 "movq (%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
1613 "sub "UV_OFF_PX"("#c"), "#index" \n\t" \
1614 "psraw $7, %%mm3 \n\t" \
1615 "psraw $7, %%mm4 \n\t" \
1616 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
1617 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
1618 "psraw $7, %%mm1 \n\t" \
1619 "psraw $7, %%mm7 \n\t" \
1621 #define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
1623 #define REAL_YSCALEYUV2PACKED1b(index, c) \
1624 "xor "#index", "#index" \n\t"\
1627 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
1628 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
1629 "add "UV_OFF_PX"("#c"), "#index" \n\t" \
1630 "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
1631 "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
1632 "sub "UV_OFF_PX"("#c"), "#index" \n\t" \
1633 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
1634 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
1635 "psrlw $8, %%mm3 \n\t" \
1636 "psrlw $8, %%mm4 \n\t" \
1637 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
1638 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
1639 "psraw $7, %%mm1 \n\t" \
1640 "psraw $7, %%mm7 \n\t"
1641 #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
1643 static void RENAME(yuv2yuyv422_1)(SwsContext *c, const int16_t *buf0,
1644 const int16_t *ubuf[2], const int16_t *bguf[2],
1645 const int16_t *abuf0, uint8_t *dest,
1646 int dstW, int uvalpha, int y)
1648 const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1649 const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1651 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1653 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1654 "mov %4, %%"REG_b" \n\t"
1655 "push %%"REG_BP" \n\t"
1656 YSCALEYUV2PACKED1(%%REGBP, %5)
1657 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1658 "pop %%"REG_BP" \n\t"
1659 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1660 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1665 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1666 "mov %4, %%"REG_b" \n\t"
1667 "push %%"REG_BP" \n\t"
1668 YSCALEYUV2PACKED1b(%%REGBP, %5)
1669 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1670 "pop %%"REG_BP" \n\t"
1671 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1672 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1678 #if !COMPILE_TEMPLATE_MMX2
1679 //FIXME yuy2* can read up to 7 samples too many
1681 static void RENAME(yuy2ToY)(uint8_t *dst, const uint8_t *src,
1682 int width, uint32_t *unused)
1685 "movq "MANGLE(bm01010101)", %%mm2 \n\t"
1686 "mov %0, %%"REG_a" \n\t"
1688 "movq (%1, %%"REG_a",2), %%mm0 \n\t"
1689 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
1690 "pand %%mm2, %%mm0 \n\t"
1691 "pand %%mm2, %%mm1 \n\t"
1692 "packuswb %%mm1, %%mm0 \n\t"
1693 "movq %%mm0, (%2, %%"REG_a") \n\t"
1694 "add $8, %%"REG_a" \n\t"
1696 : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
1701 static void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV,
1702 const uint8_t *src1, const uint8_t *src2,
1703 int width, uint32_t *unused)
1706 "movq "MANGLE(bm01010101)", %%mm4 \n\t"
1707 "mov %0, %%"REG_a" \n\t"
1709 "movq (%1, %%"REG_a",4), %%mm0 \n\t"
1710 "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
1711 "psrlw $8, %%mm0 \n\t"
1712 "psrlw $8, %%mm1 \n\t"
1713 "packuswb %%mm1, %%mm0 \n\t"
1714 "movq %%mm0, %%mm1 \n\t"
1715 "psrlw $8, %%mm0 \n\t"
1716 "pand %%mm4, %%mm1 \n\t"
1717 "packuswb %%mm0, %%mm0 \n\t"
1718 "packuswb %%mm1, %%mm1 \n\t"
1719 "movd %%mm0, (%3, %%"REG_a") \n\t"
1720 "movd %%mm1, (%2, %%"REG_a") \n\t"
1721 "add $4, %%"REG_a" \n\t"
1723 : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
1726 assert(src1 == src2);
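/* For reference, the two YUYV unpackers above are scalar-equivalent to
 * (sketch, ignoring the negative-index addressing trick):
 *
 *     dst[i]  = src[2 * i];         // yuy2ToY: luma at even bytes
 *     dstU[i] = src1[4 * i + 1];    // yuy2ToUV
 *     dstV[i] = src1[4 * i + 3];
 */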
1729 /* This is almost identical to the previous function and exists only because
1730 * yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses. */
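/* For UYVY (U0 Y0 V0 Y1 U1 Y2 V1 Y3 ...) everything is shifted by one byte,
 * so the scalar equivalents of the two functions below are simply (sketch):
 *
 *     dst[i]  = src[2 * i + 1];     // uyvyToY
 *     dstU[i] = src1[4 * i];        // uyvyToUV
 *     dstV[i] = src1[4 * i + 2];
 */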
1731 static void RENAME(uyvyToY)(uint8_t *dst, const uint8_t *src,
1732 int width, uint32_t *unused)
1735 "mov %0, %%"REG_a" \n\t"
1737 "movq (%1, %%"REG_a",2), %%mm0 \n\t"
1738 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
1739 "psrlw $8, %%mm0 \n\t"
1740 "psrlw $8, %%mm1 \n\t"
1741 "packuswb %%mm1, %%mm0 \n\t"
1742 "movq %%mm0, (%2, %%"REG_a") \n\t"
1743 "add $8, %%"REG_a" \n\t"
1745 : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
1750 static void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV,
1751 const uint8_t *src1, const uint8_t *src2,
1752 int width, uint32_t *unused)
1755 "movq "MANGLE(bm01010101)", %%mm4 \n\t"
1756 "mov %0, %%"REG_a" \n\t"
1758 "movq (%1, %%"REG_a",4), %%mm0 \n\t"
1759 "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
1760 "pand %%mm4, %%mm0 \n\t"
1761 "pand %%mm4, %%mm1 \n\t"
1762 "packuswb %%mm1, %%mm0 \n\t"
1763 "movq %%mm0, %%mm1 \n\t"
1764 "psrlw $8, %%mm0 \n\t"
1765 "pand %%mm4, %%mm1 \n\t"
1766 "packuswb %%mm0, %%mm0 \n\t"
1767 "packuswb %%mm1, %%mm1 \n\t"
1768 "movd %%mm0, (%3, %%"REG_a") \n\t"
1769 "movd %%mm1, (%2, %%"REG_a") \n\t"
1770 "add $4, %%"REG_a" \n\t"
1772 : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
1775 assert(src1 == src2);
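/* NV12/NV21 store chroma interleaved (U V U V ... resp. V U V U ...).  The
 * helper below just de-interleaves one row; roughly (sketch):
 *
 *     for (i = 0; i < width; i++) {
 *         dst1[i] = src[2 * i];       // U for NV12, V for NV21
 *         dst2[i] = src[2 * i + 1];   // V for NV12, U for NV21
 *     }
 *
 * nv12ToUV() and nv21ToUV() only differ in which destination plane they pass
 * as dst1/dst2. */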
1778 static av_always_inline void RENAME(nvXXtoUV)(uint8_t *dst1, uint8_t *dst2,
1779 const uint8_t *src, int width)
1782 "movq "MANGLE(bm01010101)", %%mm4 \n\t"
1783 "mov %0, %%"REG_a" \n\t"
1785 "movq (%1, %%"REG_a",2), %%mm0 \n\t"
1786 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
1787 "movq %%mm0, %%mm2 \n\t"
1788 "movq %%mm1, %%mm3 \n\t"
1789 "pand %%mm4, %%mm0 \n\t"
1790 "pand %%mm4, %%mm1 \n\t"
1791 "psrlw $8, %%mm2 \n\t"
1792 "psrlw $8, %%mm3 \n\t"
1793 "packuswb %%mm1, %%mm0 \n\t"
1794 "packuswb %%mm3, %%mm2 \n\t"
1795 "movq %%mm0, (%2, %%"REG_a") \n\t"
1796 "movq %%mm2, (%3, %%"REG_a") \n\t"
1797 "add $8, %%"REG_a" \n\t"
1799 : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst1+width), "r" (dst2+width)
1804 static void RENAME(nv12ToUV)(uint8_t *dstU, uint8_t *dstV,
1805 const uint8_t *src1, const uint8_t *src2,
1806 int width, uint32_t *unused)
1808 RENAME(nvXXtoUV)(dstU, dstV, src1, width);
1811 static void RENAME(nv21ToUV)(uint8_t *dstU, uint8_t *dstV,
1812 const uint8_t *src1, const uint8_t *src2,
1813 int width, uint32_t *unused)
1815 RENAME(nvXXtoUV)(dstV, dstU, src1, width);
1817 #endif /* !COMPILE_TEMPLATE_MMX2 */
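/* The packed-RGB luma reader below computes, per output sample, a 15-bit
 * fixed-point dot product of the three colour components with coefficients
 * from ff_bgr24toY*Coeff / ff_rgb24toY*Coeff plus a rounding offset.  As a
 * scalar sketch (BY/GY/RY are placeholders for the table values):
 *
 *     dst[i] = (BY * b + GY * g + RY * r + Y_OFFSET) >> 15;
 *
 * The MMX loop evaluates this for four pixels at a time with pmaddwd. */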
1819 static av_always_inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, const uint8_t *src,
1820 int width, enum PixelFormat srcFormat)
1823 if(srcFormat == PIX_FMT_BGR24) {
1825 "movq "MANGLE(ff_bgr24toY1Coeff)", %%mm5 \n\t"
1826 "movq "MANGLE(ff_bgr24toY2Coeff)", %%mm6 \n\t"
1831 "movq "MANGLE(ff_rgb24toY1Coeff)", %%mm5 \n\t"
1832 "movq "MANGLE(ff_rgb24toY2Coeff)", %%mm6 \n\t"
1838 "movq "MANGLE(ff_bgr24toYOffset)", %%mm4 \n\t"
1839 "mov %2, %%"REG_a" \n\t"
1840 "pxor %%mm7, %%mm7 \n\t"
1842 PREFETCH" 64(%0) \n\t"
1843 "movd (%0), %%mm0 \n\t"
1844 "movd 2(%0), %%mm1 \n\t"
1845 "movd 6(%0), %%mm2 \n\t"
1846 "movd 8(%0), %%mm3 \n\t"
1848 "punpcklbw %%mm7, %%mm0 \n\t"
1849 "punpcklbw %%mm7, %%mm1 \n\t"
1850 "punpcklbw %%mm7, %%mm2 \n\t"
1851 "punpcklbw %%mm7, %%mm3 \n\t"
1852 "pmaddwd %%mm5, %%mm0 \n\t"
1853 "pmaddwd %%mm6, %%mm1 \n\t"
1854 "pmaddwd %%mm5, %%mm2 \n\t"
1855 "pmaddwd %%mm6, %%mm3 \n\t"
1856 "paddd %%mm1, %%mm0 \n\t"
1857 "paddd %%mm3, %%mm2 \n\t"
1858 "paddd %%mm4, %%mm0 \n\t"
1859 "paddd %%mm4, %%mm2 \n\t"
1860 "psrad $15, %%mm0 \n\t"
1861 "psrad $15, %%mm2 \n\t"
1862 "packssdw %%mm2, %%mm0 \n\t"
1863 "packuswb %%mm0, %%mm0 \n\t"
1864 "movd %%mm0, (%1, %%"REG_a") \n\t"
1865 "add $4, %%"REG_a" \n\t"
1868 : "r" (dst+width), "g" ((x86_reg)-width)
1873 static void RENAME(bgr24ToY)(uint8_t *dst, const uint8_t *src,
1874 int width, uint32_t *unused)
1876 RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24);
1879 static void RENAME(rgb24ToY)(uint8_t *dst, const uint8_t *src,
1880 int width, uint32_t *unused)
1882 RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24);
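/* Chroma works the same way: each U/V sample is a 15-bit fixed-point dot
 * product of (b, g, r) with one row of ff_bgr24toUV[] (the table index picks
 * the BGR vs. RGB coefficient order), plus ff_bgr24toUVOffset.  Sketch with
 * placeholder coefficients:
 *
 *     dstU[i] = (BU * b + GU * g + RU * r + UV_OFFSET) >> 15;
 *     dstV[i] = (BV * b + GV * g + RV * r + UV_OFFSET) >> 15;
 */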
1885 static av_always_inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV,
1886 const uint8_t *src, int width,
1887 enum PixelFormat srcFormat)
1890 "movq 24(%4), %%mm6 \n\t"
1891 "mov %3, %%"REG_a" \n\t"
1892 "pxor %%mm7, %%mm7 \n\t"
1894 PREFETCH" 64(%0) \n\t"
1895 "movd (%0), %%mm0 \n\t"
1896 "movd 2(%0), %%mm1 \n\t"
1897 "punpcklbw %%mm7, %%mm0 \n\t"
1898 "punpcklbw %%mm7, %%mm1 \n\t"
1899 "movq %%mm0, %%mm2 \n\t"
1900 "movq %%mm1, %%mm3 \n\t"
1901 "pmaddwd (%4), %%mm0 \n\t"
1902 "pmaddwd 8(%4), %%mm1 \n\t"
1903 "pmaddwd 16(%4), %%mm2 \n\t"
1904 "pmaddwd %%mm6, %%mm3 \n\t"
1905 "paddd %%mm1, %%mm0 \n\t"
1906 "paddd %%mm3, %%mm2 \n\t"
1908 "movd 6(%0), %%mm1 \n\t"
1909 "movd 8(%0), %%mm3 \n\t"
1911 "punpcklbw %%mm7, %%mm1 \n\t"
1912 "punpcklbw %%mm7, %%mm3 \n\t"
1913 "movq %%mm1, %%mm4 \n\t"
1914 "movq %%mm3, %%mm5 \n\t"
1915 "pmaddwd (%4), %%mm1 \n\t"
1916 "pmaddwd 8(%4), %%mm3 \n\t"
1917 "pmaddwd 16(%4), %%mm4 \n\t"
1918 "pmaddwd %%mm6, %%mm5 \n\t"
1919 "paddd %%mm3, %%mm1 \n\t"
1920 "paddd %%mm5, %%mm4 \n\t"
1922 "movq "MANGLE(ff_bgr24toUVOffset)", %%mm3 \n\t"
1923 "paddd %%mm3, %%mm0 \n\t"
1924 "paddd %%mm3, %%mm2 \n\t"
1925 "paddd %%mm3, %%mm1 \n\t"
1926 "paddd %%mm3, %%mm4 \n\t"
1927 "psrad $15, %%mm0 \n\t"
1928 "psrad $15, %%mm2 \n\t"
1929 "psrad $15, %%mm1 \n\t"
1930 "psrad $15, %%mm4 \n\t"
1931 "packssdw %%mm1, %%mm0 \n\t"
1932 "packssdw %%mm4, %%mm2 \n\t"
1933 "packuswb %%mm0, %%mm0 \n\t"
1934 "packuswb %%mm2, %%mm2 \n\t"
1935 "movd %%mm0, (%1, %%"REG_a") \n\t"
1936 "movd %%mm2, (%2, %%"REG_a") \n\t"
1937 "add $4, %%"REG_a" \n\t"
1940 : "r" (dstU+width), "r" (dstV+width), "g" ((x86_reg)-width), "r"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24])
1945 static void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV,
1946 const uint8_t *src1, const uint8_t *src2,
1947 int width, uint32_t *unused)
1949 RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_BGR24);
1950 assert(src1 == src2);
1953 static void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV,
1954 const uint8_t *src1, const uint8_t *src2,
1955 int width, uint32_t *unused)
1958 RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_RGB24);
1961 #if !COMPILE_TEMPLATE_MMX2
1962 // bilinear / bicubic scaling
1963 static void RENAME(hScale)(SwsContext *c, int16_t *dst, int dstW,
1964 const uint8_t *src, const int16_t *filter,
1965 const int16_t *filterPos, int filterSize)
1967 assert(filterSize % 4 == 0 && filterSize>0);
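/* All three branches below evaluate the same horizontal convolution and only
 * differ in how far the inner loop is unrolled for the common filter sizes
 * 4 and 8.  Scalar reference (sketch; the MMX code gets its clamping from
 * packssdw after the psrad by 7):
 *
 *     for (i = 0; i < dstW; i++) {
 *         int j, val = 0;
 *         for (j = 0; j < filterSize; j++)
 *             val += src[filterPos[i] + j] * filter[i * filterSize + j];
 *         dst[i] = FFMIN(val >> 7, (1 << 15) - 1);  // 15-bit intermediate
 *     }
 */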
1968 if (filterSize==4) { // Always true for upscaling, sometimes for downscaling, too.
1969 x86_reg counter= -2*dstW;
1971 filterPos-= counter/2;
1975 "push %%"REG_b" \n\t"
1977 "pxor %%mm7, %%mm7 \n\t"
1978 "push %%"REG_BP" \n\t" // we use 7 regs here ...
1979 "mov %%"REG_a", %%"REG_BP" \n\t"
1982 "movzwl (%2, %%"REG_BP"), %%eax \n\t"
1983 "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
1984 "movq (%1, %%"REG_BP", 4), %%mm1 \n\t"
1985 "movq 8(%1, %%"REG_BP", 4), %%mm3 \n\t"
1986 "movd (%3, %%"REG_a"), %%mm0 \n\t"
1987 "movd (%3, %%"REG_b"), %%mm2 \n\t"
1988 "punpcklbw %%mm7, %%mm0 \n\t"
1989 "punpcklbw %%mm7, %%mm2 \n\t"
1990 "pmaddwd %%mm1, %%mm0 \n\t"
1991 "pmaddwd %%mm2, %%mm3 \n\t"
1992 "movq %%mm0, %%mm4 \n\t"
1993 "punpckldq %%mm3, %%mm0 \n\t"
1994 "punpckhdq %%mm3, %%mm4 \n\t"
1995 "paddd %%mm4, %%mm0 \n\t"
1996 "psrad $7, %%mm0 \n\t"
1997 "packssdw %%mm0, %%mm0 \n\t"
1998 "movd %%mm0, (%4, %%"REG_BP") \n\t"
1999 "add $4, %%"REG_BP" \n\t"
2002 "pop %%"REG_BP" \n\t"
2004 "pop %%"REG_b" \n\t"
2007 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
2012 } else if (filterSize==8) {
2013 x86_reg counter= -2*dstW;
2015 filterPos-= counter/2;
2019 "push %%"REG_b" \n\t"
2021 "pxor %%mm7, %%mm7 \n\t"
2022 "push %%"REG_BP" \n\t" // we use 7 regs here ...
2023 "mov %%"REG_a", %%"REG_BP" \n\t"
2026 "movzwl (%2, %%"REG_BP"), %%eax \n\t"
2027 "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
2028 "movq (%1, %%"REG_BP", 8), %%mm1 \n\t"
2029 "movq 16(%1, %%"REG_BP", 8), %%mm3 \n\t"
2030 "movd (%3, %%"REG_a"), %%mm0 \n\t"
2031 "movd (%3, %%"REG_b"), %%mm2 \n\t"
2032 "punpcklbw %%mm7, %%mm0 \n\t"
2033 "punpcklbw %%mm7, %%mm2 \n\t"
2034 "pmaddwd %%mm1, %%mm0 \n\t"
2035 "pmaddwd %%mm2, %%mm3 \n\t"
2037 "movq 8(%1, %%"REG_BP", 8), %%mm1 \n\t"
2038 "movq 24(%1, %%"REG_BP", 8), %%mm5 \n\t"
2039 "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
2040 "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
2041 "punpcklbw %%mm7, %%mm4 \n\t"
2042 "punpcklbw %%mm7, %%mm2 \n\t"
2043 "pmaddwd %%mm1, %%mm4 \n\t"
2044 "pmaddwd %%mm2, %%mm5 \n\t"
2045 "paddd %%mm4, %%mm0 \n\t"
2046 "paddd %%mm5, %%mm3 \n\t"
2047 "movq %%mm0, %%mm4 \n\t"
2048 "punpckldq %%mm3, %%mm0 \n\t"
2049 "punpckhdq %%mm3, %%mm4 \n\t"
2050 "paddd %%mm4, %%mm0 \n\t"
2051 "psrad $7, %%mm0 \n\t"
2052 "packssdw %%mm0, %%mm0 \n\t"
2053 "movd %%mm0, (%4, %%"REG_BP") \n\t"
2054 "add $4, %%"REG_BP" \n\t"
2057 "pop %%"REG_BP" \n\t"
2059 "pop %%"REG_b" \n\t"
2062 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
2068 const uint8_t *offset = src+filterSize;
2069 x86_reg counter= -2*dstW;
2070 //filter-= counter*filterSize/2;
2071 filterPos-= counter/2;
2074 "pxor %%mm7, %%mm7 \n\t"
2077 "mov %2, %%"REG_c" \n\t"
2078 "movzwl (%%"REG_c", %0), %%eax \n\t"
2079 "movzwl 2(%%"REG_c", %0), %%edx \n\t"
2080 "mov %5, %%"REG_c" \n\t"
2081 "pxor %%mm4, %%mm4 \n\t"
2082 "pxor %%mm5, %%mm5 \n\t"
2084 "movq (%1), %%mm1 \n\t"
2085 "movq (%1, %6), %%mm3 \n\t"
2086 "movd (%%"REG_c", %%"REG_a"), %%mm0 \n\t"
2087 "movd (%%"REG_c", %%"REG_d"), %%mm2 \n\t"
2088 "punpcklbw %%mm7, %%mm0 \n\t"
2089 "punpcklbw %%mm7, %%mm2 \n\t"
2090 "pmaddwd %%mm1, %%mm0 \n\t"
2091 "pmaddwd %%mm2, %%mm3 \n\t"
2092 "paddd %%mm3, %%mm5 \n\t"
2093 "paddd %%mm0, %%mm4 \n\t"
2095 "add $4, %%"REG_c" \n\t"
2096 "cmp %4, %%"REG_c" \n\t"
2099 "movq %%mm4, %%mm0 \n\t"
2100 "punpckldq %%mm5, %%mm4 \n\t"
2101 "punpckhdq %%mm5, %%mm0 \n\t"
2102 "paddd %%mm0, %%mm4 \n\t"
2103 "psrad $7, %%mm4 \n\t"
2104 "packssdw %%mm4, %%mm4 \n\t"
2105 "mov %3, %%"REG_a" \n\t"
2106 "movd %%mm4, (%%"REG_a", %0) \n\t"
2110 : "+r" (counter), "+r" (filter)
2111 : "m" (filterPos), "m" (dst), "m"(offset),
2112 "m" (src), "r" ((x86_reg)filterSize*2)
2113 : "%"REG_a, "%"REG_c, "%"REG_d
2117 #endif /* !COMPILE_TEMPLATE_MMX2 */
2119 #if COMPILE_TEMPLATE_MMX2
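/* The *_fast routines below do not iterate over a filter themselves: they
 * call into scaler code that was generated at runtime for the current
 * scaling factor (c->lumMmx2FilterCode / c->chrMmx2FilterCode).  The inline
 * asm here only loads the pointers into fixed registers, runs that generated
 * code in fixed-size chunks via CALL_MMX2_FILTER_CODE, and afterwards pads
 * the right-hand edge in plain C. */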
2120 static void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst,
2121 int dstWidth, const uint8_t *src,
2124 int16_t *filterPos = c->hLumFilterPos;
2125 int16_t *filter = c->hLumFilter;
2126 void *mmx2FilterCode= c->lumMmx2FilterCode;
2129 DECLARE_ALIGNED(8, uint64_t, ebxsave);
2134 "mov %%"REG_b", %5 \n\t"
2136 "pxor %%mm7, %%mm7 \n\t"
2137 "mov %0, %%"REG_c" \n\t"
2138 "mov %1, %%"REG_D" \n\t"
2139 "mov %2, %%"REG_d" \n\t"
2140 "mov %3, %%"REG_b" \n\t"
2141 "xor %%"REG_a", %%"REG_a" \n\t" // i
2142 PREFETCH" (%%"REG_c") \n\t"
2143 PREFETCH" 32(%%"REG_c") \n\t"
2144 PREFETCH" 64(%%"REG_c") \n\t"
2147 #define CALL_MMX2_FILTER_CODE \
2148 "movl (%%"REG_b"), %%esi \n\t"\
2150 "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
2151 "add %%"REG_S", %%"REG_c" \n\t"\
2152 "add %%"REG_a", %%"REG_D" \n\t"\
2153 "xor %%"REG_a", %%"REG_a" \n\t"\
2156 #define CALL_MMX2_FILTER_CODE \
2157 "movl (%%"REG_b"), %%esi \n\t"\
2159 "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
2160 "add %%"REG_a", %%"REG_D" \n\t"\
2161 "xor %%"REG_a", %%"REG_a" \n\t"\
2163 #endif /* ARCH_X86_64 */
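/* Each CALL_MMX2_FILTER_CODE step dispatches one chunk of the generated
 * scaler: it loads the next filterPos entry, transfers control to the
 * generated code block, then advances the source pointer by that position
 * (loaded through %esi on x86-64, added straight from memory on x86-32),
 * bumps the destination pointer by the pixels just produced, and clears the
 * per-chunk counter in REG_a. */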
2165 CALL_MMX2_FILTER_CODE
2166 CALL_MMX2_FILTER_CODE
2167 CALL_MMX2_FILTER_CODE
2168 CALL_MMX2_FILTER_CODE
2169 CALL_MMX2_FILTER_CODE
2170 CALL_MMX2_FILTER_CODE
2171 CALL_MMX2_FILTER_CODE
2172 CALL_MMX2_FILTER_CODE
2175 "mov %5, %%"REG_b" \n\t"
2177 :: "m" (src), "m" (dst), "m" (filter), "m" (filterPos),
2178 "m" (mmx2FilterCode)
2182 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
2188 for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
2189 dst[i] = src[srcW-1]*128;
2192 static void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst1, int16_t *dst2,
2193 int dstWidth, const uint8_t *src1,
2194 const uint8_t *src2, int srcW, int xInc)
2196 int16_t *filterPos = c->hChrFilterPos;
2197 int16_t *filter = c->hChrFilter;
2198 void *mmx2FilterCode= c->chrMmx2FilterCode;
2201 DECLARE_ALIGNED(8, uint64_t, ebxsave);
2206 "mov %%"REG_b", %7 \n\t"
2208 "pxor %%mm7, %%mm7 \n\t"
2209 "mov %0, %%"REG_c" \n\t"
2210 "mov %1, %%"REG_D" \n\t"
2211 "mov %2, %%"REG_d" \n\t"
2212 "mov %3, %%"REG_b" \n\t"
2213 "xor %%"REG_a", %%"REG_a" \n\t" // i
2214 PREFETCH" (%%"REG_c") \n\t"
2215 PREFETCH" 32(%%"REG_c") \n\t"
2216 PREFETCH" 64(%%"REG_c") \n\t"
2218 CALL_MMX2_FILTER_CODE
2219 CALL_MMX2_FILTER_CODE
2220 CALL_MMX2_FILTER_CODE
2221 CALL_MMX2_FILTER_CODE
2222 "xor %%"REG_a", %%"REG_a" \n\t" // i
2223 "mov %5, %%"REG_c" \n\t" // src
2224 "mov %6, %%"REG_D" \n\t" // buf2
2225 PREFETCH" (%%"REG_c") \n\t"
2226 PREFETCH" 32(%%"REG_c") \n\t"
2227 PREFETCH" 64(%%"REG_c") \n\t"
2229 CALL_MMX2_FILTER_CODE
2230 CALL_MMX2_FILTER_CODE
2231 CALL_MMX2_FILTER_CODE
2232 CALL_MMX2_FILTER_CODE
2235 "mov %7, %%"REG_b" \n\t"
2237 :: "m" (src1), "m" (dst1), "m" (filter), "m" (filterPos),
2238 "m" (mmx2FilterCode), "m" (src2), "m"(dst2)
2242 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
2248 for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) {
2249 dst1[i] = src1[srcW-1]*128;
2250 dst2[i] = src2[srcW-1]*128;
2253 #endif /* COMPILE_TEMPLATE_MMX2 */
2255 static av_cold void RENAME(sws_init_swScale)(SwsContext *c)
2257 enum PixelFormat srcFormat = c->srcFormat,
2258 dstFormat = c->dstFormat;
2260 if (!is16BPS(dstFormat) && !is9_OR_10BPS(dstFormat) &&
2261 dstFormat != PIX_FMT_NV12 && dstFormat != PIX_FMT_NV21) {
2262 if (!(c->flags & SWS_BITEXACT)) {
2263 if (c->flags & SWS_ACCURATE_RND) {
2264 c->yuv2yuv1 = RENAME(yuv2yuv1_ar );
2265 c->yuv2yuvX = RENAME(yuv2yuvX_ar );
2266 if (!(c->flags & SWS_FULL_CHR_H_INT)) {
2267 switch (c->dstFormat) {
2268 case PIX_FMT_RGB32: c->yuv2packedX = RENAME(yuv2rgb32_X_ar); break;
2269 case PIX_FMT_BGR24: c->yuv2packedX = RENAME(yuv2bgr24_X_ar); break;
2270 case PIX_FMT_RGB555: c->yuv2packedX = RENAME(yuv2rgb555_X_ar); break;
2271 case PIX_FMT_RGB565: c->yuv2packedX = RENAME(yuv2rgb565_X_ar); break;
2272 case PIX_FMT_YUYV422: c->yuv2packedX = RENAME(yuv2yuyv422_X_ar); break;
2277 c->yuv2yuv1 = RENAME(yuv2yuv1 );
2278 c->yuv2yuvX = RENAME(yuv2yuvX );
2279 if (!(c->flags & SWS_FULL_CHR_H_INT)) {
2280 switch (c->dstFormat) {
2281 case PIX_FMT_RGB32: c->yuv2packedX = RENAME(yuv2rgb32_X); break;
2282 case PIX_FMT_BGR24: c->yuv2packedX = RENAME(yuv2bgr24_X); break;
2283 case PIX_FMT_RGB555: c->yuv2packedX = RENAME(yuv2rgb555_X); break;
2284 case PIX_FMT_RGB565: c->yuv2packedX = RENAME(yuv2rgb565_X); break;
2285 case PIX_FMT_YUYV422: c->yuv2packedX = RENAME(yuv2yuyv422_X); break;
2291 if (!(c->flags & SWS_FULL_CHR_H_INT)) {
2292 switch (c->dstFormat) {
2294 c->yuv2packed1 = RENAME(yuv2rgb32_1);
2295 c->yuv2packed2 = RENAME(yuv2rgb32_2);
2298 c->yuv2packed1 = RENAME(yuv2bgr24_1);
2299 c->yuv2packed2 = RENAME(yuv2bgr24_2);
2301 case PIX_FMT_RGB555:
2302 c->yuv2packed1 = RENAME(yuv2rgb555_1);
2303 c->yuv2packed2 = RENAME(yuv2rgb555_2);
2305 case PIX_FMT_RGB565:
2306 c->yuv2packed1 = RENAME(yuv2rgb565_1);
2307 c->yuv2packed2 = RENAME(yuv2rgb565_2);
2309 case PIX_FMT_YUYV422:
2310 c->yuv2packed1 = RENAME(yuv2yuyv422_1);
2311 c->yuv2packed2 = RENAME(yuv2yuyv422_2);
2319 if (c->scalingBpp == 8) {
2320 #if !COMPILE_TEMPLATE_MMX2
2321 c->hScale = RENAME(hScale );
2322 #endif /* !COMPILE_TEMPLATE_MMX2 */
2324 // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
2325 #if COMPILE_TEMPLATE_MMX2
2326 if (c->flags & SWS_FAST_BILINEAR && c->canMMX2BeUsed)
2328 c->hyscale_fast = RENAME(hyscale_fast);
2329 c->hcscale_fast = RENAME(hcscale_fast);
2331 #endif /* COMPILE_TEMPLATE_MMX2 */
2332 c->hyscale_fast = NULL;
2333 c->hcscale_fast = NULL;
2334 #if COMPILE_TEMPLATE_MMX2
2336 #endif /* COMPILE_TEMPLATE_MMX2 */
2339 #if !COMPILE_TEMPLATE_MMX2
2341 case PIX_FMT_YUYV422 : c->chrToYV12 = RENAME(yuy2ToUV); break;
2342 case PIX_FMT_UYVY422 : c->chrToYV12 = RENAME(uyvyToUV); break;
2343 case PIX_FMT_NV12 : c->chrToYV12 = RENAME(nv12ToUV); break;
2344 case PIX_FMT_NV21 : c->chrToYV12 = RENAME(nv21ToUV); break;
2347 #endif /* !COMPILE_TEMPLATE_MMX2 */
2348 if (!c->chrSrcHSubSample) {
2350 case PIX_FMT_BGR24 : c->chrToYV12 = RENAME(bgr24ToUV); break;
2351 case PIX_FMT_RGB24 : c->chrToYV12 = RENAME(rgb24ToUV); break;
2356 switch (srcFormat) {
2357 #if !COMPILE_TEMPLATE_MMX2
2358 case PIX_FMT_YUYV422 :
2359 case PIX_FMT_Y400A : c->lumToYV12 = RENAME(yuy2ToY); break;
2360 case PIX_FMT_UYVY422 : c->lumToYV12 = RENAME(uyvyToY); break;
2361 #endif /* !COMPILE_TEMPLATE_MMX2 */
2362 case PIX_FMT_BGR24 : c->lumToYV12 = RENAME(bgr24ToY); break;
2363 case PIX_FMT_RGB24 : c->lumToYV12 = RENAME(rgb24ToY); break;
2366 #if !COMPILE_TEMPLATE_MMX2
2368 switch (srcFormat) {
2369 case PIX_FMT_Y400A : c->alpToYV12 = RENAME(yuy2ToY); break;
2373 #endif /* !COMPILE_TEMPLATE_MMX2 */