/*
 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "swscale_template.h"

#if COMPILE_TEMPLATE_MMX2
#define PREFETCH "prefetchnta"
#else
#define PREFETCH " # nop"
#endif

#if COMPILE_TEMPLATE_MMX2
#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif
#define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
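
/* On MMX2-capable CPUs the writers below prefetch with prefetchnta and
 * store with movntq, a non-temporal store that keeps this write-once
 * output from polluting the cache; without MMX2 they degrade to a nop
 * and a plain movq. */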

#define YSCALEYUV2YV12X(x, offset, dest, width) \
    __asm__ volatile(\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    ".p2align 4 \n\t" /* FIXME Unroll? */\
    "1: \n\t"\
    "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
    "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
    "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* srcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t"\
    "pmulhw %%mm0, %%mm5 \n\t"\
    "paddw %%mm2, %%mm3 \n\t"\
    "paddw %%mm5, %%mm4 \n\t"\
    "jnz 1b \n\t"\
    "psraw $3, %%mm3 \n\t"\
    "psraw $3, %%mm4 \n\t"\
    "packuswb %%mm4, %%mm3 \n\t"\
    MOVNTQ(%%mm3, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "cmp %2, %%"REG_a" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "jb 1b \n\t"\
    :: "r" (&c->redDither),\
       "r" (dest), "g" ((x86_reg)width)\
    : "%"REG_a, "%"REG_d, "%"REG_S\
    );

#define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
    __asm__ volatile(\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    "pxor %%mm4, %%mm4 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* srcData */\
    "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
    "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
    "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm1 \n\t" /* srcData */\
    "movq %%mm0, %%mm3 \n\t"\
    "punpcklwd %%mm1, %%mm0 \n\t"\
    "punpckhwd %%mm1, %%mm3 \n\t"\
    "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm3 \n\t"\
    "paddd %%mm0, %%mm4 \n\t"\
    "paddd %%mm3, %%mm5 \n\t"\
    "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* srcData */\
    "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
    "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "punpcklwd %%mm3, %%mm2 \n\t"\
    "punpckhwd %%mm3, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm2 \n\t"\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "paddd %%mm2, %%mm6 \n\t"\
    "paddd %%mm0, %%mm7 \n\t"\
    "jnz 1b \n\t"\
    "psrad $16, %%mm4 \n\t"\
    "psrad $16, %%mm5 \n\t"\
    "psrad $16, %%mm6 \n\t"\
    "psrad $16, %%mm7 \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
    "packssdw %%mm5, %%mm4 \n\t"\
    "packssdw %%mm7, %%mm6 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm0, %%mm6 \n\t"\
    "psraw $3, %%mm4 \n\t"\
    "psraw $3, %%mm6 \n\t"\
    "packuswb %%mm6, %%mm4 \n\t"\
    MOVNTQ(%%mm4, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "cmp %2, %%"REG_a" \n\t"\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "pxor %%mm4, %%mm4 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "jb 1b \n\t"\
    :: "r" (&c->redDither),\
       "r" (dest), "g" ((x86_reg)width)\
    : "%"REG_a, "%"REG_d, "%"REG_S\
    );

#define YSCALEYUV2YV121 \
    "mov %2, %%"REG_a" \n\t"\
    ".p2align 4 \n\t" /* FIXME Unroll? */\
    "1: \n\t"\
    "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
    "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
    "psraw $7, %%mm0 \n\t"\
    "psraw $7, %%mm1 \n\t"\
    "packuswb %%mm1, %%mm0 \n\t"\
    MOVNTQ(%%mm0, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "jnc 1b \n\t"

#define YSCALEYUV2YV121_ACCURATE \
    "mov %2, %%"REG_a" \n\t"\
    "pcmpeqw %%mm7, %%mm7 \n\t"\
    "psrlw $15, %%mm7 \n\t"\
    "psllw $6, %%mm7 \n\t"\
    ".p2align 4 \n\t" /* FIXME Unroll? */\
    "1: \n\t"\
    "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
    "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
    "paddsw %%mm7, %%mm0 \n\t"\
    "paddsw %%mm7, %%mm1 \n\t"\
    "psraw $7, %%mm0 \n\t"\
    "psraw $7, %%mm1 \n\t"\
    "packuswb %%mm1, %%mm0 \n\t"\
    MOVNTQ(%%mm0, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "jnc 1b \n\t"
170 :: "m" (-lumFilterSize), "m" (-chrFilterSize),
171 "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
172 "r" (dest), "m" (dstW_reg),
173 "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
174 : "%eax", "%ebx", "%ecx", "%edx", "%esi"

#define YSCALEYUV2PACKEDX_UV \
    __asm__ volatile(\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    ".p2align 4 \n\t"\
    "nop \n\t"\
    "1: \n\t"\
    "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    ".p2align 4 \n\t"\
    "2: \n\t"\
    "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
    "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
    "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t"\
    "pmulhw %%mm0, %%mm5 \n\t"\
    "paddw %%mm2, %%mm3 \n\t"\
    "paddw %%mm5, %%mm4 \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "jnz 2b \n\t"

#define YSCALEYUV2PACKEDX_YA(offset,coeff,src1,src2,dst1,dst2) \
    "lea "offset"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), "#dst1" \n\t"\
    "movq "#dst1", "#dst2" \n\t"\
    ".p2align 4 \n\t"\
    "2: \n\t"\
    "movq 8(%%"REG_d"), "#coeff" \n\t" /* filterCoeff */\
    "movq (%%"REG_S", %%"REG_a", 2), "#src1" \n\t" /* Y1srcData */\
    "movq 8(%%"REG_S", %%"REG_a", 2), "#src2" \n\t" /* Y2srcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pmulhw "#coeff", "#src1" \n\t"\
    "pmulhw "#coeff", "#src2" \n\t"\
    "paddw "#src1", "#dst1" \n\t"\
    "paddw "#src2", "#dst2" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "jnz 2b \n\t"

#define YSCALEYUV2PACKEDX \
    YSCALEYUV2PACKEDX_UV \
    YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET,%%mm0,%%mm2,%%mm5,%%mm1,%%mm7)

#define YSCALEYUV2PACKEDX_END \
    :: "r" (&c->redDither),\
       "m" (dummy), "m" (dummy), "m" (dummy),\
       "r" (dest), "m" (dstW_reg)\
    : "%"REG_a, "%"REG_d, "%"REG_S\
    );

#define YSCALEYUV2PACKEDX_ACCURATE_UV \
    __asm__ volatile(\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    ".p2align 4 \n\t"\
    "nop \n\t"\
    "1: \n\t"\
    "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pxor %%mm4, %%mm4 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    ".p2align 4 \n\t"\
    "2: \n\t"\
    "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
    "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
    "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
    "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
    "movq %%mm0, %%mm3 \n\t"\
    "punpcklwd %%mm1, %%mm0 \n\t"\
    "punpckhwd %%mm1, %%mm3 \n\t"\
    "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1 \n\t" /* filterCoeff */\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm3 \n\t"\
    "paddd %%mm0, %%mm4 \n\t"\
    "paddd %%mm3, %%mm5 \n\t"\
    "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
    "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
    "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "punpcklwd %%mm3, %%mm2 \n\t"\
    "punpckhwd %%mm3, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm2 \n\t"\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "paddd %%mm2, %%mm6 \n\t"\
    "paddd %%mm0, %%mm7 \n\t"\
    "jnz 2b \n\t"\
    "psrad $16, %%mm4 \n\t"\
    "psrad $16, %%mm5 \n\t"\
    "psrad $16, %%mm6 \n\t"\
    "psrad $16, %%mm7 \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
    "packssdw %%mm5, %%mm4 \n\t"\
    "packssdw %%mm7, %%mm6 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm0, %%mm6 \n\t"\
    "movq %%mm4, "U_TEMP"(%0) \n\t"\
    "movq %%mm6, "V_TEMP"(%0) \n\t"

#define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \
    "lea "offset"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pxor %%mm1, %%mm1 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    ".p2align 4 \n\t"\
    "2: \n\t"\
    "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
    "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
    "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
    "movq %%mm0, %%mm3 \n\t"\
    "punpcklwd %%mm4, %%mm0 \n\t"\
    "punpckhwd %%mm4, %%mm3 \n\t"\
    "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
    "pmaddwd %%mm4, %%mm0 \n\t"\
    "pmaddwd %%mm4, %%mm3 \n\t"\
    "paddd %%mm0, %%mm1 \n\t"\
    "paddd %%mm3, %%mm5 \n\t"\
    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
    "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
    "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "punpcklwd %%mm3, %%mm2 \n\t"\
    "punpckhwd %%mm3, %%mm0 \n\t"\
    "pmaddwd %%mm4, %%mm2 \n\t"\
    "pmaddwd %%mm4, %%mm0 \n\t"\
    "paddd %%mm2, %%mm7 \n\t"\
    "paddd %%mm0, %%mm6 \n\t"\
    "jnz 2b \n\t"\
    "psrad $16, %%mm1 \n\t"\
    "psrad $16, %%mm5 \n\t"\
    "psrad $16, %%mm7 \n\t"\
    "psrad $16, %%mm6 \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
    "packssdw %%mm5, %%mm1 \n\t"\
    "packssdw %%mm6, %%mm7 \n\t"\
    "paddw %%mm0, %%mm1 \n\t"\
    "paddw %%mm0, %%mm7 \n\t"\
    "movq "U_TEMP"(%0), %%mm3 \n\t"\
    "movq "V_TEMP"(%0), %%mm4 \n\t"

#define YSCALEYUV2PACKEDX_ACCURATE \
    YSCALEYUV2PACKEDX_ACCURATE_UV \
    YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET)

#define YSCALEYUV2RGBX \
    "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
    "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"

#define REAL_YSCALEYUV2PACKED(index, c) \
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\
    "psraw $3, %%mm0 \n\t"\
    "psraw $3, %%mm1 \n\t"\
    "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
    "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
    "xor "#index", "#index" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "psraw $7, %%mm3 \n\t" /* uvbuf1[eax] >>7*/\
    "psraw $7, %%mm4 \n\t" /* uvbuf1[eax+2048] >>7*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
    "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
    "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
    "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
    "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
    "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "psraw $7, %%mm1 \n\t" /* buf1[eax] >>7*/\
    "psraw $7, %%mm7 \n\t" /* buf1[eax] >>7*/\
    "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/

#define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
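
/* Vertical bilinear blend in 16-bit lanes, roughly
 *     out = (buf1 >> 7) + (((buf0 - buf1) * alpha) >> 16)
 * where the filter taps were pre-shifted right by 3 above so packed-YUV
 * output lands on the 8-bit scale WRITEYUY2 expects; the RGB paths below
 * shift by 4 instead, keeping 3 extra fraction bits for the coefficient
 * stage. */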

#define REAL_YSCALEYUV2RGB_UV(index, c) \
    "xor "#index", "#index" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "psraw $4, %%mm3 \n\t" /* uvbuf1[eax] >>4*/\
    "psraw $4, %%mm4 \n\t" /* uvbuf1[eax+2048] >>4*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */

#define REAL_YSCALEYUV2RGB_YA(index, c, b1, b2) \
    "movq ("#b1", "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
    "movq ("#b2", "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
    "movq 8("#b1", "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
    "movq 8("#b2", "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
    "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/

#define REAL_YSCALEYUV2RGB_COEFF(c) \
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"

#define YSCALEYUV2RGB_YA(index, c, b1, b2) REAL_YSCALEYUV2RGB_YA(index, c, b1, b2)

#define YSCALEYUV2RGB(index, c) \
    REAL_YSCALEYUV2RGB_UV(index, c) \
    REAL_YSCALEYUV2RGB_YA(index, c, %0, %1) \
    REAL_YSCALEYUV2RGB_COEFF(c)

#define REAL_YSCALEYUV2PACKED1(index, c) \
    "xor "#index", "#index" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
    "psraw $7, %%mm3 \n\t"\
    "psraw $7, %%mm4 \n\t"\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $7, %%mm1 \n\t"\
    "psraw $7, %%mm7 \n\t"

#define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)

#define REAL_YSCALEYUV2RGB1(index, c) \
    "xor "#index", "#index" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
    "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] >>4*/\
    "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] >>4*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] >>4*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"

#define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
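
/* The *1 variants render from a single source line: no vertical blend at
 * all, just a shift of the buffer down to the working range. */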

#define REAL_YSCALEYUV2PACKED1b(index, c) \
    "xor "#index", "#index" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
    "psrlw $8, %%mm3 \n\t"\
    "psrlw $8, %%mm4 \n\t"\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $7, %%mm1 \n\t"\
    "psraw $7, %%mm7 \n\t"
#define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)

// do vertical chrominance interpolation
#define REAL_YSCALEYUV2RGB1b(index, c) \
    "xor "#index", "#index" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
    "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
    "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] >>4*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"

#define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
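
/* The *1b variants approximate the chroma blend by averaging the two
 * chroma lines (paddw, then a right shift) instead of a weighted blend;
 * this is only used when the two weights are nearly equal. */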

#define REAL_YSCALEYUV2RGB1_ALPHA(index) \
    "movq (%1, "#index", 2), %%mm7 \n\t" /* abuf0[index ] */\
    "movq 8(%1, "#index", 2), %%mm1 \n\t" /* abuf0[index+4] */\
    "psraw $7, %%mm7 \n\t" /* abuf0[index ] >>7 */\
    "psraw $7, %%mm1 \n\t" /* abuf0[index+4] >>7 */\
    "packuswb %%mm1, %%mm7 \n\t"
#define YSCALEYUV2RGB1_ALPHA(index) REAL_YSCALEYUV2RGB1_ALPHA(index)

#define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \
    "movq "#b", "#q2" \n\t" /* B */\
    "movq "#r", "#t" \n\t" /* R */\
    "punpcklbw "#g", "#b" \n\t" /* GBGBGBGB 0 */\
    "punpcklbw "#a", "#r" \n\t" /* ARARARAR 0 */\
    "punpckhbw "#g", "#q2" \n\t" /* GBGBGBGB 2 */\
    "punpckhbw "#a", "#t" \n\t" /* ARARARAR 2 */\
    "movq "#b", "#q0" \n\t" /* GBGBGBGB 0 */\
    "movq "#q2", "#q3" \n\t" /* GBGBGBGB 2 */\
    "punpcklwd "#r", "#q0" \n\t" /* ARGBARGB 0 */\
    "punpckhwd "#r", "#b" \n\t" /* ARGBARGB 1 */\
    "punpcklwd "#t", "#q2" \n\t" /* ARGBARGB 2 */\
    "punpckhwd "#t", "#q3" \n\t" /* ARGBARGB 3 */\
\
    MOVNTQ( q0, (dst, index, 4))\
    MOVNTQ( b, 8(dst, index, 4))\
    MOVNTQ( q2, 16(dst, index, 4))\
    MOVNTQ( q3, 24(dst, index, 4))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)
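
/* Byte interleave to 32-bit pixels: punpcklbw/punpckhbw pair B with G and
 * R with A into GB and AR words, then punpcklwd/punpckhwd merge those into
 * four ARGB quads, streamed out 8 bytes at a time. */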

#define REAL_WRITERGB16(dst, dstw, index) \
    "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
    "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
    "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
    "psrlq $3, %%mm2 \n\t"\
\
    "movq %%mm2, %%mm1 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
\
    "punpcklbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm5, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm5, %%mm1 \n\t"\
\
    "psllq $3, %%mm3 \n\t"\
    "psllq $3, %%mm4 \n\t"\
\
    "por %%mm3, %%mm2 \n\t"\
    "por %%mm4, %%mm1 \n\t"\
\
    MOVNTQ(%%mm2, (dst, index, 2))\
    MOVNTQ(%%mm1, 8(dst, index, 2))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITERGB16(dst, dstw, index) REAL_WRITERGB16(dst, dstw, index)

#define REAL_WRITERGB15(dst, dstw, index) \
    "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
    "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
    "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
    "psrlq $3, %%mm2 \n\t"\
    "psrlq $1, %%mm5 \n\t"\
\
    "movq %%mm2, %%mm1 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
\
    "punpcklbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm5, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm5, %%mm1 \n\t"\
\
    "psllq $2, %%mm3 \n\t"\
    "psllq $2, %%mm4 \n\t"\
\
    "por %%mm3, %%mm2 \n\t"\
    "por %%mm4, %%mm1 \n\t"\
\
    MOVNTQ(%%mm2, (dst, index, 2))\
    MOVNTQ(%%mm1, 8(dst, index, 2))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITERGB15(dst, dstw, index) REAL_WRITERGB15(dst, dstw, index)
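
/* 16/15 bpp packing: each channel is masked to its top 5 bits (6 for
 * green in RGB565), moved into its bit field with byte unpacks plus small
 * shifts, and OR-merged, giving 4 output pixels per 8-byte store. */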

#define WRITEBGR24OLD(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq %%mm2, %%mm1 \n\t" /* B */\
    "movq %%mm5, %%mm6 \n\t" /* R */\
    "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
    "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
    "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
    "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
    "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
    "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
    "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
    "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
    "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
    "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
    "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
    "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 0 */\
    "pand "MANGLE(bm11111000)", %%mm0 \n\t" /* 00RGB000 0.5 */\
    "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
    "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
    "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
    "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
\
    "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
    "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
    "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
    "pand "MANGLE(bm00001111)", %%mm2 \n\t" /* 0000RGBR 1 */\
    "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
    "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
    "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 2 */\
    "pand "MANGLE(bm11111000)", %%mm1 \n\t" /* 00RGB000 2.5 */\
    "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
    "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
    "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
    "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
\
    "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
    "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
    "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
    "pand "MANGLE(bm00000111)", %%mm5 \n\t" /* 00000RGB 3 */\
    "pand "MANGLE(bm11111000)", %%mm3 \n\t" /* 00RGB000 3.5 */\
    "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
    "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
    "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
\
    MOVNTQ(%%mm0, (dst))\
    MOVNTQ(%%mm2, 8(dst))\
    MOVNTQ(%%mm3, 16(dst))\
    "add $24, "#dst" \n\t"\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#define WRITEBGR24MMX(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq %%mm2, %%mm1 \n\t" /* B */\
    "movq %%mm5, %%mm6 \n\t" /* R */\
    "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
    "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
    "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
    "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
    "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
    "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
    "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
    "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
    "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
    "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
    "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
    "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
    "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
\
    "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
    "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
    "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
    "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
\
    "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
    "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
    "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
    "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
\
    "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
    "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
    "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
    "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
    MOVNTQ(%%mm0, (dst))\
\
    "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
    "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
    "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
    "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
    MOVNTQ(%%mm6, 8(dst))\
\
    "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
    "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
    "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
    MOVNTQ(%%mm5, 16(dst))\
\
    "add $24, "#dst" \n\t"\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#define WRITEBGR24MMX2(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
    "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
    "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
    "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
    "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
\
    "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
    "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
    "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
\
    "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
    "por %%mm1, %%mm6 \n\t"\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, (dst))\
\
    "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
    "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
    "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
    "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
\
    "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
    "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
    "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
\
    "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, 8(dst))\
\
    "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
    "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
    "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
\
    "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
    "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
    "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
\
    "por %%mm1, %%mm3 \n\t"\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, 16(dst))\
\
    "add $24, "#dst" \n\t"\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#if COMPILE_TEMPLATE_MMX2
#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
#else
#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
#endif

#define REAL_WRITEYUY2(dst, dstw, index) \
    "packuswb %%mm3, %%mm3 \n\t"\
    "packuswb %%mm4, %%mm4 \n\t"\
    "packuswb %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm4, %%mm3 \n\t"\
    "movq %%mm1, %%mm7 \n\t"\
    "punpcklbw %%mm3, %%mm1 \n\t"\
    "punpckhbw %%mm3, %%mm7 \n\t"\
\
    MOVNTQ(%%mm1, (dst, index, 2))\
    MOVNTQ(%%mm7, 8(dst, index, 2))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
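
/* WRITEYUY2 narrows Y, U and V to bytes, interleaves U with V and then
 * zips that chroma stream with Y into the Y0 U0 Y1 V0 byte order of
 * PIX_FMT_YUYV422. */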

static inline void RENAME(yuv2yuvX)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
                                    const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize, const int16_t **alpSrc,
                                    uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
{
    if(!(c->flags & SWS_BITEXACT)) {
        if (c->flags & SWS_ACCURATE_RND) {
            if (uDest) {
                YSCALEYUV2YV12X_ACCURATE( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
                YSCALEYUV2YV12X_ACCURATE(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
            }
            if (CONFIG_SWSCALE_ALPHA && aDest) {
                YSCALEYUV2YV12X_ACCURATE( "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
            }

            YSCALEYUV2YV12X_ACCURATE("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
        } else {
            if (uDest) {
                YSCALEYUV2YV12X( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
                YSCALEYUV2YV12X(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
            }
            if (CONFIG_SWSCALE_ALPHA && aDest) {
                YSCALEYUV2YV12X( "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
            }

            YSCALEYUV2YV12X("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
        }
        return;
    }
    yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
                chrFilter, chrSrc, chrFilterSize,
                alpSrc, dest, uDest, vDest, aDest, dstW, chrDstW);
}
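
/* SWS_BITEXACT forces the C fallback so results match the reference
 * implementation bit for bit; the MMX paths above trade that guarantee
 * for speed. */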

static inline void RENAME(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc, const int16_t *chrSrc, const int16_t *alpSrc,
                                    uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
{
    if(!(c->flags & SWS_BITEXACT)) {
        long p= 4;
        const uint8_t *src[4]= {alpSrc + dstW, lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
        uint8_t *dst[4]= {aDest, dest, uDest, vDest};
        x86_reg counter[4]= {dstW, dstW, chrDstW, chrDstW};

        if (c->flags & SWS_ACCURATE_RND) {
            while(p--) {
                if (dst[p]) {
                    __asm__ volatile(
                        YSCALEYUV2YV121_ACCURATE
                        :: "r" (src[p]), "r" (dst[p] + counter[p]),
                           "g" (-counter[p])
                        : "%"REG_a
                    );
                }
            }
        } else {
            while(p--) {
                if (dst[p]) {
                    __asm__ volatile(
                        YSCALEYUV2YV121
                        :: "r" (src[p]), "r" (dst[p] + counter[p]),
                           "g" (-counter[p])
                        : "%"REG_a
                    );
                }
            }
        }
        return;
    }
    yuv2yuv1_c(c, lumSrc, chrSrc, alpSrc, dest, uDest, vDest, aDest,
               dstW, chrDstW);
}

/**
 * vertical scale YV12 to RGB
 */
static inline void RENAME(yuv2packedX)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
                                       const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
                                       const int16_t **alpSrc, uint8_t *dest, long dstW, long dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    if(!(c->flags & SWS_BITEXACT)) {
        if (c->flags & SWS_ACCURATE_RND) {
            switch(c->dstFormat) {
            case PIX_FMT_RGB32:
                if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
                    YSCALEYUV2PACKEDX_ACCURATE
                    YSCALEYUV2RGBX
                    "movq %%mm2, "U_TEMP"(%0) \n\t"
                    "movq %%mm4, "V_TEMP"(%0) \n\t"
                    "movq %%mm5, "Y_TEMP"(%0) \n\t"
                    YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET)
                    "movq "Y_TEMP"(%0), %%mm5 \n\t"
                    "psraw $3, %%mm1 \n\t"
                    "psraw $3, %%mm7 \n\t"
                    "packuswb %%mm7, %%mm1 \n\t"
                    WRITEBGR32(%4, %5, %%REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6)

                    YSCALEYUV2PACKEDX_END
                } else {
                    YSCALEYUV2PACKEDX_ACCURATE
                    YSCALEYUV2RGBX
                    "pcmpeqd %%mm7, %%mm7 \n\t"
                    WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)

                    YSCALEYUV2PACKEDX_END
                }
                return;
            case PIX_FMT_BGR24:
                YSCALEYUV2PACKEDX_ACCURATE
                YSCALEYUV2RGBX
                "pxor %%mm7, %%mm7 \n\t"
                "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
                "add %4, %%"REG_c" \n\t"
                WRITEBGR24(%%REGc, %5, %%REGa)

                :: "r" (&c->redDither),
                   "m" (dummy), "m" (dummy), "m" (dummy),
                   "r" (dest), "m" (dstW_reg)
                : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
                );
                return;
            case PIX_FMT_RGB555:
                YSCALEYUV2PACKEDX_ACCURATE
                YSCALEYUV2RGBX
                "pxor %%mm7, %%mm7 \n\t"
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
                "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
                "paddusb "RED_DITHER"(%0), %%mm5\n\t"
#endif

                WRITERGB15(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            case PIX_FMT_RGB565:
                YSCALEYUV2PACKEDX_ACCURATE
                YSCALEYUV2RGBX
                "pxor %%mm7, %%mm7 \n\t"
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
                "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
                "paddusb "RED_DITHER"(%0), %%mm5\n\t"
#endif

                WRITERGB16(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            case PIX_FMT_YUYV422:
                YSCALEYUV2PACKEDX_ACCURATE
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */

                "psraw $3, %%mm3 \n\t"
                "psraw $3, %%mm4 \n\t"
                "psraw $3, %%mm1 \n\t"
                "psraw $3, %%mm7 \n\t"
                WRITEYUY2(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            }
        } else {
            switch(c->dstFormat) {
            case PIX_FMT_RGB32:
                if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
                    YSCALEYUV2PACKEDX
                    YSCALEYUV2RGBX
                    YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
                    "psraw $3, %%mm1 \n\t"
                    "psraw $3, %%mm7 \n\t"
                    "packuswb %%mm7, %%mm1 \n\t"
                    WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
                    YSCALEYUV2PACKEDX_END
                } else {
                    YSCALEYUV2PACKEDX
                    YSCALEYUV2RGBX
                    "pcmpeqd %%mm7, %%mm7 \n\t"
                    WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                    YSCALEYUV2PACKEDX_END
                }
                return;
            case PIX_FMT_BGR24:
                YSCALEYUV2PACKEDX
                YSCALEYUV2RGBX
                "pxor %%mm7, %%mm7 \n\t"
                "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c" \n\t" //FIXME optimize
                "add %4, %%"REG_c" \n\t"
                WRITEBGR24(%%REGc, %5, %%REGa)

                :: "r" (&c->redDither),
                   "m" (dummy), "m" (dummy), "m" (dummy),
                   "r" (dest), "m" (dstW_reg)
                : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
                );
                return;
            case PIX_FMT_RGB555:
                YSCALEYUV2PACKEDX
                YSCALEYUV2RGBX
                "pxor %%mm7, %%mm7 \n\t"
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
                "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
                "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
#endif

                WRITERGB15(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            case PIX_FMT_RGB565:
                YSCALEYUV2PACKEDX
                YSCALEYUV2RGBX
                "pxor %%mm7, %%mm7 \n\t"
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
                "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
                "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
#endif

                WRITERGB16(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            case PIX_FMT_YUYV422:
                YSCALEYUV2PACKEDX
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */

                "psraw $3, %%mm3 \n\t"
                "psraw $3, %%mm4 \n\t"
                "psraw $3, %%mm1 \n\t"
                "psraw $3, %%mm7 \n\t"
                WRITEYUY2(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            }
        }
    }
    yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
                   chrFilter, chrSrc, chrFilterSize,
                   alpSrc, dest, dstW, dstY);
}
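
/* The RGB555/565 cases add a per-channel ordered-dither bias (BLUE_DITHER,
 * GREEN_DITHER, RED_DITHER) before the channels are truncated to 5 or 6
 * bits, which masks the banding that plain truncation would cause. */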

/**
 * vertical bilinear scale YV12 to RGB
 */
static inline void RENAME(yuv2packed2)(SwsContext *c, const uint16_t *buf0, const uint16_t *buf1, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
                                       const uint16_t *abuf0, const uint16_t *abuf1, uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
{
    if(!(c->flags & SWS_BITEXACT)) {
        switch(c->dstFormat) {
        //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
        case PIX_FMT_RGB32:
            if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
#if ARCH_X86_64
                __asm__ volatile(
                    YSCALEYUV2RGB(%%r8, %5)
                    YSCALEYUV2RGB_YA(%%r8, %5, %6, %7)
                    "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
                    "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
                    "packuswb %%mm7, %%mm1 \n\t"
                    WRITEBGR32(%4, 8280(%5), %%r8, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "r" (dest),
                       "a" (&c->redDither)
                       ,"r" (abuf0), "r" (abuf1)
                    : "%r8"
                );
#else
                *(const uint16_t **)(&c->u_temp)=abuf0;
                *(const uint16_t **)(&c->v_temp)=abuf1;
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB(%%REGBP, %5)
                    "push %0 \n\t"
                    "push %1 \n\t"
                    "mov "U_TEMP"(%5), %0 \n\t"
                    "mov "V_TEMP"(%5), %1 \n\t"
                    YSCALEYUV2RGB_YA(%%REGBP, %5, %0, %1)
                    "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
                    "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
                    "packuswb %%mm7, %%mm1 \n\t"
                    "pop %1 \n\t"
                    "pop %0 \n\t"
                    WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1177 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1183 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1184 "mov %4, %%"REG_b" \n\t"
1185 "push %%"REG_BP" \n\t"
1186 YSCALEYUV2RGB(%%REGBP, %5)
1187 "pcmpeqd %%mm7, %%mm7 \n\t"
1188 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1189 "pop %%"REG_BP" \n\t"
1190 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1192 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1199 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1200 "mov %4, %%"REG_b" \n\t"
1201 "push %%"REG_BP" \n\t"
1202 YSCALEYUV2RGB(%%REGBP, %5)
1203 "pxor %%mm7, %%mm7 \n\t"
1204 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1205 "pop %%"REG_BP" \n\t"
1206 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1207 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1211 case PIX_FMT_RGB555:
1213 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1214 "mov %4, %%"REG_b" \n\t"
1215 "push %%"REG_BP" \n\t"
1216 YSCALEYUV2RGB(%%REGBP, %5)
1217 "pxor %%mm7, %%mm7 \n\t"
1218 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1220 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1221 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1222 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1225 WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1226 "pop %%"REG_BP" \n\t"
1227 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1229 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1233 case PIX_FMT_RGB565:
1235 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1236 "mov %4, %%"REG_b" \n\t"
1237 "push %%"REG_BP" \n\t"
1238 YSCALEYUV2RGB(%%REGBP, %5)
1239 "pxor %%mm7, %%mm7 \n\t"
1240 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1242 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1243 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1244 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1247 WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1248 "pop %%"REG_BP" \n\t"
1249 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1250 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1254 case PIX_FMT_YUYV422:
1256 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1257 "mov %4, %%"REG_b" \n\t"
1258 "push %%"REG_BP" \n\t"
1259 YSCALEYUV2PACKED(%%REGBP, %5)
1260 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1261 "pop %%"REG_BP" \n\t"
1262 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1263 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1270 yuv2packed2_c(c, buf0, buf1, uvbuf0, uvbuf1, abuf0, abuf1,
1271 dest, dstW, yalpha, uvalpha, y);

/**
 * YV12 to RGB without scaling or interpolating
 */
static inline void RENAME(yuv2packed1)(SwsContext *c, const uint16_t *buf0, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
                                       const uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, enum PixelFormat dstFormat, int flags, int y)
{
    if(!(flags & SWS_BITEXACT)) {
        const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1

        if (flags&SWS_FULL_CHR_H_INT) {
            c->yuv2packed2(c, buf0, buf0, uvbuf0, uvbuf1, abuf0, abuf0, dest, dstW, 0, uvalpha, y);
            return;
        }

        if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
            switch(dstFormat) {
            case PIX_FMT_RGB32:
                if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
                    __asm__ volatile(
                        "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                        "mov %4, %%"REG_b" \n\t"
                        "push %%"REG_BP" \n\t"
                        YSCALEYUV2RGB1(%%REGBP, %5)
                        YSCALEYUV2RGB1_ALPHA(%%REGBP)
                        WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                        "pop %%"REG_BP" \n\t"
                        "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                        :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                           "a" (&c->redDither)
                    );
                } else {
                    __asm__ volatile(
                        "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                        "mov %4, %%"REG_b" \n\t"
                        "push %%"REG_BP" \n\t"
                        YSCALEYUV2RGB1(%%REGBP, %5)
                        "pcmpeqd %%mm7, %%mm7 \n\t"
                        WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                        "pop %%"REG_BP" \n\t"
                        "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                        :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                           "a" (&c->redDither)
                    );
                }
                return;
            case PIX_FMT_BGR24:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1(%%REGBP, %5)
                    "pxor %%mm7, %%mm7 \n\t"
                    WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_RGB555:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1(%%REGBP, %5)
                    "pxor %%mm7, %%mm7 \n\t"
                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                    "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                    "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                    "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
                    WRITERGB15(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_RGB565:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1(%%REGBP, %5)
                    "pxor %%mm7, %%mm7 \n\t"
                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                    "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                    "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                    "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif

                    WRITERGB16(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_YUYV422:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2PACKED1(%%REGBP, %5)
                    WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            }
        } else {
            switch(dstFormat) {
            case PIX_FMT_RGB32:
                if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
                    __asm__ volatile(
                        "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                        "mov %4, %%"REG_b" \n\t"
                        "push %%"REG_BP" \n\t"
                        YSCALEYUV2RGB1b(%%REGBP, %5)
                        YSCALEYUV2RGB1_ALPHA(%%REGBP)
                        WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                        "pop %%"REG_BP" \n\t"
                        "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                        :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                           "a" (&c->redDither)
                    );
                } else {
                    __asm__ volatile(
                        "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                        "mov %4, %%"REG_b" \n\t"
                        "push %%"REG_BP" \n\t"
                        YSCALEYUV2RGB1b(%%REGBP, %5)
                        "pcmpeqd %%mm7, %%mm7 \n\t"
                        WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                        "pop %%"REG_BP" \n\t"
                        "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                        :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                           "a" (&c->redDither)
                    );
                }
                return;
            case PIX_FMT_BGR24:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1b(%%REGBP, %5)
                    "pxor %%mm7, %%mm7 \n\t"
                    WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_RGB555:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1b(%%REGBP, %5)
                    "pxor %%mm7, %%mm7 \n\t"
                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                    "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                    "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                    "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
                    WRITERGB15(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_RGB565:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1b(%%REGBP, %5)
                    "pxor %%mm7, %%mm7 \n\t"
                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                    "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                    "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                    "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif

                    WRITERGB16(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_YUYV422:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2PACKED1b(%%REGBP, %5)
                    WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            }
        }
    }
    yuv2packed1_c(c, buf0, uvbuf0, uvbuf1, abuf0, dest,
                  dstW, uvalpha, dstFormat, flags, y);
}

//FIXME yuy2* can read up to 7 samples too much

static inline void RENAME(yuy2ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
{
    __asm__ volatile(
        "movq "MANGLE(bm01010101)", %%mm2 \n\t"
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",2), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
        "pand %%mm2, %%mm0 \n\t"
        "pand %%mm2, %%mm1 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "movq %%mm0, (%2, %%"REG_a") \n\t"
        "add $8, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
        : "%"REG_a
    );
}

static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
{
    __asm__ volatile(
        "movq "MANGLE(bm01010101)", %%mm4 \n\t"
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",4), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
        "psrlw $8, %%mm0 \n\t"
        "psrlw $8, %%mm1 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "psrlw $8, %%mm0 \n\t"
        "pand %%mm4, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        "movd %%mm0, (%3, %%"REG_a") \n\t"
        "movd %%mm1, (%2, %%"REG_a") \n\t"
        "add $4, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
        : "%"REG_a
    );
    assert(src1 == src2);
}
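
/* bm01010101 masks the even (luma) bytes of a YUYV quad; the UV extractor
 * instead shifts each word right by 8 to reach the odd (chroma) bytes,
 * which is the only real difference between the two loops above. */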

static inline void RENAME(LEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
{
    __asm__ volatile(
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",2), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
        "movq (%2, %%"REG_a",2), %%mm2 \n\t"
        "movq 8(%2, %%"REG_a",2), %%mm3 \n\t"
        "psrlw $8, %%mm0 \n\t"
        "psrlw $8, %%mm1 \n\t"
        "psrlw $8, %%mm2 \n\t"
        "psrlw $8, %%mm3 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "movq %%mm0, (%3, %%"REG_a") \n\t"
        "movq %%mm2, (%4, %%"REG_a") \n\t"
        "add $8, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width)
        : "%"REG_a
    );
}

/* This is almost identical to the previous, and exists only because
 * yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses. */
static inline void RENAME(uyvyToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
{
    __asm__ volatile(
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",2), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
        "psrlw $8, %%mm0 \n\t"
        "psrlw $8, %%mm1 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "movq %%mm0, (%2, %%"REG_a") \n\t"
        "add $8, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
        : "%"REG_a
    );
}

static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
{
    __asm__ volatile(
        "movq "MANGLE(bm01010101)", %%mm4 \n\t"
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",4), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
        "pand %%mm4, %%mm0 \n\t"
        "pand %%mm4, %%mm1 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "psrlw $8, %%mm0 \n\t"
        "pand %%mm4, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        "movd %%mm0, (%3, %%"REG_a") \n\t"
        "movd %%mm1, (%2, %%"REG_a") \n\t"
        "add $4, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
        : "%"REG_a
    );
    assert(src1 == src2);
}

static inline void RENAME(BEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
{
    __asm__ volatile(
        "movq "MANGLE(bm01010101)", %%mm4 \n\t"
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",2), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
        "movq (%2, %%"REG_a",2), %%mm2 \n\t"
        "movq 8(%2, %%"REG_a",2), %%mm3 \n\t"
        "pand %%mm4, %%mm0 \n\t"
        "pand %%mm4, %%mm1 \n\t"
        "pand %%mm4, %%mm2 \n\t"
        "pand %%mm4, %%mm3 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "movq %%mm0, (%3, %%"REG_a") \n\t"
        "movq %%mm2, (%4, %%"REG_a") \n\t"
        "add $8, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width)
        : "%"REG_a
    );
}

static inline void RENAME(nvXXtoUV)(uint8_t *dst1, uint8_t *dst2,
                                    const uint8_t *src, long width)
{
    __asm__ volatile(
        "movq "MANGLE(bm01010101)", %%mm4 \n\t"
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",2), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
        "movq %%mm0, %%mm2 \n\t"
        "movq %%mm1, %%mm3 \n\t"
        "pand %%mm4, %%mm0 \n\t"
        "pand %%mm4, %%mm1 \n\t"
        "psrlw $8, %%mm2 \n\t"
        "psrlw $8, %%mm3 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "movq %%mm0, (%2, %%"REG_a") \n\t"
        "movq %%mm2, (%3, %%"REG_a") \n\t"
        "add $8, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst1+width), "r" (dst2+width)
        : "%"REG_a
    );
}

static inline void RENAME(nv12ToUV)(uint8_t *dstU, uint8_t *dstV,
                                    const uint8_t *src1, const uint8_t *src2,
                                    long width, uint32_t *unused)
{
    RENAME(nvXXtoUV)(dstU, dstV, src1, width);
}

static inline void RENAME(nv21ToUV)(uint8_t *dstU, uint8_t *dstV,
                                    const uint8_t *src1, const uint8_t *src2,
                                    long width, uint32_t *unused)
{
    RENAME(nvXXtoUV)(dstV, dstU, src1, width);
}
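
/* NV12 and NV21 differ only in the order of the interleaved chroma bytes,
 * so both wrappers share the same deinterleaver and merely swap the U and
 * V destination planes. */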

static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, const uint8_t *src, long width, enum PixelFormat srcFormat)
{
    if(srcFormat == PIX_FMT_BGR24) {
        __asm__ volatile(
            "movq "MANGLE(ff_bgr24toY1Coeff)", %%mm5 \n\t"
            "movq "MANGLE(ff_bgr24toY2Coeff)", %%mm6 \n\t"
        );
    } else {
        __asm__ volatile(
            "movq "MANGLE(ff_rgb24toY1Coeff)", %%mm5 \n\t"
            "movq "MANGLE(ff_rgb24toY2Coeff)", %%mm6 \n\t"
        );
    }

    __asm__ volatile(
        "movq "MANGLE(ff_bgr24toYOffset)", %%mm4 \n\t"
        "mov %2, %%"REG_a" \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        "1: \n\t"
        PREFETCH" 64(%0) \n\t"
        "movd (%0), %%mm0 \n\t"
        "movd 2(%0), %%mm1 \n\t"
        "movd 6(%0), %%mm2 \n\t"
        "movd 8(%0), %%mm3 \n\t"
        "add $12, %0 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpcklbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpcklbw %%mm7, %%mm3 \n\t"
        "pmaddwd %%mm5, %%mm0 \n\t"
        "pmaddwd %%mm6, %%mm1 \n\t"
        "pmaddwd %%mm5, %%mm2 \n\t"
        "pmaddwd %%mm6, %%mm3 \n\t"
        "paddd %%mm1, %%mm0 \n\t"
        "paddd %%mm3, %%mm2 \n\t"
        "paddd %%mm4, %%mm0 \n\t"
        "paddd %%mm4, %%mm2 \n\t"
        "psrad $15, %%mm0 \n\t"
        "psrad $15, %%mm2 \n\t"
        "packssdw %%mm2, %%mm0 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "movd %%mm0, (%1, %%"REG_a") \n\t"
        "add $4, %%"REG_a" \n\t"
        " js 1b \n\t"
        : "+r" (src)
        : "r" (dst+width), "g" ((x86_reg)-width)
    );
}
1741 static inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src, long width, enum PixelFormat srcFormat)
1744 "movq 24(%4), %%mm6 \n\t"
1745 "mov %3, %%"REG_a" \n\t"
1746 "pxor %%mm7, %%mm7 \n\t"
1748 PREFETCH" 64(%0) \n\t"
1749 "movd (%0), %%mm0 \n\t"
1750 "movd 2(%0), %%mm1 \n\t"
1751 "punpcklbw %%mm7, %%mm0 \n\t"
1752 "punpcklbw %%mm7, %%mm1 \n\t"
1753 "movq %%mm0, %%mm2 \n\t"
1754 "movq %%mm1, %%mm3 \n\t"
1755 "pmaddwd (%4), %%mm0 \n\t"
1756 "pmaddwd 8(%4), %%mm1 \n\t"
1757 "pmaddwd 16(%4), %%mm2 \n\t"
1758 "pmaddwd %%mm6, %%mm3 \n\t"
1759 "paddd %%mm1, %%mm0 \n\t"
1760 "paddd %%mm3, %%mm2 \n\t"
1762 "movd 6(%0), %%mm1 \n\t"
1763 "movd 8(%0), %%mm3 \n\t"
1765 "punpcklbw %%mm7, %%mm1 \n\t"
1766 "punpcklbw %%mm7, %%mm3 \n\t"
1767 "movq %%mm1, %%mm4 \n\t"
1768 "movq %%mm3, %%mm5 \n\t"
1769 "pmaddwd (%4), %%mm1 \n\t"
1770 "pmaddwd 8(%4), %%mm3 \n\t"
1771 "pmaddwd 16(%4), %%mm4 \n\t"
1772 "pmaddwd %%mm6, %%mm5 \n\t"
1773 "paddd %%mm3, %%mm1 \n\t"
1774 "paddd %%mm5, %%mm4 \n\t"
1776 "movq "MANGLE(ff_bgr24toUVOffset)", %%mm3 \n\t"
1777 "paddd %%mm3, %%mm0 \n\t"
1778 "paddd %%mm3, %%mm2 \n\t"
1779 "paddd %%mm3, %%mm1 \n\t"
1780 "paddd %%mm3, %%mm4 \n\t"
1781 "psrad $15, %%mm0 \n\t"
1782 "psrad $15, %%mm2 \n\t"
1783 "psrad $15, %%mm1 \n\t"
1784 "psrad $15, %%mm4 \n\t"
1785 "packssdw %%mm1, %%mm0 \n\t"
1786 "packssdw %%mm4, %%mm2 \n\t"
1787 "packuswb %%mm0, %%mm0 \n\t"
1788 "packuswb %%mm2, %%mm2 \n\t"
1789 "movd %%mm0, (%1, %%"REG_a") \n\t"
1790 "movd %%mm2, (%2, %%"REG_a") \n\t"
1791 "add $4, %%"REG_a" \n\t"
1794 : "r" (dstU+width), "r" (dstV+width), "g" ((x86_reg)-width), "r"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24])
1799 static inline void RENAME(bgr24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
1801 RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24);
1804 static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
    assert(src1 == src2);
    RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_BGR24);
1810 static inline void RENAME(rgb24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
1812 RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24);
1815 static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1818 RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_RGB24);
1822 // bilinear / bicubic scaling
1823 static inline void RENAME(hScale)(int16_t *dst, int dstW, const uint8_t *src, int srcW, int xInc,
1824 const int16_t *filter, const int16_t *filterPos, long filterSize)
1826 assert(filterSize % 4 == 0 && filterSize>0);
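    /* All three MMX paths below compute the same thing as this scalar sketch
     * (illustrative only):
     *
     *     for (i = 0; i < dstW; i++) {
     *         int j, val = 0;
     *         for (j = 0; j < filterSize; j++)
     *             val += ((int)src[filterPos[i] + j]) * filter[filterSize*i + j];
     *         dst[i] = FFMIN(val >> 7, (1<<15) - 1); // the asm saturates via packssdw
     *     }
     */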
    if (filterSize==4) { // always true for upscaling, sometimes for downscaling, too
1828 x86_reg counter= -2*dstW;
1830 filterPos-= counter/2;
1834 "push %%"REG_b" \n\t"
1836 "pxor %%mm7, %%mm7 \n\t"
1837 "push %%"REG_BP" \n\t" // we use 7 regs here ...
1838 "mov %%"REG_a", %%"REG_BP" \n\t"
1841 "movzwl (%2, %%"REG_BP"), %%eax \n\t"
1842 "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
1843 "movq (%1, %%"REG_BP", 4), %%mm1 \n\t"
1844 "movq 8(%1, %%"REG_BP", 4), %%mm3 \n\t"
1845 "movd (%3, %%"REG_a"), %%mm0 \n\t"
1846 "movd (%3, %%"REG_b"), %%mm2 \n\t"
1847 "punpcklbw %%mm7, %%mm0 \n\t"
1848 "punpcklbw %%mm7, %%mm2 \n\t"
1849 "pmaddwd %%mm1, %%mm0 \n\t"
1850 "pmaddwd %%mm2, %%mm3 \n\t"
1851 "movq %%mm0, %%mm4 \n\t"
1852 "punpckldq %%mm3, %%mm0 \n\t"
1853 "punpckhdq %%mm3, %%mm4 \n\t"
1854 "paddd %%mm4, %%mm0 \n\t"
1855 "psrad $7, %%mm0 \n\t"
1856 "packssdw %%mm0, %%mm0 \n\t"
1857 "movd %%mm0, (%4, %%"REG_BP") \n\t"
1858 "add $4, %%"REG_BP" \n\t"
1861 "pop %%"REG_BP" \n\t"
1863 "pop %%"REG_b" \n\t"
1866 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
1871 } else if (filterSize==8) {
1872 x86_reg counter= -2*dstW;
1874 filterPos-= counter/2;
1878 "push %%"REG_b" \n\t"
1880 "pxor %%mm7, %%mm7 \n\t"
1881 "push %%"REG_BP" \n\t" // we use 7 regs here ...
1882 "mov %%"REG_a", %%"REG_BP" \n\t"
1885 "movzwl (%2, %%"REG_BP"), %%eax \n\t"
1886 "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
1887 "movq (%1, %%"REG_BP", 8), %%mm1 \n\t"
1888 "movq 16(%1, %%"REG_BP", 8), %%mm3 \n\t"
1889 "movd (%3, %%"REG_a"), %%mm0 \n\t"
1890 "movd (%3, %%"REG_b"), %%mm2 \n\t"
1891 "punpcklbw %%mm7, %%mm0 \n\t"
1892 "punpcklbw %%mm7, %%mm2 \n\t"
1893 "pmaddwd %%mm1, %%mm0 \n\t"
1894 "pmaddwd %%mm2, %%mm3 \n\t"
1896 "movq 8(%1, %%"REG_BP", 8), %%mm1 \n\t"
1897 "movq 24(%1, %%"REG_BP", 8), %%mm5 \n\t"
1898 "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
1899 "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
1900 "punpcklbw %%mm7, %%mm4 \n\t"
1901 "punpcklbw %%mm7, %%mm2 \n\t"
1902 "pmaddwd %%mm1, %%mm4 \n\t"
1903 "pmaddwd %%mm2, %%mm5 \n\t"
1904 "paddd %%mm4, %%mm0 \n\t"
1905 "paddd %%mm5, %%mm3 \n\t"
1906 "movq %%mm0, %%mm4 \n\t"
1907 "punpckldq %%mm3, %%mm0 \n\t"
1908 "punpckhdq %%mm3, %%mm4 \n\t"
1909 "paddd %%mm4, %%mm0 \n\t"
1910 "psrad $7, %%mm0 \n\t"
1911 "packssdw %%mm0, %%mm0 \n\t"
1912 "movd %%mm0, (%4, %%"REG_BP") \n\t"
1913 "add $4, %%"REG_BP" \n\t"
1916 "pop %%"REG_BP" \n\t"
1918 "pop %%"REG_b" \n\t"
1921 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
1927 const uint8_t *offset = src+filterSize;
1928 x86_reg counter= -2*dstW;
1929 //filter-= counter*filterSize/2;
1930 filterPos-= counter/2;
1933 "pxor %%mm7, %%mm7 \n\t"
1936 "mov %2, %%"REG_c" \n\t"
1937 "movzwl (%%"REG_c", %0), %%eax \n\t"
1938 "movzwl 2(%%"REG_c", %0), %%edx \n\t"
1939 "mov %5, %%"REG_c" \n\t"
1940 "pxor %%mm4, %%mm4 \n\t"
1941 "pxor %%mm5, %%mm5 \n\t"
1943 "movq (%1), %%mm1 \n\t"
1944 "movq (%1, %6), %%mm3 \n\t"
1945 "movd (%%"REG_c", %%"REG_a"), %%mm0 \n\t"
1946 "movd (%%"REG_c", %%"REG_d"), %%mm2 \n\t"
1947 "punpcklbw %%mm7, %%mm0 \n\t"
1948 "punpcklbw %%mm7, %%mm2 \n\t"
1949 "pmaddwd %%mm1, %%mm0 \n\t"
1950 "pmaddwd %%mm2, %%mm3 \n\t"
1951 "paddd %%mm3, %%mm5 \n\t"
1952 "paddd %%mm0, %%mm4 \n\t"
1954 "add $4, %%"REG_c" \n\t"
1955 "cmp %4, %%"REG_c" \n\t"
1958 "movq %%mm4, %%mm0 \n\t"
1959 "punpckldq %%mm5, %%mm4 \n\t"
1960 "punpckhdq %%mm5, %%mm0 \n\t"
1961 "paddd %%mm0, %%mm4 \n\t"
1962 "psrad $7, %%mm4 \n\t"
1963 "packssdw %%mm4, %%mm4 \n\t"
1964 "mov %3, %%"REG_a" \n\t"
1965 "movd %%mm4, (%%"REG_a", %0) \n\t"
1969 : "+r" (counter), "+r" (filter)
1970 : "m" (filterPos), "m" (dst), "m"(offset),
1971 "m" (src), "r" ((x86_reg)filterSize*2)
1972 : "%"REG_a, "%"REG_c, "%"REG_d
1977 #define FAST_BILINEAR_X86 \
1978 "subl %%edi, %%esi \n\t" /* src[xx+1] - src[xx] */ \
1979 "imull %%ecx, %%esi \n\t" /* (src[xx+1] - src[xx])*xalpha */ \
1980 "shll $16, %%edi \n\t" \
1981 "addl %%edi, %%esi \n\t" /* src[xx+1]*xalpha + src[xx]*(1-xalpha) */ \
1982 "mov %1, %%"REG_D"\n\t" \
1983 "shrl $9, %%esi \n\t" \
1985 static inline void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst,
1986 long dstWidth, const uint8_t *src, int srcW,
1989 #if COMPILE_TEMPLATE_MMX2
1990 int32_t *filterPos = c->hLumFilterPos;
1991 int16_t *filter = c->hLumFilter;
1992 int canMMX2BeUsed = c->canMMX2BeUsed;
1993 void *mmx2FilterCode= c->lumMmx2FilterCode;
1996 DECLARE_ALIGNED(8, uint64_t, ebxsave);
1998 if (canMMX2BeUsed) {
2001 "mov %%"REG_b", %5 \n\t"
2003 "pxor %%mm7, %%mm7 \n\t"
2004 "mov %0, %%"REG_c" \n\t"
2005 "mov %1, %%"REG_D" \n\t"
2006 "mov %2, %%"REG_d" \n\t"
2007 "mov %3, %%"REG_b" \n\t"
2008 "xor %%"REG_a", %%"REG_a" \n\t" // i
2009 PREFETCH" (%%"REG_c") \n\t"
2010 PREFETCH" 32(%%"REG_c") \n\t"
2011 PREFETCH" 64(%%"REG_c") \n\t"
2015 #define CALL_MMX2_FILTER_CODE \
2016 "movl (%%"REG_b"), %%esi \n\t"\
2018 "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
2019 "add %%"REG_S", %%"REG_c" \n\t"\
2020 "add %%"REG_a", %%"REG_D" \n\t"\
2021 "xor %%"REG_a", %%"REG_a" \n\t"\
2025 #define CALL_MMX2_FILTER_CODE \
2026 "movl (%%"REG_b"), %%esi \n\t"\
2028 "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
2029 "add %%"REG_a", %%"REG_D" \n\t"\
2030 "xor %%"REG_a", %%"REG_a" \n\t"\
2032 #endif /* ARCH_X86_64 */
2034 CALL_MMX2_FILTER_CODE
2035 CALL_MMX2_FILTER_CODE
2036 CALL_MMX2_FILTER_CODE
2037 CALL_MMX2_FILTER_CODE
2038 CALL_MMX2_FILTER_CODE
2039 CALL_MMX2_FILTER_CODE
2040 CALL_MMX2_FILTER_CODE
2041 CALL_MMX2_FILTER_CODE
2044 "mov %5, %%"REG_b" \n\t"
2046 :: "m" (src), "m" (dst), "m" (filter), "m" (filterPos),
2047 "m" (mmx2FilterCode)
2051 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
        for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128; // replicate the last source pixel past the right edge
2058 #endif /* COMPILE_TEMPLATE_MMX2 */
2059 x86_reg xInc_shr16 = xInc >> 16;
2060 uint16_t xInc_mask = xInc & 0xffff;
2061 x86_reg dstWidth_reg = dstWidth;
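    /* The loop below steps a 16.16 DDA through the source: "addw" advances the
     * fractional part in %%cx and sets the carry flag on wrap-around, and "adc"
     * folds that carry into the integer source position. */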
    // no MMX, just plain x86 asm ...
2064 "xor %%"REG_a", %%"REG_a" \n\t" // i
2065 "xor %%"REG_d", %%"REG_d" \n\t" // xx
2066 "xorl %%ecx, %%ecx \n\t" // xalpha
2069 "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
2070 "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
2072 "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
2073 "addw %4, %%cx \n\t" //xalpha += xInc&0xFFFF
2074 "adc %3, %%"REG_d" \n\t" //xx+= xInc>>16 + carry
2076 "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
2077 "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
2079 "movw %%si, 2(%%"REG_D", %%"REG_a", 2) \n\t"
2080 "addw %4, %%cx \n\t" //xalpha += xInc&0xFFFF
2081 "adc %3, %%"REG_d" \n\t" //xx+= xInc>>16 + carry
2084 "add $2, %%"REG_a" \n\t"
2085 "cmp %2, %%"REG_a" \n\t"
2089 :: "r" (src), "m" (dst), "m" (dstWidth_reg), "m" (xInc_shr16), "m" (xInc_mask)
2090 : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
2092 #if COMPILE_TEMPLATE_MMX2
2093 } //if MMX2 can't be used
2097 static inline void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst,
2098 long dstWidth, const uint8_t *src1,
2099 const uint8_t *src2, int srcW, int xInc)
2101 #if COMPILE_TEMPLATE_MMX2
2102 int32_t *filterPos = c->hChrFilterPos;
2103 int16_t *filter = c->hChrFilter;
2104 int canMMX2BeUsed = c->canMMX2BeUsed;
2105 void *mmx2FilterCode= c->chrMmx2FilterCode;
2108 DECLARE_ALIGNED(8, uint64_t, ebxsave);
2110 if (canMMX2BeUsed) {
2113 "mov %%"REG_b", %6 \n\t"
2115 "pxor %%mm7, %%mm7 \n\t"
2116 "mov %0, %%"REG_c" \n\t"
2117 "mov %1, %%"REG_D" \n\t"
2118 "mov %2, %%"REG_d" \n\t"
2119 "mov %3, %%"REG_b" \n\t"
2120 "xor %%"REG_a", %%"REG_a" \n\t" // i
2121 PREFETCH" (%%"REG_c") \n\t"
2122 PREFETCH" 32(%%"REG_c") \n\t"
2123 PREFETCH" 64(%%"REG_c") \n\t"
2125 CALL_MMX2_FILTER_CODE
2126 CALL_MMX2_FILTER_CODE
2127 CALL_MMX2_FILTER_CODE
2128 CALL_MMX2_FILTER_CODE
2129 "xor %%"REG_a", %%"REG_a" \n\t" // i
2130 "mov %5, %%"REG_c" \n\t" // src
2131 "mov %1, %%"REG_D" \n\t" // buf1
2132 "add $"AV_STRINGIFY(VOF)", %%"REG_D" \n\t"
2133 PREFETCH" (%%"REG_c") \n\t"
2134 PREFETCH" 32(%%"REG_c") \n\t"
2135 PREFETCH" 64(%%"REG_c") \n\t"
2137 CALL_MMX2_FILTER_CODE
2138 CALL_MMX2_FILTER_CODE
2139 CALL_MMX2_FILTER_CODE
2140 CALL_MMX2_FILTER_CODE
2143 "mov %6, %%"REG_b" \n\t"
2145 :: "m" (src1), "m" (dst), "m" (filter), "m" (filterPos),
2146 "m" (mmx2FilterCode), "m" (src2)
2150 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
        for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) {
            // replicate the last source pixel for outputs that would read past the right edge
            dst[i] = src1[srcW-1]*128;
            dst[i+VOFW] = src2[srcW-1]*128;
2161 #endif /* COMPILE_TEMPLATE_MMX2 */
2162 x86_reg xInc_shr16 = (x86_reg) (xInc >> 16);
2163 uint16_t xInc_mask = xInc & 0xffff;
2164 x86_reg dstWidth_reg = dstWidth;
2166 "xor %%"REG_a", %%"REG_a" \n\t" // i
2167 "xor %%"REG_d", %%"REG_d" \n\t" // xx
2168 "xorl %%ecx, %%ecx \n\t" // xalpha
2171 "mov %0, %%"REG_S" \n\t"
2172 "movzbl (%%"REG_S", %%"REG_d"), %%edi \n\t" //src[xx]
2173 "movzbl 1(%%"REG_S", %%"REG_d"), %%esi \n\t" //src[xx+1]
2175 "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
2177 "movzbl (%5, %%"REG_d"), %%edi \n\t" //src[xx]
2178 "movzbl 1(%5, %%"REG_d"), %%esi \n\t" //src[xx+1]
2180 "movw %%si, "AV_STRINGIFY(VOF)"(%%"REG_D", %%"REG_a", 2) \n\t"
2182 "addw %4, %%cx \n\t" //xalpha += xInc&0xFFFF
2183 "adc %3, %%"REG_d" \n\t" //xx+= xInc>>16 + carry
2184 "add $1, %%"REG_a" \n\t"
2185 "cmp %2, %%"REG_a" \n\t"
/* The "g" operand is needed to support GCC 4.0, but GCC 3.3 miscompiles it and
   makes MPlayer crash on IA-32 machines, hence the compiler-version check. */
2190 #if ARCH_X86_64 && AV_GCC_VERSION_AT_LEAST(3,4)
2191 :: "m" (src1), "m" (dst), "g" (dstWidth_reg), "m" (xInc_shr16), "m" (xInc_mask),
2193 :: "m" (src1), "m" (dst), "m" (dstWidth_reg), "m" (xInc_shr16), "m" (xInc_mask),
2196 : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
2198 #if COMPILE_TEMPLATE_MMX2
2199 } //if MMX2 can't be used
2203 #define DEBUG_SWSCALE_BUFFERS 0
2204 #define DEBUG_BUFFERS(...) if (DEBUG_SWSCALE_BUFFERS) av_log(c, AV_LOG_DEBUG, __VA_ARGS__)
2206 static int RENAME(swScale)(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
2207 int srcSliceH, uint8_t* dst[], int dstStride[])
    /* load a few things into local vars to make the code more readable and faster */
2210 const int srcW= c->srcW;
2211 const int dstW= c->dstW;
2212 const int dstH= c->dstH;
2213 const int chrDstW= c->chrDstW;
2214 const int chrSrcW= c->chrSrcW;
2215 const int lumXInc= c->lumXInc;
2216 const int chrXInc= c->chrXInc;
2217 const enum PixelFormat dstFormat= c->dstFormat;
2218 const int flags= c->flags;
2219 int16_t *vLumFilterPos= c->vLumFilterPos;
2220 int16_t *vChrFilterPos= c->vChrFilterPos;
2221 int16_t *hLumFilterPos= c->hLumFilterPos;
2222 int16_t *hChrFilterPos= c->hChrFilterPos;
2223 int16_t *vLumFilter= c->vLumFilter;
2224 int16_t *vChrFilter= c->vChrFilter;
2225 int16_t *hLumFilter= c->hLumFilter;
2226 int16_t *hChrFilter= c->hChrFilter;
2227 int32_t *lumMmxFilter= c->lumMmxFilter;
2228 int32_t *chrMmxFilter= c->chrMmxFilter;
2229 int32_t av_unused *alpMmxFilter= c->alpMmxFilter;
2230 const int vLumFilterSize= c->vLumFilterSize;
2231 const int vChrFilterSize= c->vChrFilterSize;
2232 const int hLumFilterSize= c->hLumFilterSize;
2233 const int hChrFilterSize= c->hChrFilterSize;
2234 int16_t **lumPixBuf= c->lumPixBuf;
2235 int16_t **chrPixBuf= c->chrPixBuf;
2236 int16_t **alpPixBuf= c->alpPixBuf;
2237 const int vLumBufSize= c->vLumBufSize;
2238 const int vChrBufSize= c->vChrBufSize;
2239 uint8_t *formatConvBuffer= c->formatConvBuffer;
2240 const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
2241 const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
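    // -((-x) >> n) rounds upwards, i.e. ceil(srcSliceH / 2^chrSrcVSubSample)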
2243 uint32_t *pal=c->pal_yuv;
2245 /* vars which will change and which we need to store back in the context */
2247 int lumBufIndex= c->lumBufIndex;
2248 int chrBufIndex= c->chrBufIndex;
2249 int lastInLumBuf= c->lastInLumBuf;
2250 int lastInChrBuf= c->lastInChrBuf;
2252 if (isPacked(c->srcFormat)) {
2260 srcStride[3]= srcStride[0];
2262 srcStride[1]<<= c->vChrDrop;
2263 srcStride[2]<<= c->vChrDrop;
2265 DEBUG_BUFFERS("swScale() %p[%d] %p[%d] %p[%d] %p[%d] -> %p[%d] %p[%d] %p[%d] %p[%d]\n",
2266 src[0], srcStride[0], src[1], srcStride[1], src[2], srcStride[2], src[3], srcStride[3],
2267 dst[0], dstStride[0], dst[1], dstStride[1], dst[2], dstStride[2], dst[3], dstStride[3]);
2268 DEBUG_BUFFERS("srcSliceY: %d srcSliceH: %d dstY: %d dstH: %d\n",
2269 srcSliceY, srcSliceH, dstY, dstH);
2270 DEBUG_BUFFERS("vLumFilterSize: %d vLumBufSize: %d vChrFilterSize: %d vChrBufSize: %d\n",
2271 vLumFilterSize, vLumBufSize, vChrFilterSize, vChrBufSize);
2273 if (dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0 || dstStride[3]%8 != 0) {
2274 static int warnedAlready=0; //FIXME move this into the context perhaps
2275 if (flags & SWS_PRINT_INFO && !warnedAlready) {
2276 av_log(c, AV_LOG_WARNING, "Warning: dstStride is not aligned!\n"
2277 " ->cannot do aligned memory accesses anymore\n");
    /* Note: the user might start scaling in the middle of the picture, in which
       case this block is not executed. That is not really intended, but it
       currently works, so people might do it. */
2285 if (srcSliceY ==0) {
2295 for (;dstY < dstH; dstY++) {
2296 unsigned char *dest =dst[0]+dstStride[0]*dstY;
2297 const int chrDstY= dstY>>c->chrDstVSubSample;
2298 unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
2299 unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
2300 unsigned char *aDest=(CONFIG_SWSCALE_ALPHA && alpPixBuf) ? dst[3]+dstStride[3]*dstY : NULL;
2302 const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
2303 const int firstLumSrcY2= vLumFilterPos[FFMIN(dstY | ((1<<c->chrDstVSubSample) - 1), dstH-1)];
2304 const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
2305 int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
2306 int lastLumSrcY2=firstLumSrcY2+ vLumFilterSize -1; // Last line needed as input
2307 int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
2310 //handle holes (FAST_BILINEAR & weird filters)
2311 if (firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
2312 if (firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
2313 assert(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1);
2314 assert(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1);
2316 DEBUG_BUFFERS("dstY: %d\n", dstY);
2317 DEBUG_BUFFERS("\tfirstLumSrcY: %d lastLumSrcY: %d lastInLumBuf: %d\n",
2318 firstLumSrcY, lastLumSrcY, lastInLumBuf);
2319 DEBUG_BUFFERS("\tfirstChrSrcY: %d lastChrSrcY: %d lastInChrBuf: %d\n",
2320 firstChrSrcY, lastChrSrcY, lastInChrBuf);
        // Do we have enough lines in this slice to output the dstY line?
2323 enough_lines = lastLumSrcY2 < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample);
2325 if (!enough_lines) {
2326 lastLumSrcY = srcSliceY + srcSliceH - 1;
2327 lastChrSrcY = chrSrcSliceY + chrSrcSliceH - 1;
2328 DEBUG_BUFFERS("buffering slice: lastLumSrcY %d lastChrSrcY %d\n",
2329 lastLumSrcY, lastChrSrcY);
2332 //Do horizontal scaling
2333 while(lastInLumBuf < lastLumSrcY) {
2334 const uint8_t *src1= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
2335 const uint8_t *src2= src[3]+(lastInLumBuf + 1 - srcSliceY)*srcStride[3];
2337 assert(lumBufIndex < 2*vLumBufSize);
2338 assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
2339 assert(lastInLumBuf + 1 - srcSliceY >= 0);
2340 hyscale_c(c, lumPixBuf[ lumBufIndex ], dstW, src1, srcW, lumXInc,
2341 hLumFilter, hLumFilterPos, hLumFilterSize,
2344 if (CONFIG_SWSCALE_ALPHA && alpPixBuf)
2345 hyscale_c(c, alpPixBuf[ lumBufIndex ], dstW, src2, srcW, lumXInc,
2346 hLumFilter, hLumFilterPos, hLumFilterSize,
2350 DEBUG_BUFFERS("\t\tlumBufIndex %d: lastInLumBuf: %d\n",
2351 lumBufIndex, lastInLumBuf);
2353 while(lastInChrBuf < lastChrSrcY) {
2354 const uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
2355 const uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
2357 assert(chrBufIndex < 2*vChrBufSize);
2358 assert(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH));
2359 assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
2360 //FIXME replace parameters through context struct (some at least)
2362 if (c->needs_hcscale)
2363 hcscale_c(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
2364 hChrFilter, hChrFilterPos, hChrFilterSize,
2368 DEBUG_BUFFERS("\t\tchrBufIndex %d: lastInChrBuf: %d\n",
2369 chrBufIndex, lastInChrBuf);
2371 //wrap buf index around to stay inside the ring buffer
2372 if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
2373 if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
2375 break; //we can't output a dstY line so let's try with the next slice
2377 c->blueDither= ff_dither8[dstY&1];
2378 if (c->dstFormat == PIX_FMT_RGB555 || c->dstFormat == PIX_FMT_BGR555)
2379 c->greenDither= ff_dither8[dstY&1];
2381 c->greenDither= ff_dither4[dstY&1];
2382 c->redDither= ff_dither8[(dstY+1)&1];
2383 if (dstY < dstH-2) {
2384 const int16_t **lumSrcPtr= (const int16_t **) lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
2385 const int16_t **chrSrcPtr= (const int16_t **) chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
2386 const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **) alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
2388 if (flags & SWS_ACCURATE_RND) {
2389 int s= APCK_SIZE / 8;
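            /* Packing sketch for the ACCURATE_RND asm: each APCK_SIZE-byte entry
             * carries two source-line pointers plus one coefficient pair
             * replicated across both dwords, so a single pmaddwd can apply two
             * vertical filter taps at once. */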
2390 for (i=0; i<vLumFilterSize; i+=2) {
2391 *(const void**)&lumMmxFilter[s*i ]= lumSrcPtr[i ];
2392 *(const void**)&lumMmxFilter[s*i+APCK_PTR2/4 ]= lumSrcPtr[i+(vLumFilterSize>1)];
2393 lumMmxFilter[s*i+APCK_COEF/4 ]=
2394 lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i ]
2395 + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
2396 if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
2397 *(const void**)&alpMmxFilter[s*i ]= alpSrcPtr[i ];
2398 *(const void**)&alpMmxFilter[s*i+APCK_PTR2/4 ]= alpSrcPtr[i+(vLumFilterSize>1)];
2399 alpMmxFilter[s*i+APCK_COEF/4 ]=
2400 alpMmxFilter[s*i+APCK_COEF/4+1]= lumMmxFilter[s*i+APCK_COEF/4 ];
2403 for (i=0; i<vChrFilterSize; i+=2) {
2404 *(const void**)&chrMmxFilter[s*i ]= chrSrcPtr[i ];
2405 *(const void**)&chrMmxFilter[s*i+APCK_PTR2/4 ]= chrSrcPtr[i+(vChrFilterSize>1)];
2406 chrMmxFilter[s*i+APCK_COEF/4 ]=
2407 chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i ]
2408 + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
2411 for (i=0; i<vLumFilterSize; i++) {
2412 lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
2413 lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32;
2414 lumMmxFilter[4*i+2]=
2415 lumMmxFilter[4*i+3]=
2416 ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
2417 if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
2418 alpMmxFilter[4*i+0]= (int32_t)alpSrcPtr[i];
2419 alpMmxFilter[4*i+1]= (uint64_t)alpSrcPtr[i] >> 32;
2420 alpMmxFilter[4*i+2]=
2421 alpMmxFilter[4*i+3]= lumMmxFilter[4*i+2];
2424 for (i=0; i<vChrFilterSize; i++) {
2425 chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
2426 chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32;
2427 chrMmxFilter[4*i+2]=
2428 chrMmxFilter[4*i+3]=
2429 ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
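            /* Layout sketch for the plain MMX path: each 4*i group holds the
             * source-line pointer split into two 32-bit halves plus the 16-bit
             * coefficient duplicated into both words (*0x10001) for pmulhw. */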
2432 if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21) {
2433 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
            if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions into luma / chroma
2436 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
2437 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2438 dest, uDest, dstW, chrDstW, dstFormat);
2439 } else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) { //YV12 like
2440 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
            if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions into luma / chroma
2442 if (is16BPS(dstFormat) || is9_OR_10BPS(dstFormat)) {
2444 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
2445 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2446 alpSrcPtr, (uint16_t *) dest, (uint16_t *) uDest, (uint16_t *) vDest, (uint16_t *) aDest, dstW, chrDstW,
2448 } else if (vLumFilterSize == 1 && vChrFilterSize == 1) { // unscaled YV12
2449 const int16_t *lumBuf = lumSrcPtr[0];
2450 const int16_t *chrBuf= chrSrcPtr[0];
2451 const int16_t *alpBuf= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? alpSrcPtr[0] : NULL;
2452 c->yuv2yuv1(c, lumBuf, chrBuf, alpBuf, dest, uDest, vDest, aDest, dstW, chrDstW);
2453 } else { //General YV12
2455 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
2456 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2457 alpSrcPtr, dest, uDest, vDest, aDest, dstW, chrDstW);
2460 assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
2461 assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
2462 if (vLumFilterSize == 1 && vChrFilterSize == 2) { //unscaled RGB
2463 int chrAlpha= vChrFilter[2*dstY+1];
2464 if(flags & SWS_FULL_CHR_H_INT) {
2465 yuv2rgbXinC_full(c, //FIXME write a packed1_full function
2466 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
2467 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2468 alpSrcPtr, dest, dstW, dstY);
2470 c->yuv2packed1(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
2471 alpPixBuf ? *alpSrcPtr : NULL,
2472 dest, dstW, chrAlpha, dstFormat, flags, dstY);
2474 } else if (vLumFilterSize == 2 && vChrFilterSize == 2) { //bilinear upscale RGB
2475 int lumAlpha= vLumFilter[2*dstY+1];
2476 int chrAlpha= vChrFilter[2*dstY+1];
2478 lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001;
2480 chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001;
2481 if(flags & SWS_FULL_CHR_H_INT) {
2482 yuv2rgbXinC_full(c, //FIXME write a packed2_full function
2483 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
2484 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2485 alpSrcPtr, dest, dstW, dstY);
2487 c->yuv2packed2(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
2488 alpPixBuf ? *alpSrcPtr : NULL, alpPixBuf ? *(alpSrcPtr+1) : NULL,
2489 dest, dstW, lumAlpha, chrAlpha, dstY);
2491 } else { //general RGB
2492 if(flags & SWS_FULL_CHR_H_INT) {
2494 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
2495 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2496 alpSrcPtr, dest, dstW, dstY);
2499 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
2500 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2501 alpSrcPtr, dest, dstW, dstY);
        } else { // looks like we can't use MMX here without overwriting this array's tail
2506 const int16_t **lumSrcPtr= (const int16_t **)lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
2507 const int16_t **chrSrcPtr= (const int16_t **)chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
2508 const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **)alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
2509 if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21) {
2510 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
            if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions into luma / chroma
2513 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
2514 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2515 dest, uDest, dstW, chrDstW, dstFormat);
2516 } else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) { //YV12
2517 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
            if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions into luma / chroma
2519 if (is16BPS(dstFormat) || is9_OR_10BPS(dstFormat)) {
2521 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
2522 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2523 alpSrcPtr, (uint16_t *) dest, (uint16_t *) uDest, (uint16_t *) vDest, (uint16_t *) aDest, dstW, chrDstW,
2527 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
2528 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2529 alpSrcPtr, dest, uDest, vDest, aDest, dstW, chrDstW);
2532 assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
2533 assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
2534 if(flags & SWS_FULL_CHR_H_INT) {
2536 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
2537 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2538 alpSrcPtr, dest, dstW, dstY);
2541 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
2542 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2543 alpSrcPtr, dest, dstW, dstY);
2549 if ((dstFormat == PIX_FMT_YUVA420P) && !alpPixBuf)
2550 fillPlane(dst[3], dstStride[3], dstW, dstY-lastDstY, lastDstY, 255);
2552 if (COMPILE_TEMPLATE_MMX2) __asm__ volatile("sfence":::"memory");
2553 __asm__ volatile("emms" :::"memory");
2555 /* store changed local vars back in the context */
2557 c->lumBufIndex= lumBufIndex;
2558 c->chrBufIndex= chrBufIndex;
2559 c->lastInLumBuf= lastInLumBuf;
2560 c->lastInChrBuf= lastInChrBuf;
2562 return dstY - lastDstY;
2565 static void RENAME(sws_init_swScale)(SwsContext *c)
2567 enum PixelFormat srcFormat = c->srcFormat;
2569 c->yuv2yuv1 = RENAME(yuv2yuv1 );
2570 c->yuv2yuvX = RENAME(yuv2yuvX );
2571 c->yuv2packed1 = RENAME(yuv2packed1 );
2572 c->yuv2packed2 = RENAME(yuv2packed2 );
2573 c->yuv2packedX = RENAME(yuv2packedX );
2575 c->hScale = RENAME(hScale );
2577 // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
2578 if (c->flags & SWS_FAST_BILINEAR && c->canMMX2BeUsed)
2580 c->hyscale_fast = RENAME(hyscale_fast);
2581 c->hcscale_fast = RENAME(hcscale_fast);
2583 c->hyscale_fast = NULL;
2584 c->hcscale_fast = NULL;
2588 case PIX_FMT_YUYV422 : c->chrToYV12 = RENAME(yuy2ToUV); break;
2589 case PIX_FMT_UYVY422 : c->chrToYV12 = RENAME(uyvyToUV); break;
2590 case PIX_FMT_NV12 : c->chrToYV12 = RENAME(nv12ToUV); break;
2591 case PIX_FMT_NV21 : c->chrToYV12 = RENAME(nv21ToUV); break;
2592 case PIX_FMT_YUV420P16BE:
2593 case PIX_FMT_YUV422P16BE:
2594 case PIX_FMT_YUV444P16BE: c->chrToYV12 = RENAME(BEToUV); break;
2595 case PIX_FMT_YUV420P16LE:
2596 case PIX_FMT_YUV422P16LE:
2597 case PIX_FMT_YUV444P16LE: c->chrToYV12 = RENAME(LEToUV); break;
2600 if (!c->chrSrcHSubSample) {
2602 case PIX_FMT_BGR24 : c->chrToYV12 = RENAME(bgr24ToUV); break;
2603 case PIX_FMT_RGB24 : c->chrToYV12 = RENAME(rgb24ToUV); break;
2608 switch (srcFormat) {
2609 case PIX_FMT_YUYV422 :
2610 case PIX_FMT_YUV420P16BE:
2611 case PIX_FMT_YUV422P16BE:
2612 case PIX_FMT_YUV444P16BE:
2613 case PIX_FMT_Y400A :
2614 case PIX_FMT_GRAY16BE : c->lumToYV12 = RENAME(yuy2ToY); break;
2615 case PIX_FMT_UYVY422 :
2616 case PIX_FMT_YUV420P16LE:
2617 case PIX_FMT_YUV422P16LE:
2618 case PIX_FMT_YUV444P16LE:
2619 case PIX_FMT_GRAY16LE : c->lumToYV12 = RENAME(uyvyToY); break;
2620 case PIX_FMT_BGR24 : c->lumToYV12 = RENAME(bgr24ToY); break;
2621 case PIX_FMT_RGB24 : c->lumToYV12 = RENAME(rgb24ToY); break;
2625 switch (srcFormat) {
2626 case PIX_FMT_Y400A : c->alpToYV12 = RENAME(yuy2ToY); break;