2 // Software scaling and colorspace conversion routines for MPlayer
// Original C implementation by A'rpi/ESP-team <arpi@thot.banki.hu>
// current version mostly by Michael Niedermayer (michaelni@gmx.at)
// the parts written by Michael are under the GNU GPL
10 #include "../config.h"
12 #include "../mmx_defs.h"
21 //disables the unscaled height version
24 #define RET 0xC3 //near return opcode
known BUGS with known cause (no bug reports please, but patches are welcome :) )
the horizontal MMX2 scaler reads 1-7 samples too many (might cause a sig11)
Supported output formats: BGR15, BGR16, BGR24, BGR32
BGR15 & BGR16 MMX versions support dithering
33 Special versions: fast Y 1:1 scaling (no interpolation in y direction)
more intelligent misalignment avoidance for the horizontal scaler
change the distance between the u & v buffers
42 #define ABS(a) ((a) > 0 ? (a) : (-(a)))
43 #define MIN(a,b) ((a) > (b) ? (b) : (a))
44 #define MAX(a,b) ((a) < (b) ? (b) : (a))
47 #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
48 #elif defined (HAVE_3DNOW)
49 #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
53 #define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
55 #define MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
60 static uint64_t __attribute__((aligned(8))) yCoeff= 0x2568256825682568LL;
61 static uint64_t __attribute__((aligned(8))) vrCoeff= 0x3343334333433343LL;
62 static uint64_t __attribute__((aligned(8))) ubCoeff= 0x40cf40cf40cf40cfLL;
63 static uint64_t __attribute__((aligned(8))) vgCoeff= 0xE5E2E5E2E5E2E5E2LL;
64 static uint64_t __attribute__((aligned(8))) ugCoeff= 0xF36EF36EF36EF36ELL;
65 static uint64_t __attribute__((aligned(8))) bF8= 0xF8F8F8F8F8F8F8F8LL;
66 static uint64_t __attribute__((aligned(8))) bFC= 0xFCFCFCFCFCFCFCFCLL;
67 static uint64_t __attribute__((aligned(8))) w400= 0x0400040004000400LL;
68 static uint64_t __attribute__((aligned(8))) w80= 0x0080008000800080LL;
69 static uint64_t __attribute__((aligned(8))) w10= 0x0010001000100010LL;
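
// The per-word constants above are YCbCr->RGB factors and bias terms in
// 16 bit fixed point (scale 1<<13, e.g. 0x2568/8192 ~ 1.17 per luma step),
// broadcast into all 4 MMX words. pmulhw keeps the high 16 bits of each
// 16x16 product; with samples held as (Y-16)<<3 that yields
// ((Y-16)*coeff)>>13. A plain-C sketch of the per-word operation
// (illustration only, not used by this file):
static inline int pmulhw_word(short a, short coeff)
{
	return (a*coeff)>>16; // high 16 bits of the 32 bit product
}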
70 static uint64_t __attribute__((aligned(8))) bm00001111=0x00000000FFFFFFFFLL;
71 static uint64_t __attribute__((aligned(8))) bm00000111=0x0000000000FFFFFFLL;
72 static uint64_t __attribute__((aligned(8))) bm11111000=0xFFFFFFFFFF000000LL;
74 static uint64_t __attribute__((aligned(8))) b16Dither= 0x0004000400040004LL;
75 static uint64_t __attribute__((aligned(8))) b16Dither1=0x0004000400040004LL;
76 static uint64_t __attribute__((aligned(8))) b16Dither2=0x0602060206020602LL;
77 static uint64_t __attribute__((aligned(8))) g16Dither= 0x0002000200020002LL;
78 static uint64_t __attribute__((aligned(8))) g16Dither1=0x0002000200020002LL;
79 static uint64_t __attribute__((aligned(8))) g16Dither2=0x0301030103010301LL;
81 static uint64_t __attribute__((aligned(8))) b16Mask= 0x001F001F001F001FLL;
82 static uint64_t __attribute__((aligned(8))) g16Mask= 0x07E007E007E007E0LL;
83 static uint64_t __attribute__((aligned(8))) r16Mask= 0xF800F800F800F800LL;
84 static uint64_t __attribute__((aligned(8))) b15Mask= 0x001F001F001F001FLL;
85 static uint64_t __attribute__((aligned(8))) g15Mask= 0x03E003E003E003E0LL;
86 static uint64_t __attribute__((aligned(8))) r15Mask= 0x7C007C007C007C00LL;
88 static uint64_t __attribute__((aligned(8))) M24A= 0x00FF0000FF0000FFLL;
89 static uint64_t __attribute__((aligned(8))) M24B= 0xFF0000FF0000FF00LL;
90 static uint64_t __attribute__((aligned(8))) M24C= 0x0000FF0000FF0000LL;
92 static uint64_t __attribute__((aligned(8))) temp0;
93 static uint64_t __attribute__((aligned(8))) asm_yalpha1;
94 static uint64_t __attribute__((aligned(8))) asm_uvalpha1;
97 // temporary storage for 4 yuv lines:
98 // 16bit for now (mmx likes it more compact)
100 static uint16_t __attribute__((aligned(8))) pix_buf_y[4][2048];
101 static uint16_t __attribute__((aligned(8))) pix_buf_uv[2][2048*2];
103 static uint16_t pix_buf_y[4][2048];
104 static uint16_t pix_buf_uv[2][2048*2];
107 // clipping helper table for C implementations:
108 static unsigned char clip_table[768];
110 static unsigned short clip_table16b[768];
111 static unsigned short clip_table16g[768];
112 static unsigned short clip_table16r[768];
113 static unsigned short clip_table15b[768];
114 static unsigned short clip_table15g[768];
115 static unsigned short clip_table15r[768];
117 // yuv->rgb conversion tables:
118 static int yuvtab_2568[256];
119 static int yuvtab_3343[256];
120 static int yuvtab_0c92[256];
121 static int yuvtab_1a1e[256];
122 static int yuvtab_40cf[256];
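
// buffers for the MMX2 horizontal scaler code that is generated at runtime
// (see SwScale_YV12slice) and invoked via "call funnyYCode" / "call funnyUVCode"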
125 static uint8_t funnyYCode[10000];
126 static uint8_t funnyUVCode[10000];
129 static int canMMX2BeUsed=0;
131 #define FULL_YSCALEYUV2RGB \
132 "pxor %%mm7, %%mm7 \n\t"\
133 "movd %6, %%mm6 \n\t" /*yalpha1*/\
134 "punpcklwd %%mm6, %%mm6 \n\t"\
135 "punpcklwd %%mm6, %%mm6 \n\t"\
136 "movd %7, %%mm5 \n\t" /*uvalpha1*/\
137 "punpcklwd %%mm5, %%mm5 \n\t"\
138 "punpcklwd %%mm5, %%mm5 \n\t"\
139 "xorl %%eax, %%eax \n\t"\
141 "movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
142 "movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
143 "movq (%2, %%eax,2), %%mm2 \n\t" /* uvbuf0[eax]*/\
144 "movq (%3, %%eax,2), %%mm3 \n\t" /* uvbuf1[eax]*/\
145 "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
146 "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
147 "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
148 "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
149 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
150 "movq 4096(%2, %%eax,2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
151 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
152 "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
153 "movq 4096(%3, %%eax,2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
154 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
155 "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
156 "psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
157 "psubw w400, %%mm3 \n\t" /* 8(U-128)*/\
158 "pmulhw yCoeff, %%mm1 \n\t"\
161 "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
162 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
163 "pmulhw ubCoeff, %%mm3 \n\t"\
164 "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
165 "pmulhw ugCoeff, %%mm2 \n\t"\
166 "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
167 "psubw w400, %%mm0 \n\t" /* (V-128)8*/\
170 "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
171 "pmulhw vrCoeff, %%mm0 \n\t"\
172 "pmulhw vgCoeff, %%mm4 \n\t"\
173 "paddw %%mm1, %%mm3 \n\t" /* B*/\
174 "paddw %%mm1, %%mm0 \n\t" /* R*/\
175 "packuswb %%mm3, %%mm3 \n\t"\
177 "packuswb %%mm0, %%mm0 \n\t"\
178 "paddw %%mm4, %%mm2 \n\t"\
179 "paddw %%mm2, %%mm1 \n\t" /* G*/\
181 "packuswb %%mm1, %%mm1 \n\t"
183 #define YSCALEYUV2RGB \
184 "movd %6, %%mm6 \n\t" /*yalpha1*/\
185 "punpcklwd %%mm6, %%mm6 \n\t"\
186 "punpcklwd %%mm6, %%mm6 \n\t"\
187 "movq %%mm6, asm_yalpha1 \n\t"\
188 "movd %7, %%mm5 \n\t" /*uvalpha1*/\
189 "punpcklwd %%mm5, %%mm5 \n\t"\
190 "punpcklwd %%mm5, %%mm5 \n\t"\
191 "movq %%mm5, asm_uvalpha1 \n\t"\
192 "xorl %%eax, %%eax \n\t"\
194 "movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
195 "movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
196 "movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
197 "movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
198 "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
199 "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
200 "movq asm_uvalpha1, %%mm0 \n\t"\
201 "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
202 "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
203 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
204 "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
205 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
206 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
207 "psubw w400, %%mm3 \n\t" /* (U-128)8*/\
208 "psubw w400, %%mm4 \n\t" /* (V-128)8*/\
209 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
210 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
211 "pmulhw ugCoeff, %%mm3 \n\t"\
212 "pmulhw vgCoeff, %%mm4 \n\t"\
213 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
214 "movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
215 "movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
216 "movq 8(%0, %%eax, 2), %%mm6 \n\t" /*buf0[eax]*/\
217 "movq 8(%1, %%eax, 2), %%mm7 \n\t" /*buf1[eax]*/\
218 "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
219 "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
220 "pmulhw asm_yalpha1, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
221 "pmulhw asm_yalpha1, %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
222 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
223 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
224 "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
225 "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
226 "pmulhw ubCoeff, %%mm2 \n\t"\
227 "pmulhw vrCoeff, %%mm5 \n\t"\
228 "psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
229 "psubw w80, %%mm7 \n\t" /* 8(Y-16)*/\
230 "pmulhw yCoeff, %%mm1 \n\t"\
231 "pmulhw yCoeff, %%mm7 \n\t"\
232 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
233 "paddw %%mm3, %%mm4 \n\t"\
234 "movq %%mm2, %%mm0 \n\t"\
235 "movq %%mm5, %%mm6 \n\t"\
236 "movq %%mm4, %%mm3 \n\t"\
237 "punpcklwd %%mm2, %%mm2 \n\t"\
238 "punpcklwd %%mm5, %%mm5 \n\t"\
239 "punpcklwd %%mm4, %%mm4 \n\t"\
240 "paddw %%mm1, %%mm2 \n\t"\
241 "paddw %%mm1, %%mm5 \n\t"\
242 "paddw %%mm1, %%mm4 \n\t"\
243 "punpckhwd %%mm0, %%mm0 \n\t"\
244 "punpckhwd %%mm6, %%mm6 \n\t"\
245 "punpckhwd %%mm3, %%mm3 \n\t"\
246 "paddw %%mm7, %%mm0 \n\t"\
247 "paddw %%mm7, %%mm6 \n\t"\
248 "paddw %%mm7, %%mm3 \n\t"\
249 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
250 "packuswb %%mm0, %%mm2 \n\t"\
251 "packuswb %%mm6, %%mm5 \n\t"\
252 "packuswb %%mm3, %%mm4 \n\t"\
253 "pxor %%mm7, %%mm7 \n\t"
255 #define YSCALEYUV2RGB1 \
256 "xorl %%eax, %%eax \n\t"\
258 "movq (%2, %%eax), %%mm3 \n\t" /* uvbuf0[eax]*/\
259 "movq 4096(%2, %%eax), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
260 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
261 "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
262 "psubw w400, %%mm3 \n\t" /* (U-128)8*/\
263 "psubw w400, %%mm4 \n\t" /* (V-128)8*/\
264 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
265 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
266 "pmulhw ugCoeff, %%mm3 \n\t"\
267 "pmulhw vgCoeff, %%mm4 \n\t"\
268 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
269 "movq (%0, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
270 "movq 8(%0, %%eax, 2), %%mm7 \n\t" /*buf0[eax]*/\
271 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
272 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
273 "pmulhw ubCoeff, %%mm2 \n\t"\
274 "pmulhw vrCoeff, %%mm5 \n\t"\
275 "psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
276 "psubw w80, %%mm7 \n\t" /* 8(Y-16)*/\
277 "pmulhw yCoeff, %%mm1 \n\t"\
278 "pmulhw yCoeff, %%mm7 \n\t"\
279 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
280 "paddw %%mm3, %%mm4 \n\t"\
281 "movq %%mm2, %%mm0 \n\t"\
282 "movq %%mm5, %%mm6 \n\t"\
283 "movq %%mm4, %%mm3 \n\t"\
284 "punpcklwd %%mm2, %%mm2 \n\t"\
285 "punpcklwd %%mm5, %%mm5 \n\t"\
286 "punpcklwd %%mm4, %%mm4 \n\t"\
287 "paddw %%mm1, %%mm2 \n\t"\
288 "paddw %%mm1, %%mm5 \n\t"\
289 "paddw %%mm1, %%mm4 \n\t"\
290 "punpckhwd %%mm0, %%mm0 \n\t"\
291 "punpckhwd %%mm6, %%mm6 \n\t"\
292 "punpckhwd %%mm3, %%mm3 \n\t"\
293 "paddw %%mm7, %%mm0 \n\t"\
294 "paddw %%mm7, %%mm6 \n\t"\
295 "paddw %%mm7, %%mm3 \n\t"\
296 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
297 "packuswb %%mm0, %%mm2 \n\t"\
298 "packuswb %%mm6, %%mm5 \n\t"\
299 "packuswb %%mm3, %%mm4 \n\t"\
300 "pxor %%mm7, %%mm7 \n\t"
// like YSCALEYUV2RGB1, but does vertical chrominance interpolation (averages uvbuf0 and uvbuf1)
303 #define YSCALEYUV2RGB1b \
304 "xorl %%eax, %%eax \n\t"\
306 "movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
307 "movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
308 "movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
309 "movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
310 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
311 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
312 "psrlw $5, %%mm3 \n\t"\
313 "psrlw $5, %%mm4 \n\t"\
314 "psubw w400, %%mm3 \n\t" /* (U-128)8*/\
315 "psubw w400, %%mm4 \n\t" /* (V-128)8*/\
316 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
317 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
318 "pmulhw ugCoeff, %%mm3 \n\t"\
319 "pmulhw vgCoeff, %%mm4 \n\t"\
320 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
321 "movq (%0, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
322 "movq 8(%0, %%eax, 2), %%mm7 \n\t" /*buf0[eax]*/\
323 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
324 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
325 "pmulhw ubCoeff, %%mm2 \n\t"\
326 "pmulhw vrCoeff, %%mm5 \n\t"\
327 "psubw w80, %%mm1 \n\t" /* 8(Y-16)*/\
328 "psubw w80, %%mm7 \n\t" /* 8(Y-16)*/\
329 "pmulhw yCoeff, %%mm1 \n\t"\
330 "pmulhw yCoeff, %%mm7 \n\t"\
331 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
332 "paddw %%mm3, %%mm4 \n\t"\
333 "movq %%mm2, %%mm0 \n\t"\
334 "movq %%mm5, %%mm6 \n\t"\
335 "movq %%mm4, %%mm3 \n\t"\
336 "punpcklwd %%mm2, %%mm2 \n\t"\
337 "punpcklwd %%mm5, %%mm5 \n\t"\
338 "punpcklwd %%mm4, %%mm4 \n\t"\
339 "paddw %%mm1, %%mm2 \n\t"\
340 "paddw %%mm1, %%mm5 \n\t"\
341 "paddw %%mm1, %%mm4 \n\t"\
342 "punpckhwd %%mm0, %%mm0 \n\t"\
343 "punpckhwd %%mm6, %%mm6 \n\t"\
344 "punpckhwd %%mm3, %%mm3 \n\t"\
345 "paddw %%mm7, %%mm0 \n\t"\
346 "paddw %%mm7, %%mm6 \n\t"\
347 "paddw %%mm7, %%mm3 \n\t"\
348 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
349 "packuswb %%mm0, %%mm2 \n\t"\
350 "packuswb %%mm6, %%mm5 \n\t"\
351 "packuswb %%mm3, %%mm4 \n\t"\
352 "pxor %%mm7, %%mm7 \n\t"
355 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
356 "movq %%mm2, %%mm1 \n\t" /* B */\
357 "movq %%mm5, %%mm6 \n\t" /* R */\
358 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
359 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
360 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
361 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
362 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
363 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
364 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
365 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
366 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
367 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
369 MOVNTQ(%%mm0, (%4, %%eax, 4))\
370 MOVNTQ(%%mm2, 8(%4, %%eax, 4))\
371 MOVNTQ(%%mm1, 16(%4, %%eax, 4))\
372 MOVNTQ(%%mm3, 24(%4, %%eax, 4))\
374 "addl $8, %%eax \n\t"\
375 "cmpl %5, %%eax \n\t"\
379 "pand bF8, %%mm2 \n\t" /* B */\
380 "pand bFC, %%mm4 \n\t" /* G */\
381 "pand bF8, %%mm5 \n\t" /* R */\
382 "psrlq $3, %%mm2 \n\t"\
384 "movq %%mm2, %%mm1 \n\t"\
385 "movq %%mm4, %%mm3 \n\t"\
387 "punpcklbw %%mm7, %%mm3 \n\t"\
388 "punpcklbw %%mm5, %%mm2 \n\t"\
389 "punpckhbw %%mm7, %%mm4 \n\t"\
390 "punpckhbw %%mm5, %%mm1 \n\t"\
392 "psllq $3, %%mm3 \n\t"\
393 "psllq $3, %%mm4 \n\t"\
395 "por %%mm3, %%mm2 \n\t"\
396 "por %%mm4, %%mm1 \n\t"\
398 MOVNTQ(%%mm2, (%4, %%eax, 2))\
399 MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
401 "addl $8, %%eax \n\t"\
402 "cmpl %5, %%eax \n\t"\
406 "pand bF8, %%mm2 \n\t" /* B */\
407 "pand bF8, %%mm4 \n\t" /* G */\
408 "pand bF8, %%mm5 \n\t" /* R */\
409 "psrlq $3, %%mm2 \n\t"\
410 "psrlq $1, %%mm5 \n\t"\
412 "movq %%mm2, %%mm1 \n\t"\
413 "movq %%mm4, %%mm3 \n\t"\
415 "punpcklbw %%mm7, %%mm3 \n\t"\
416 "punpcklbw %%mm5, %%mm2 \n\t"\
417 "punpckhbw %%mm7, %%mm4 \n\t"\
418 "punpckhbw %%mm5, %%mm1 \n\t"\
420 "psllq $2, %%mm3 \n\t"\
421 "psllq $2, %%mm4 \n\t"\
423 "por %%mm3, %%mm2 \n\t"\
424 "por %%mm4, %%mm1 \n\t"\
426 MOVNTQ(%%mm2, (%4, %%eax, 2))\
427 MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
429 "addl $8, %%eax \n\t"\
430 "cmpl %5, %%eax \n\t"\
433 #define WRITEBGR24OLD \
434 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
435 "movq %%mm2, %%mm1 \n\t" /* B */\
436 "movq %%mm5, %%mm6 \n\t" /* R */\
437 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
438 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
439 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
440 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
441 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
442 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
443 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
444 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
445 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
446 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
448 "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
449 "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
450 "pand bm00000111, %%mm4 \n\t" /* 00000RGB 0 */\
451 "pand bm11111000, %%mm0 \n\t" /* 00RGB000 0.5 */\
452 "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
453 "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
454 "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
455 "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
457 "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
458 "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
459 "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
460 "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
461 "pand bm00001111, %%mm2 \n\t" /* 0000RGBR 1 */\
462 "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
463 "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
464 "pand bm00000111, %%mm4 \n\t" /* 00000RGB 2 */\
465 "pand bm11111000, %%mm1 \n\t" /* 00RGB000 2.5 */\
466 "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
467 "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
468 "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
469 "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
471 "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
472 "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
473 "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
474 "pand bm00000111, %%mm5 \n\t" /* 00000RGB 3 */\
475 "pand bm11111000, %%mm3 \n\t" /* 00RGB000 3.5 */\
476 "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
477 "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
478 "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
480 MOVNTQ(%%mm0, (%%ebx))\
481 MOVNTQ(%%mm2, 8(%%ebx))\
482 MOVNTQ(%%mm3, 16(%%ebx))\
483 "addl $24, %%ebx \n\t"\
485 "addl $8, %%eax \n\t"\
486 "cmpl %5, %%eax \n\t"\
489 #define WRITEBGR24MMX \
490 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
491 "movq %%mm2, %%mm1 \n\t" /* B */\
492 "movq %%mm5, %%mm6 \n\t" /* R */\
493 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
494 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
495 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
496 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
497 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
498 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
499 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
500 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
501 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
502 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
504 "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
505 "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
506 "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
507 "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
509 "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
510 "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
511 "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
512 "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
514 "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
515 "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
516 "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
517 "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
519 "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
520 "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
521 "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
522 "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
523 MOVNTQ(%%mm0, (%%ebx))\
525 "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
526 "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
527 "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
528 "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
529 MOVNTQ(%%mm6, 8(%%ebx))\
531 "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
532 "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
533 "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
534 MOVNTQ(%%mm5, 16(%%ebx))\
536 "addl $24, %%ebx \n\t"\
538 "addl $8, %%eax \n\t"\
539 "cmpl %5, %%eax \n\t"\
542 #define WRITEBGR24MMX2 \
543 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
544 "movq M24A, %%mm0 \n\t"\
545 "movq M24C, %%mm7 \n\t"\
546 "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
547 "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
548 "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
550 "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
551 "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
552 "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
554 "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
555 "por %%mm1, %%mm6 \n\t"\
556 "por %%mm3, %%mm6 \n\t"\
557 MOVNTQ(%%mm6, (%%ebx))\
559 "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
560 "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
561 "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
562 "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
564 "pand M24B, %%mm1 \n\t" /* B5 B4 B3 */\
565 "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
566 "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
568 "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
569 "por %%mm3, %%mm6 \n\t"\
570 MOVNTQ(%%mm6, 8(%%ebx))\
572 "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
573 "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
574 "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
576 "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
577 "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
578 "pand M24B, %%mm6 \n\t" /* R7 R6 R5 */\
580 "por %%mm1, %%mm3 \n\t"\
581 "por %%mm3, %%mm6 \n\t"\
582 MOVNTQ(%%mm6, 16(%%ebx))\
584 "addl $24, %%ebx \n\t"\
586 "addl $8, %%eax \n\t"\
587 "cmpl %5, %%eax \n\t"\
591 #define WRITEBGR24 WRITEBGR24MMX2
593 #define WRITEBGR24 WRITEBGR24MMX
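
// For reference, the 15/16 bit writers pack each pixel as in this plain-C
// sketch (illustration only; r, g, b are 0..255 and dithering is ignored,
// cf. the clip_table15*/clip_table16* construction in SwScale_Init):
static inline uint16_t pack_rgb16(int r, int g, int b)
{
	return ((r&0xF8)<<8) | ((g&0xFC)<<3) | (b>>3); // RGB565
}
static inline uint16_t pack_rgb15(int r, int g, int b)
{
	return ((r&0xF8)<<7) | ((g&0xF8)<<2) | (b>>3); // RGB555
}

// the function below merely references the static constants so that the
// compiler neither warns about nor discards variables used only from
// inline asm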
597 void in_asm_used_var_warning_killer()
599 int i= yCoeff+vrCoeff+ubCoeff+vgCoeff+ugCoeff+bF8+bFC+w400+w80+w10+
600 bm00001111+bm00000111+bm11111000+b16Dither+b16Dither1+b16Dither2+g16Dither+g16Dither1+
601 g16Dither2+b16Mask+g16Mask+r16Mask+b15Mask+g15Mask+r15Mask+temp0+asm_yalpha1+ asm_uvalpha1+
607 static inline void yuv2yuv(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
608 uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstw, int yalpha, int uvalpha)
int yalpha1=yalpha^4095; // = 4095-yalpha for the 12 bit alphas
int uvalpha1=uvalpha^4095;
614 asm volatile ("\n\t"::: "memory");
618 ((uint8_t*)dest)[i] = (buf0[i]*yalpha1+buf1[i]*yalpha)>>19;
623 for(i=0; i<(dstw>>1); i++)
625 ((uint8_t*)uDest)[i] = (uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19;
626 ((uint8_t*)vDest)[i] = (uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19;
632 * vertical scale YV12 to RGB
634 static inline void yuv2rgbX(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
635 uint8_t *dest, int dstw, int yalpha, int uvalpha, int dstbpp)
637 int yalpha1=yalpha^4095;
638 int uvalpha1=uvalpha^4095;
650 "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
651 "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
653 "movq %%mm3, %%mm1 \n\t"
654 "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
655 "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
657 MOVNTQ(%%mm3, (%4, %%eax, 4))
658 MOVNTQ(%%mm1, 8(%4, %%eax, 4))
660 "addl $4, %%eax \n\t"
661 "cmpl %5, %%eax \n\t"
665 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
666 "m" (yalpha1), "m" (uvalpha1)
677 "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
678 "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
680 "movq %%mm3, %%mm1 \n\t"
681 "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
682 "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
684 "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
685 "psrlq $8, %%mm3 \n\t" // GR0BGR00
686 "pand bm00000111, %%mm2 \n\t" // BGR00000
687 "pand bm11111000, %%mm3 \n\t" // 000BGR00
688 "por %%mm2, %%mm3 \n\t" // BGRBGR00
689 "movq %%mm1, %%mm2 \n\t"
690 "psllq $48, %%mm1 \n\t" // 000000BG
691 "por %%mm1, %%mm3 \n\t" // BGRBGRBG
693 "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
694 "psrld $16, %%mm2 \n\t" // R000R000
695 "psrlq $24, %%mm1 \n\t" // 0BGR0000
696 "por %%mm2, %%mm1 \n\t" // RBGRR000
698 "movl %4, %%ebx \n\t"
699 "addl %%eax, %%ebx \n\t"
703 "movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
704 "movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
706 "movd %%mm3, (%%ebx, %%eax, 2) \n\t"
707 "psrlq $32, %%mm3 \n\t"
708 "movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
709 "movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
711 "addl $4, %%eax \n\t"
712 "cmpl %5, %%eax \n\t"
715 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstw),
716 "m" (yalpha1), "m" (uvalpha1)
726 "paddusb b16Dither, %%mm1 \n\t"
727 "paddusb b16Dither, %%mm0 \n\t"
728 "paddusb b16Dither, %%mm3 \n\t"
730 "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
731 "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
732 "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
734 "psrlw $3, %%mm3 \n\t"
735 "psllw $2, %%mm1 \n\t"
736 "psllw $7, %%mm0 \n\t"
737 "pand g15Mask, %%mm1 \n\t"
738 "pand r15Mask, %%mm0 \n\t"
740 "por %%mm3, %%mm1 \n\t"
741 "por %%mm1, %%mm0 \n\t"
743 MOVNTQ(%%mm0, (%4, %%eax, 2))
745 "addl $4, %%eax \n\t"
746 "cmpl %5, %%eax \n\t"
749 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
750 "m" (yalpha1), "m" (uvalpha1)
760 "paddusb g16Dither, %%mm1 \n\t"
761 "paddusb b16Dither, %%mm0 \n\t"
762 "paddusb b16Dither, %%mm3 \n\t"
764 "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
765 "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
766 "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
768 "psrlw $3, %%mm3 \n\t"
769 "psllw $3, %%mm1 \n\t"
770 "psllw $8, %%mm0 \n\t"
771 "pand g16Mask, %%mm1 \n\t"
772 "pand r16Mask, %%mm0 \n\t"
774 "por %%mm3, %%mm1 \n\t"
775 "por %%mm1, %%mm0 \n\t"
777 MOVNTQ(%%mm0, (%4, %%eax, 2))
779 "addl $4, %%eax \n\t"
780 "cmpl %5, %%eax \n\t"
783 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
784 "m" (yalpha1), "m" (uvalpha1)
789 asm volatile ("\n\t"::: "memory");
791 if(dstbpp==32 || dstbpp==24)
795 // vertical linear interpolation && yuv2rgb in a single step:
796 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
797 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
798 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
799 dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
800 dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
801 dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
809 // vertical linear interpolation && yuv2rgb in a single step:
810 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
811 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
812 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
814 ((uint16_t*)dest)[i] =
815 clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
816 clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
817 clip_table16r[(Y + yuvtab_3343[V]) >>13];
824 // vertical linear interpolation && yuv2rgb in a single step:
825 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
826 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
827 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
829 ((uint16_t*)dest)[i] =
830 clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
831 clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
832 clip_table15r[(Y + yuvtab_3343[V]) >>13];
846 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
847 "m" (yalpha1), "m" (uvalpha1)
854 "movl %4, %%ebx \n\t"
858 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstw),
859 "m" (yalpha1), "m" (uvalpha1)
867 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
869 "paddusb b16Dither, %%mm2 \n\t"
870 "paddusb b16Dither, %%mm4 \n\t"
871 "paddusb b16Dither, %%mm5 \n\t"
876 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
877 "m" (yalpha1), "m" (uvalpha1)
885 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
887 "paddusb g16Dither, %%mm2 \n\t"
888 "paddusb b16Dither, %%mm4 \n\t"
889 "paddusb b16Dither, %%mm5 \n\t"
894 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
895 "m" (yalpha1), "m" (uvalpha1)
900 asm volatile ("\n\t"::: "memory");
905 for(i=0; i<dstw-1; i+=2){
906 // vertical linear interpolation && yuv2rgb in a single step:
907 int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
908 int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
909 int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
910 int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
912 int Cb= yuvtab_40cf[U];
913 int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
914 int Cr= yuvtab_3343[V];
916 dest[4*i+0]=clip_table[((Y1 + Cb) >>13)];
917 dest[4*i+1]=clip_table[((Y1 + Cg) >>13)];
918 dest[4*i+2]=clip_table[((Y1 + Cr) >>13)];
920 dest[4*i+4]=clip_table[((Y2 + Cb) >>13)];
921 dest[4*i+5]=clip_table[((Y2 + Cg) >>13)];
922 dest[4*i+6]=clip_table[((Y2 + Cr) >>13)];
928 for(i=0; i<dstw-1; i+=2){
929 // vertical linear interpolation && yuv2rgb in a single step:
930 int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
931 int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
932 int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
933 int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
935 int Cb= yuvtab_40cf[U];
936 int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
937 int Cr= yuvtab_3343[V];
939 dest[0]=clip_table[((Y1 + Cb) >>13)];
940 dest[1]=clip_table[((Y1 + Cg) >>13)];
941 dest[2]=clip_table[((Y1 + Cr) >>13)];
943 dest[3]=clip_table[((Y2 + Cb) >>13)];
944 dest[4]=clip_table[((Y2 + Cg) >>13)];
945 dest[5]=clip_table[((Y2 + Cr) >>13)];
952 for(i=0; i<dstw-1; i+=2){
953 // vertical linear interpolation && yuv2rgb in a single step:
954 int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
955 int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
956 int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
957 int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
959 int Cb= yuvtab_40cf[U];
960 int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
961 int Cr= yuvtab_3343[V];
963 ((uint16_t*)dest)[i] =
964 clip_table16b[(Y1 + Cb) >>13] |
965 clip_table16g[(Y1 + Cg) >>13] |
966 clip_table16r[(Y1 + Cr) >>13];
968 ((uint16_t*)dest)[i+1] =
969 clip_table16b[(Y2 + Cb) >>13] |
970 clip_table16g[(Y2 + Cg) >>13] |
971 clip_table16r[(Y2 + Cr) >>13];
977 for(i=0; i<dstw-1; i+=2){
978 // vertical linear interpolation && yuv2rgb in a single step:
979 int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
980 int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
981 int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
982 int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
984 int Cb= yuvtab_40cf[U];
985 int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
986 int Cr= yuvtab_3343[V];
988 ((uint16_t*)dest)[i] =
989 clip_table15b[(Y1 + Cb) >>13] |
990 clip_table15g[(Y1 + Cg) >>13] |
991 clip_table15r[(Y1 + Cr) >>13];
993 ((uint16_t*)dest)[i+1] =
994 clip_table15b[(Y2 + Cb) >>13] |
995 clip_table15g[(Y2 + Cg) >>13] |
996 clip_table15r[(Y2 + Cr) >>13];
1004 * YV12 to RGB without scaling or interpolating
1006 static inline void yuv2rgb1(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
1007 uint8_t *dest, int dstw, int yalpha, int uvalpha, int dstbpp)
1009 int uvalpha1=uvalpha^4095;
1011 int yalpha1=yalpha^4095;
1014 if(fullUVIpol || allwaysIpol)
1016 yuv2rgbX(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
if( yalpha > 2048 ) buf0 = buf1; // nearest neighbour in y: take the closer line
if( uvalpha < 2048 ) // note this is not correct (shifts chrominance by 0.5 pixels) but it's a bit faster
1029 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
1030 "m" (yalpha1), "m" (uvalpha1)
1037 "movl %4, %%ebx \n\t"
1040 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstw),
1041 "m" (yalpha1), "m" (uvalpha1)
1049 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1051 "paddusb b16Dither, %%mm2 \n\t"
1052 "paddusb b16Dither, %%mm4 \n\t"
1053 "paddusb b16Dither, %%mm5 \n\t"
1056 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
1057 "m" (yalpha1), "m" (uvalpha1)
1065 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1067 "paddusb g16Dither, %%mm2 \n\t"
1068 "paddusb b16Dither, %%mm4 \n\t"
1069 "paddusb b16Dither, %%mm5 \n\t"
1073 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
1074 "m" (yalpha1), "m" (uvalpha1)
1086 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
1087 "m" (yalpha1), "m" (uvalpha1)
1094 "movl %4, %%ebx \n\t"
1097 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstw),
1098 "m" (yalpha1), "m" (uvalpha1)
1106 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1108 "paddusb b16Dither, %%mm2 \n\t"
1109 "paddusb b16Dither, %%mm4 \n\t"
1110 "paddusb b16Dither, %%mm5 \n\t"
1113 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
1114 "m" (yalpha1), "m" (uvalpha1)
1122 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1124 "paddusb g16Dither, %%mm2 \n\t"
1125 "paddusb b16Dither, %%mm4 \n\t"
1126 "paddusb b16Dither, %%mm5 \n\t"
1130 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
1131 "m" (yalpha1), "m" (uvalpha1)
1137 //FIXME write 2 versions (for even & odd lines)
1138 asm volatile ("\n\t"::: "memory");
1143 for(i=0; i<dstw-1; i+=2){
1144 // vertical linear interpolation && yuv2rgb in a single step:
1145 int Y1=yuvtab_2568[buf0[i]>>7];
1146 int Y2=yuvtab_2568[buf0[i+1]>>7];
1147 int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
1148 int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
1150 int Cb= yuvtab_40cf[U];
1151 int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
1152 int Cr= yuvtab_3343[V];
1154 dest[4*i+0]=clip_table[((Y1 + Cb) >>13)];
1155 dest[4*i+1]=clip_table[((Y1 + Cg) >>13)];
1156 dest[4*i+2]=clip_table[((Y1 + Cr) >>13)];
1158 dest[4*i+4]=clip_table[((Y2 + Cb) >>13)];
1159 dest[4*i+5]=clip_table[((Y2 + Cg) >>13)];
1160 dest[4*i+6]=clip_table[((Y2 + Cr) >>13)];
1166 for(i=0; i<dstw-1; i+=2){
1167 // vertical linear interpolation && yuv2rgb in a single step:
1168 int Y1=yuvtab_2568[buf0[i]>>7];
1169 int Y2=yuvtab_2568[buf0[i+1]>>7];
1170 int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
1171 int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
1173 int Cb= yuvtab_40cf[U];
1174 int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
1175 int Cr= yuvtab_3343[V];
1177 dest[0]=clip_table[((Y1 + Cb) >>13)];
1178 dest[1]=clip_table[((Y1 + Cg) >>13)];
1179 dest[2]=clip_table[((Y1 + Cr) >>13)];
1181 dest[3]=clip_table[((Y2 + Cb) >>13)];
1182 dest[4]=clip_table[((Y2 + Cg) >>13)];
1183 dest[5]=clip_table[((Y2 + Cr) >>13)];
1190 for(i=0; i<dstw-1; i+=2){
1191 // vertical linear interpolation && yuv2rgb in a single step:
1192 int Y1=yuvtab_2568[buf0[i]>>7];
1193 int Y2=yuvtab_2568[buf0[i+1]>>7];
1194 int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
1195 int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
1197 int Cb= yuvtab_40cf[U];
1198 int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
1199 int Cr= yuvtab_3343[V];
1201 ((uint16_t*)dest)[i] =
1202 clip_table16b[(Y1 + Cb) >>13] |
1203 clip_table16g[(Y1 + Cg) >>13] |
1204 clip_table16r[(Y1 + Cr) >>13];
1206 ((uint16_t*)dest)[i+1] =
1207 clip_table16b[(Y2 + Cb) >>13] |
1208 clip_table16g[(Y2 + Cg) >>13] |
1209 clip_table16r[(Y2 + Cr) >>13];
1215 for(i=0; i<dstw-1; i+=2){
1216 // vertical linear interpolation && yuv2rgb in a single step:
1217 int Y1=yuvtab_2568[buf0[i]>>7];
1218 int Y2=yuvtab_2568[buf0[i+1]>>7];
1219 int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
1220 int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
1222 int Cb= yuvtab_40cf[U];
1223 int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
1224 int Cr= yuvtab_3343[V];
1226 ((uint16_t*)dest)[i] =
1227 clip_table15b[(Y1 + Cb) >>13] |
1228 clip_table15g[(Y1 + Cg) >>13] |
1229 clip_table15r[(Y1 + Cr) >>13];
1231 ((uint16_t*)dest)[i+1] =
1232 clip_table15b[(Y2 + Cb) >>13] |
1233 clip_table15g[(Y2 + Cg) >>13] |
1234 clip_table15r[(Y2 + Cr) >>13];
1241 static inline void hyscale(uint16_t *dst, int dstWidth, uint8_t *src, int srcWidth, int xInc)
1243 // *** horizontal scale Y line to temp buffer
1250 "pxor %%mm7, %%mm7 \n\t"
1251 "pxor %%mm2, %%mm2 \n\t" // 2*xalpha
1252 "movd %5, %%mm6 \n\t" // xInc&0xFFFF
1253 "punpcklwd %%mm6, %%mm6 \n\t"
1254 "punpcklwd %%mm6, %%mm6 \n\t"
1255 "movq %%mm6, %%mm2 \n\t"
1256 "psllq $16, %%mm2 \n\t"
1257 "paddw %%mm6, %%mm2 \n\t"
1258 "psllq $16, %%mm2 \n\t"
1259 "paddw %%mm6, %%mm2 \n\t"
1260 "psllq $16, %%mm2 \n\t" //0,t,2t,3t t=xInc&0xFF
1261 "movq %%mm2, temp0 \n\t"
1262 "movd %4, %%mm6 \n\t" //(xInc*4)&0xFFFF
1263 "punpcklwd %%mm6, %%mm6 \n\t"
1264 "punpcklwd %%mm6, %%mm6 \n\t"
1265 "xorl %%eax, %%eax \n\t" // i
1266 "movl %0, %%esi \n\t" // src
1267 "movl %1, %%edi \n\t" // buf1
1268 "movl %3, %%edx \n\t" // (xInc*4)>>16
1269 "xorl %%ecx, %%ecx \n\t"
1270 "xorl %%ebx, %%ebx \n\t"
1271 "movw %4, %%bx \n\t" // (xInc*4)&0xFFFF
1273 #define FUNNY_Y_CODE \
1274 PREFETCH" 1024(%%esi) \n\t"\
1275 PREFETCH" 1056(%%esi) \n\t"\
1276 PREFETCH" 1088(%%esi) \n\t"\
1277 "call funnyYCode \n\t"\
1278 "movq temp0, %%mm2 \n\t"\
1279 "xorl %%ecx, %%ecx \n\t"
1290 :: "m" (src), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
1291 "m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF)
1292 : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
1294 for(i=dstWidth-1; (i*xInc)>>16 >=srcWidth-1; i--) dst[i] = src[srcWidth-1]*128;
1299 //NO MMX just normal asm ...
1301 "xorl %%eax, %%eax \n\t" // i
1302 "xorl %%ebx, %%ebx \n\t" // xx
1303 "xorl %%ecx, %%ecx \n\t" // 2*xalpha
1305 "movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
1306 "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
1307 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
1308 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
1309 "shll $16, %%edi \n\t"
1310 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
1311 "movl %1, %%edi \n\t"
1312 "shrl $9, %%esi \n\t"
1313 "movw %%si, (%%edi, %%eax, 2) \n\t"
1314 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
1315 "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
1317 "movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
1318 "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
1319 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
1320 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
1321 "shll $16, %%edi \n\t"
1322 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
1323 "movl %1, %%edi \n\t"
1324 "shrl $9, %%esi \n\t"
1325 "movw %%si, 2(%%edi, %%eax, 2) \n\t"
1326 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
1327 "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
1330 "addl $2, %%eax \n\t"
1331 "cmpl %2, %%eax \n\t"
1335 :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
1336 : "%eax", "%ebx", "%ecx", "%edi", "%esi"
} //if MMX2 can't be used
1343 unsigned int xpos=0;
1344 for(i=0;i<dstWidth;i++)
1346 register unsigned int xx=xpos>>16;
1347 register unsigned int xalpha=(xpos&0xFFFF)>>9;
1348 dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
1354 inline static void hcscale(uint16_t *dst, int dstWidth,
1355 uint8_t *src1, uint8_t *src2, int srcWidth, int xInc)
1363 "pxor %%mm7, %%mm7 \n\t"
1364 "pxor %%mm2, %%mm2 \n\t" // 2*xalpha
1365 "movd %5, %%mm6 \n\t" // xInc&0xFFFF
1366 "punpcklwd %%mm6, %%mm6 \n\t"
1367 "punpcklwd %%mm6, %%mm6 \n\t"
1368 "movq %%mm6, %%mm2 \n\t"
1369 "psllq $16, %%mm2 \n\t"
1370 "paddw %%mm6, %%mm2 \n\t"
1371 "psllq $16, %%mm2 \n\t"
1372 "paddw %%mm6, %%mm2 \n\t"
1373 "psllq $16, %%mm2 \n\t" //0,t,2t,3t t=xInc&0xFFFF
1374 "movq %%mm2, temp0 \n\t"
1375 "movd %4, %%mm6 \n\t" //(xInc*4)&0xFFFF
1376 "punpcklwd %%mm6, %%mm6 \n\t"
1377 "punpcklwd %%mm6, %%mm6 \n\t"
1378 "xorl %%eax, %%eax \n\t" // i
1379 "movl %0, %%esi \n\t" // src
1380 "movl %1, %%edi \n\t" // buf1
1381 "movl %3, %%edx \n\t" // (xInc*4)>>16
1382 "xorl %%ecx, %%ecx \n\t"
1383 "xorl %%ebx, %%ebx \n\t"
1384 "movw %4, %%bx \n\t" // (xInc*4)&0xFFFF
1386 #define FUNNYUVCODE \
1387 PREFETCH" 1024(%%esi) \n\t"\
1388 PREFETCH" 1056(%%esi) \n\t"\
1389 PREFETCH" 1088(%%esi) \n\t"\
1390 "call funnyUVCode \n\t"\
1391 "movq temp0, %%mm2 \n\t"\
1392 "xorl %%ecx, %%ecx \n\t"
1403 "xorl %%eax, %%eax \n\t" // i
1404 "movl %6, %%esi \n\t" // src
1405 "movl %1, %%edi \n\t" // buf1
1406 "addl $4096, %%edi \n\t"
1418 :: "m" (src1), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
1419 "m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF), "m" (src2)
1420 : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
1422 for(i=dstWidth-1; (i*xInc)>>16 >=srcWidth/2-1; i--)
1424 dst[i] = src1[srcWidth/2-1]*128;
1425 dst[i+2048] = src2[srcWidth/2-1]*128;
1432 "xorl %%eax, %%eax \n\t" // i
1433 "xorl %%ebx, %%ebx \n\t" // xx
1434 "xorl %%ecx, %%ecx \n\t" // 2*xalpha
1436 "movl %0, %%esi \n\t"
1437 "movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]
1438 "movzbl 1(%%esi, %%ebx), %%esi \n\t" //src[xx+1]
1439 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
1440 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
1441 "shll $16, %%edi \n\t"
1442 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
1443 "movl %1, %%edi \n\t"
1444 "shrl $9, %%esi \n\t"
1445 "movw %%si, (%%edi, %%eax, 2) \n\t"
1447 "movzbl (%5, %%ebx), %%edi \n\t" //src[xx]
1448 "movzbl 1(%5, %%ebx), %%esi \n\t" //src[xx+1]
1449 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
1450 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
1451 "shll $16, %%edi \n\t"
1452 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
1453 "movl %1, %%edi \n\t"
1454 "shrl $9, %%esi \n\t"
1455 "movw %%si, 4096(%%edi, %%eax, 2)\n\t"
1457 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
1458 "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
1459 "addl $1, %%eax \n\t"
1460 "cmpl %2, %%eax \n\t"
1463 :: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF),
1465 : "%eax", "%ebx", "%ecx", "%edi", "%esi"
} //if MMX2 can't be used
1472 unsigned int xpos=0;
1473 for(i=0;i<dstWidth;i++)
1475 register unsigned int xx=xpos>>16;
1476 register unsigned int xalpha=(xpos&0xFFFF)>>9;
1477 dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
1478 dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
1480 dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
1481 dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
1489 // *** bilinear scaling and yuv->rgb or yuv->yuv conversion of yv12 slices:
1490 // *** Note: it's called multiple times while decoding a frame, first time y==0
1491 // *** Designed to upscale, but may work for downscale too.
1492 // s_xinc = (src_width << 16) / dst_width
1493 // s_yinc = (src_height << 16) / dst_height
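// e.g. a 320x240 -> 640x480 upscale gives s_xinc = (320<<16)/640 = 0x8000
// and s_yinc = (240<<16)/480 = 0x8000, i.e. half a source step per dst pixel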
1494 void SwScale_YV12slice(unsigned char* srcptr[],int stride[], int y, int h,
1495 uint8_t* dstptr[], int dststride, int dstw, int dstbpp,
1496 unsigned int s_xinc,unsigned int s_yinc){
1499 //static int s_yinc=(vo_dga_src_height<<16)/vo_dga_vp_height;
1500 //static int s_xinc=(vo_dga_src_width<<8)/vo_dga_vp_width;
1502 unsigned int s_xinc2;
static int s_srcypos; // points to the dst pixel's center in the source (0 is the center of pixel 0,0 in src)
// last horizontally interpolated lines, used to avoid unnecessary calculations
1508 static int s_last_ypos;
1509 static int s_last_y1pos;
1512 // used to detect a horizontal size change
1513 static int old_dstw= -1;
1514 static int old_s_xinc= -1;
if(((dstw + 7)&(~7)) >= dststride) dstw&= ~7; // round dstw down to a multiple of 8 if rounding up would exceed the stride
1523 srcWidth= (dstw*s_xinc + 0x8000)>>16;
1524 dstUVw= fullUVIpol ? dstw : dstw/2;
1527 canMMX2BeUsed= (s_xinc <= 0x10000 && (dstw&31)==0 && (srcWidth&15)==0) ? 1 : 0;
1530 // match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst
1531 // n-2 is the last chrominance sample available
// FIXME this is not perfect, but no one should notice the difference, the more correct variant
1533 // would be like the vertical one, but that would require some special code for the
1534 // first and last pixel
1535 if(canMMX2BeUsed) s_xinc+= 20;
1536 else s_xinc = ((srcWidth-2)<<16)/(dstw-2) - 20;
1538 if(fullUVIpol && !(dstbpp==12)) s_xinc2= s_xinc>>1;
1539 else s_xinc2= s_xinc;
1540 // force calculation of the horizontal interpolation of the first line
1543 // printf("dstw %d, srcw %d, mmx2 %d\n", dstw, srcWidth, canMMX2BeUsed);
1546 s_srcypos= s_yinc/2 - 0x8000;
// clean the buffers so that no green stuff is drawn if the width is not sane (%8=0)
1550 for(i=dstw-2; i<dstw+20; i++)
1552 pix_buf_uv[0][i] = pix_buf_uv[1][i]
1553 = pix_buf_uv[0][2048+i] = pix_buf_uv[1][2048+i] = 128*128;
1554 pix_buf_uv[0][i/2] = pix_buf_uv[1][i/2]
1555 = pix_buf_uv[0][2048+i/2] = pix_buf_uv[1][2048+i/2] = 128*128;
1556 pix_buf_y[0][i]= pix_buf_y[1][i]= 0;
// can't downscale!!!
1561 if((old_s_xinc != s_xinc || old_dstw!=dstw) && canMMX2BeUsed)
1573 // create an optimized horizontal scaling routine
1581 "movq (%%esi), %%mm0 \n\t" //FIXME Alignment
1582 "movq %%mm0, %%mm1 \n\t"
1583 "psrlq $8, %%mm0 \n\t"
1584 "punpcklbw %%mm7, %%mm1 \n\t"
1585 "movq %%mm2, %%mm3 \n\t"
1586 "punpcklbw %%mm7, %%mm0 \n\t"
1587 "addw %%bx, %%cx \n\t" //2*xalpha += (4*s_xinc)&0xFFFF
1588 "pshufw $0xFF, %%mm1, %%mm1 \n\t"
1590 "adcl %%edx, %%esi \n\t" //xx+= (4*s_xinc)>>16 + carry
1591 "pshufw $0xFF, %%mm0, %%mm0 \n\t"
1593 "psrlw $9, %%mm3 \n\t"
1594 "psubw %%mm1, %%mm0 \n\t"
1595 "pmullw %%mm3, %%mm0 \n\t"
1596 "paddw %%mm6, %%mm2 \n\t" // 2*alpha += xpos&0xFFFF
1597 "psllw $7, %%mm1 \n\t"
1598 "paddw %%mm1, %%mm0 \n\t"
1600 "movq %%mm0, (%%edi, %%eax) \n\t"
1602 "addl $8, %%eax \n\t"
1615 :"=r" (fragment), "=r" (imm8OfPShufW1), "=r" (imm8OfPShufW2),
1616 "=r" (fragmentLength)
1619 xpos= 0; //s_xinc/2 - 0x8000; // difference between pixel centers
1621 /* choose xinc so that all 8 parts fit exactly
1622 Note: we cannot use just 1 part because it would not fit in the code cache */
1623 // s_xinc2_diff= -((((s_xinc2*(dstw/8))&0xFFFF))/(dstw/8))-10;
1624 // s_xinc_diff= -((((s_xinc*(dstw/8))&0xFFFF))/(dstw/8));
1626 // s_xinc2_diff+= ((0x10000/(dstw/8)));
1628 // s_xinc_diff= s_xinc2_diff*2;
1630 // s_xinc2+= s_xinc2_diff;
1631 // s_xinc+= s_xinc_diff;
1633 // old_s_xinc= s_xinc;
1635 for(i=0; i<dstw/8; i++)
1642 int b=((xpos+s_xinc)>>16) - xx;
1643 int c=((xpos+s_xinc*2)>>16) - xx;
1644 int d=((xpos+s_xinc*3)>>16) - xx;
1646 memcpy(funnyYCode + fragmentLength*i/4, fragment, fragmentLength);
1648 funnyYCode[fragmentLength*i/4 + imm8OfPShufW1]=
1649 funnyYCode[fragmentLength*i/4 + imm8OfPShufW2]=
1650 a | (b<<2) | (c<<4) | (d<<6);
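// a..d above are the source pixel offsets (0..3) of the 4 output pixels;
// packed 2 bits each they form the immediates of the patched pshufw instructions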
// if we don't need to read 8 bytes then don't :), reduces the chance of
// crossing a cache line
// (0x6E patches the fragment's leading movq (0F 6F) into movd (0F 6E), a 4 byte load)
if(d<3) funnyYCode[fragmentLength*i/4 + 1]= 0x6E;
1656 funnyYCode[fragmentLength*(i+4)/4]= RET;
xpos= 0; //s_xinc2/2 - 0x10000; // difference between centers of chroma samples
1662 for(i=0; i<dstUVw/8; i++)
1669 int b=((xpos+s_xinc2)>>16) - xx;
1670 int c=((xpos+s_xinc2*2)>>16) - xx;
1671 int d=((xpos+s_xinc2*3)>>16) - xx;
1673 memcpy(funnyUVCode + fragmentLength*i/4, fragment, fragmentLength);
1675 funnyUVCode[fragmentLength*i/4 + imm8OfPShufW1]=
1676 funnyUVCode[fragmentLength*i/4 + imm8OfPShufW2]=
1677 a | (b<<2) | (c<<4) | (d<<6);
// if we don't need to read 8 bytes then don't :), reduces the chance of
// crossing a cache line
// (0x6E patches the fragment's leading movq (0F 6F) into movd (0F 6E), a 4 byte load)
if(d<3) funnyUVCode[fragmentLength*i/4 + 1]= 0x6E;
1683 funnyUVCode[fragmentLength*(i+4)/4]= RET;
1687 // funnyCode[0]= RET;
1694 unsigned char *dest =dstptr[0]+dststride*s_ypos;
1695 unsigned char *uDest=dstptr[1]+(dststride>>1)*(s_ypos>>1);
1696 unsigned char *vDest=dstptr[2]+(dststride>>1)*(s_ypos>>1);
1698 int y0=(s_srcypos + 0xFFFF)>>16; // first luminance source line number below the dst line
// points to the dst pixel's center in the source (0 is the center of pixel 0,0 in src)
1700 int srcuvpos= dstbpp==12 ? s_srcypos + s_yinc/2 - 0x8000 :
1702 int y1=(srcuvpos + 0x1FFFF)>>17; // first chrominance source line number below the dst line
1703 int yalpha=((s_srcypos-1)&0xFFFF)>>4;
1704 int uvalpha=((srcuvpos-1)&0x1FFFF)>>5;
1705 uint16_t *buf0=pix_buf_y[y0&1]; // top line of the interpolated slice
1706 uint16_t *buf1=pix_buf_y[((y0+1)&1)]; // bottom line of the interpolated slice
1707 uint16_t *uvbuf0=pix_buf_uv[y1&1]; // top line of the interpolated slice
1708 uint16_t *uvbuf1=pix_buf_uv[(y1+1)&1]; // bottom line of the interpolated slice
if(y0>=y+h) break; // FIXME wrong, skips last lines, but they are duplicates anyway
1712 if((y0&1) && dstbpp==12) uvalpha=-1; // there is no alpha if there is no line
1714 s_ypos++; s_srcypos+=s_yinc;
//only interpolate the src line horizontally if we didn't do it already
// skip if first line has been horizontally scaled already
1721 if(s_last_ypos != y0-1)
1723 // check if first line is before any available src lines
1724 if(y0-1 < y) src=srcptr[0]+(0 )*stride[0];
1725 else src=srcptr[0]+(y0-y-1)*stride[0];
1727 hyscale(buf0, dstw, src, srcWidth, s_xinc);
1729 // check if second line is after any available src lines
1730 if(y0-y >= h) src=srcptr[0]+(h-1)*stride[0];
1731 else src=srcptr[0]+(y0-y)*stride[0];
// the min() is required to avoid reusing lines which were not available
1734 s_last_ypos= MIN(y0, y+h-1);
1735 hyscale(buf1, dstw, src, srcWidth, s_xinc);
1737 // printf("%d %d %d %d\n", y, y1, s_last_y1pos, h);
1738 // *** horizontal scale U and V lines to temp buffer
1739 if(s_last_y1pos!=y1)
1741 uint8_t *src1, *src2;
// skip if first line has been horizontally scaled already
1743 if(s_last_y1pos != y1-1)
1745 // check if first line is before any available src lines
1748 src1= srcptr[1]+(0)*stride[1];
1749 src2= srcptr[2]+(0)*stride[2];
1751 src1= srcptr[1]+(y1-y/2-1)*stride[1];
1752 src2= srcptr[2]+(y1-y/2-1)*stride[2];
1754 hcscale(uvbuf0, dstUVw, src1, src2, srcWidth, s_xinc2);
1757 // check if second line is after any available src lines
1760 src1= srcptr[1]+(h/2-1)*stride[1];
1761 src2= srcptr[2]+(h/2-1)*stride[2];
1763 src1= srcptr[1]+(y1-y/2)*stride[1];
1764 src2= srcptr[2]+(y1-y/2)*stride[2];
1766 hcscale(uvbuf1, dstUVw, src1, src2, srcWidth, s_xinc2);
// the min() is required to avoid reusing lines which were not available
1769 s_last_y1pos= MIN(y1, y/2+h/2-1);
1772 if(dstbpp==12) //YV12
1773 yuv2yuv(buf0, buf1, uvbuf0, uvbuf1, dest, uDest, vDest, dstw, yalpha, uvalpha);
1774 else if(ABS(s_yinc - 0x10000) < 10)
1775 yuv2rgb1(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
1777 yuv2rgbX(buf0, buf1, uvbuf0, uvbuf1, dest, dstw, yalpha, uvalpha, dstbpp);
1780 b16Dither= b16Dither1;
1781 b16Dither1= b16Dither2;
1782 b16Dither2= b16Dither;
1784 g16Dither= g16Dither1;
1785 g16Dither1= g16Dither2;
1786 g16Dither2= g16Dither;
1791 __asm __volatile(SFENCE:::"memory");
1792 __asm __volatile(EMMS:::"memory");
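
// A minimal usage sketch (assumptions: a YV12 frame in src[]/stride[],
// one call covering the whole frame, 32 bpp output):
//
//	SwScale_Init();
//	SwScale_YV12slice(src, stride, 0, src_h, dst, dst_stride, dst_w, 32,
//			(src_w<<16)/dst_w, (src_h<<16)/dst_h);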
1797 void SwScale_Init(){
1798 // generating tables:
1802 clip_table[i+256]=i;
1803 clip_table[i+512]=255;
1804 yuvtab_2568[i]=(0x2568*(i-16))+(256<<13);
1805 yuvtab_3343[i]=0x3343*(i-128);
1806 yuvtab_0c92[i]=-0x0c92*(i-128);
1807 yuvtab_1a1e[i]=-0x1a1e*(i-128);
1808 yuvtab_40cf[i]=0x40cf*(i-128);
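// clip_table now maps i-256 to the clamped range 0..255 (entries 0..255
// clamp to 0, 256..511 are the identity, 512..767 clamp to 255); below we
// derive the pre-shifted 15/16 bit variants from it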
1811 for(i=0; i<768; i++)
1813 int v= clip_table[i];
1814 clip_table16b[i]= v>>3;
1815 clip_table16g[i]= (v<<3)&0x07E0;
1816 clip_table16r[i]= (v<<8)&0xF800;
1817 clip_table15b[i]= v>>3;
1818 clip_table15g[i]= (v<<2)&0x03E0;
1819 clip_table15r[i]= (v<<7)&0x7C00;