/*
 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * the C code (not assembly, mmx, ...) of the swscaler which has been written
 * by Michael Niedermayer can be used under the LGPL license too
 */

#undef REAL_MOVNTQ
#undef MOVNTQ
#undef PAVGB
#undef PREFETCH
#undef PREFETCHW
#undef EMMS
#undef SFENCE

#ifdef HAVE_3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS     "femms"
#else
#define EMMS     "emms"
#endif

#ifdef HAVE_3DNOW
#define PREFETCH  "prefetch"
#define PREFETCHW "prefetchw"
#elif defined ( HAVE_MMX2 )
#define PREFETCH "prefetchnta"
#define PREFETCHW "prefetcht0"
#else
#define PREFETCH "/nop"
#define PREFETCHW "/nop"
#endif

#ifdef HAVE_MMX2
#define SFENCE "sfence"
#else
#define SFENCE "/nop"
#endif

#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif

#ifdef HAVE_MMX2
#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif
#define MOVNTQ(a,b)  REAL_MOVNTQ(a,b)

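/*
 * Note: on targets without MMX2/3DNow! the wrappers above degrade gracefully:
 * PREFETCH/PREFETCHW/SFENCE are stubbed out and MOVNTQ falls back to a plain
 * movq, so the asm bodies below assemble unchanged on plain MMX.
 */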
#ifdef HAVE_ALTIVEC
#include "swscale_altivec_template.c"
#endif

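/*
 * YSCALEYUV2YV12X: vertical scaling of one plane to 8-bit output.
 * The filter list at 'offset' inside SwsContext is a sequence of 16-byte
 * entries {source pointer, 16-bit coefficient replicated into 4 words},
 * terminated by a NULL pointer.  For 8 pixels at a time it accumulates
 * pmulhw(src, coeff) (i.e. (src*coeff)>>16) into the rounder, shifts right
 * by 3 and packs with unsigned saturation; conceptually this is roughly
 *     dest[i] = clip_uint8(((1<<18) + sum_j src_j[i]*filter_j) >> 19)
 * as in the C fallback (yuv2yuvXinC).
 */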
#define YSCALEYUV2YV12X(x, offset, dest, width) \
                asm volatile(\
                        "xor %%"REG_a", %%"REG_a"       \n\t"\
                        "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
                        "movq %%mm3, %%mm4              \n\t"\
                        "lea " offset "(%0), %%"REG_d"  \n\t"\
                        "mov (%%"REG_d"), %%"REG_S"     \n\t"\
                        ASMALIGN(4) /* FIXME Unroll? */\
                        "1:                             \n\t"\
                        "movq 8(%%"REG_d"), %%mm0       \n\t" /* filterCoeff */\
                        "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
                        "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm5\n\t" /* srcData */\
                        "add $16, %%"REG_d"             \n\t"\
                        "mov (%%"REG_d"), %%"REG_S"     \n\t"\
                        "test %%"REG_S", %%"REG_S"      \n\t"\
                        "pmulhw %%mm0, %%mm2            \n\t"\
                        "pmulhw %%mm0, %%mm5            \n\t"\
                        "paddw %%mm2, %%mm3             \n\t"\
                        "paddw %%mm5, %%mm4             \n\t"\
                        " jnz 1b                        \n\t"\
                        "psraw $3, %%mm3                \n\t"\
                        "psraw $3, %%mm4                \n\t"\
                        "packuswb %%mm4, %%mm3          \n\t"\
                        MOVNTQ(%%mm3, (%1, %%REGa))\
                        "add $8, %%"REG_a"              \n\t"\
                        "cmp %2, %%"REG_a"              \n\t"\
                        "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
                        "movq %%mm3, %%mm4              \n\t"\
                        "lea " offset "(%0), %%"REG_d"  \n\t"\
                        "mov (%%"REG_d"), %%"REG_S"     \n\t"\
                        "jb 1b                          \n\t"\
                        :: "r" (&c->redDither),\
                        "r" (dest), "p" (width)\
                        : "%"REG_a, "%"REG_d, "%"REG_S\
                );

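/*
 * YSCALEYUV2YV12X_ACCURATE: same vertical scaling, selected by
 * SWS_ACCURATE_RND.  Here each filter entry carries two source pointers plus
 * their two coefficients interleaved, so pmaddwd can accumulate the full
 * 32-bit products of two taps per iteration; the sum is only reduced to
 * 16 bits (psrad/packssdw) at the very end, avoiding the intermediate
 * truncation that pmulhw causes in the plain version above.
 */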
#define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
                asm volatile(\
                        "lea " offset "(%0), %%"REG_d"  \n\t"\
                        "xor %%"REG_a", %%"REG_a"       \n\t"\
                        "pxor %%mm4, %%mm4              \n\t"\
                        "pxor %%mm5, %%mm5              \n\t"\
                        "pxor %%mm6, %%mm6              \n\t"\
                        "pxor %%mm7, %%mm7              \n\t"\
                        "mov (%%"REG_d"), %%"REG_S"     \n\t"\
                        ASMALIGN(4) \
                        "1:                             \n\t"\
                        "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm0\n\t" /* srcData */\
                        "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
                        "mov 4(%%"REG_d"), %%"REG_S"    \n\t"\
                        "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm1\n\t" /* srcData */\
                        "movq %%mm0, %%mm3              \n\t"\
                        "punpcklwd %%mm1, %%mm0         \n\t"\
                        "punpckhwd %%mm1, %%mm3         \n\t"\
                        "movq 8(%%"REG_d"), %%mm1       \n\t" /* filterCoeff */\
                        "pmaddwd %%mm1, %%mm0           \n\t"\
                        "pmaddwd %%mm1, %%mm3           \n\t"\
                        "paddd %%mm0, %%mm4             \n\t"\
                        "paddd %%mm3, %%mm5             \n\t"\
                        "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm3\n\t" /* srcData */\
                        "mov 16(%%"REG_d"), %%"REG_S"   \n\t"\
                        "add $16, %%"REG_d"             \n\t"\
                        "test %%"REG_S", %%"REG_S"      \n\t"\
                        "movq %%mm2, %%mm0              \n\t"\
                        "punpcklwd %%mm3, %%mm2         \n\t"\
                        "punpckhwd %%mm3, %%mm0         \n\t"\
                        "pmaddwd %%mm1, %%mm2           \n\t"\
                        "pmaddwd %%mm1, %%mm0           \n\t"\
                        "paddd %%mm2, %%mm6             \n\t"\
                        "paddd %%mm0, %%mm7             \n\t"\
                        " jnz 1b                        \n\t"\
                        "psrad $16, %%mm4               \n\t"\
                        "psrad $16, %%mm5               \n\t"\
                        "psrad $16, %%mm6               \n\t"\
                        "psrad $16, %%mm7               \n\t"\
                        "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
                        "packssdw %%mm5, %%mm4          \n\t"\
                        "packssdw %%mm7, %%mm6          \n\t"\
                        "paddw %%mm0, %%mm4             \n\t"\
                        "paddw %%mm0, %%mm6             \n\t"\
                        "psraw $3, %%mm4                \n\t"\
                        "psraw $3, %%mm6                \n\t"\
                        "packuswb %%mm6, %%mm4          \n\t"\
                        MOVNTQ(%%mm4, (%1, %%REGa))\
                        "add $8, %%"REG_a"              \n\t"\
                        "cmp %2, %%"REG_a"              \n\t"\
                        "lea " offset "(%0), %%"REG_d"  \n\t"\
                        "pxor %%mm4, %%mm4              \n\t"\
                        "pxor %%mm5, %%mm5              \n\t"\
                        "pxor %%mm6, %%mm6              \n\t"\
                        "pxor %%mm7, %%mm7              \n\t"\
                        "mov (%%"REG_d"), %%"REG_S"     \n\t"\
                        "jb 1b                          \n\t"\
                        :: "r" (&c->redDither),\
                        "r" (dest), "p" (width)\
                        : "%"REG_a, "%"REG_d, "%"REG_S\
                );

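/*
 * YSCALEYUV2YV121: the unscaled vertical case (a single unit filter tap).
 * It just shifts the 16-bit intermediates right by 7 and packs them to
 * unsigned bytes, 8 pixels per iteration.
 */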
#define YSCALEYUV2YV121 \
                        "mov %2, %%"REG_a"              \n\t"\
                        ASMALIGN(4) /* FIXME Unroll? */\
                        "1:                             \n\t"\
                        "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
                        "movq 8(%0, %%"REG_a", 2), %%mm1\n\t"\
                        "psraw $7, %%mm0                \n\t"\
                        "psraw $7, %%mm1                \n\t"\
                        "packuswb %%mm1, %%mm0          \n\t"\
                        MOVNTQ(%%mm0, (%1, %%REGa))\
                        "add $8, %%"REG_a"              \n\t"\
                        "jnc 1b                         \n\t"

/*
                        :: "m" (-lumFilterSize), "m" (-chrFilterSize),
                           "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
                           "r" (dest), "m" (dstW),
                           "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
                        : "%eax", "%ebx", "%ecx", "%edx", "%esi"
*/
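/*
 * YSCALEYUV2PACKEDX: vertical scaling for packed output.  Loop 2 first runs
 * the chroma MAC over the chroma filter list (U ends up in %%mm3, V, read
 * 4096 bytes further into the buffer, in %%mm4), then the luma MAC leaves
 * Y1 in %%mm1 and Y2 in %%mm7; that register layout is what YSCALEYUV2RGBX
 * and the WRITE* macros below expect.
 */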
#define YSCALEYUV2PACKEDX \
        asm volatile(\
                "xor %%"REG_a", %%"REG_a"       \n\t"\
                ASMALIGN(4)\
                "nop                            \n\t"\
                "1:                             \n\t"\
                "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
                "mov (%%"REG_d"), %%"REG_S"     \n\t"\
                "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
                "movq %%mm3, %%mm4              \n\t"\
                ASMALIGN(4)\
                "2:                             \n\t"\
                "movq 8(%%"REG_d"), %%mm0       \n\t" /* filterCoeff */\
                "movq (%%"REG_S", %%"REG_a"), %%mm2     \n\t" /* UsrcData */\
                "movq 4096(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
                "add $16, %%"REG_d"             \n\t"\
                "mov (%%"REG_d"), %%"REG_S"     \n\t"\
                "pmulhw %%mm0, %%mm2            \n\t"\
                "pmulhw %%mm0, %%mm5            \n\t"\
                "paddw %%mm2, %%mm3             \n\t"\
                "paddw %%mm5, %%mm4             \n\t"\
                "test %%"REG_S", %%"REG_S"      \n\t"\
                " jnz 2b                        \n\t"\
\
                "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
                "mov (%%"REG_d"), %%"REG_S"     \n\t"\
                "movq "VROUNDER_OFFSET"(%0), %%mm1\n\t"\
                "movq %%mm1, %%mm7              \n\t"\
                ASMALIGN(4)\
                "2:                             \n\t"\
                "movq 8(%%"REG_d"), %%mm0       \n\t" /* filterCoeff */\
                "movq (%%"REG_S", %%"REG_a", 2), %%mm2  \n\t" /* Y1srcData */\
                "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
                "add $16, %%"REG_d"             \n\t"\
                "mov (%%"REG_d"), %%"REG_S"     \n\t"\
                "pmulhw %%mm0, %%mm2            \n\t"\
                "pmulhw %%mm0, %%mm5            \n\t"\
                "paddw %%mm2, %%mm1             \n\t"\
                "paddw %%mm5, %%mm7             \n\t"\
                "test %%"REG_S", %%"REG_S"      \n\t"\
                " jnz 2b                        \n\t"\

#define YSCALEYUV2PACKEDX_END\
        :: "r" (&c->redDither), \
            "m" (dummy), "m" (dummy), "m" (dummy),\
            "r" (dest), "m" (dstW)\
        : "%"REG_a, "%"REG_d, "%"REG_S\
        );

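/*
 * YSCALEYUV2PACKEDX_ACCURATE: the SWS_ACCURATE_RND variant of the above,
 * using pmaddwd and 32-bit accumulators.  Because all eight MMX registers
 * are needed, the finished U/V values are parked in U_TEMP/V_TEMP inside
 * the context while the luma pass runs, then reloaded into %%mm3/%%mm4.
 */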
#define YSCALEYUV2PACKEDX_ACCURATE \
        asm volatile(\
                "xor %%"REG_a", %%"REG_a"       \n\t"\
                ASMALIGN(4)\
                "nop                            \n\t"\
                "1:                             \n\t"\
                "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
                "mov (%%"REG_d"), %%"REG_S"     \n\t"\
                "pxor %%mm4, %%mm4              \n\t"\
                "pxor %%mm5, %%mm5              \n\t"\
                "pxor %%mm6, %%mm6              \n\t"\
                "pxor %%mm7, %%mm7              \n\t"\
                ASMALIGN(4)\
                "2:                             \n\t"\
                "movq (%%"REG_S", %%"REG_a"), %%mm0     \n\t" /* UsrcData */\
                "movq 4096(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
                "mov 4(%%"REG_d"), %%"REG_S"    \n\t"\
                "movq (%%"REG_S", %%"REG_a"), %%mm1     \n\t" /* UsrcData */\
                "movq %%mm0, %%mm3              \n\t"\
                "punpcklwd %%mm1, %%mm0         \n\t"\
                "punpckhwd %%mm1, %%mm3         \n\t"\
                "movq 8(%%"REG_d"), %%mm1       \n\t" /* filterCoeff */\
                "pmaddwd %%mm1, %%mm0           \n\t"\
                "pmaddwd %%mm1, %%mm3           \n\t"\
                "paddd %%mm0, %%mm4             \n\t"\
                "paddd %%mm3, %%mm5             \n\t"\
                "movq 4096(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
                "mov 16(%%"REG_d"), %%"REG_S"   \n\t"\
                "add $16, %%"REG_d"             \n\t"\
                "test %%"REG_S", %%"REG_S"      \n\t"\
                "movq %%mm2, %%mm0              \n\t"\
                "punpcklwd %%mm3, %%mm2         \n\t"\
                "punpckhwd %%mm3, %%mm0         \n\t"\
                "pmaddwd %%mm1, %%mm2           \n\t"\
                "pmaddwd %%mm1, %%mm0           \n\t"\
                "paddd %%mm2, %%mm6             \n\t"\
                "paddd %%mm0, %%mm7             \n\t"\
                " jnz 2b                        \n\t"\
                "psrad $16, %%mm4               \n\t"\
                "psrad $16, %%mm5               \n\t"\
                "psrad $16, %%mm6               \n\t"\
                "psrad $16, %%mm7               \n\t"\
                "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
                "packssdw %%mm5, %%mm4          \n\t"\
                "packssdw %%mm7, %%mm6          \n\t"\
                "paddw %%mm0, %%mm4             \n\t"\
                "paddw %%mm0, %%mm6             \n\t"\
                "movq %%mm4, "U_TEMP"(%0)       \n\t"\
                "movq %%mm6, "V_TEMP"(%0)       \n\t"\
\
                "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
                "mov (%%"REG_d"), %%"REG_S"     \n\t"\
                "pxor %%mm1, %%mm1              \n\t"\
                "pxor %%mm5, %%mm5              \n\t"\
                "pxor %%mm7, %%mm7              \n\t"\
                "pxor %%mm6, %%mm6              \n\t"\
                ASMALIGN(4)\
                "2:                             \n\t"\
                "movq (%%"REG_S", %%"REG_a", 2), %%mm0  \n\t" /* Y1srcData */\
                "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
                "mov 4(%%"REG_d"), %%"REG_S"    \n\t"\
                "movq (%%"REG_S", %%"REG_a", 2), %%mm4  \n\t" /* Y1srcData */\
                "movq %%mm0, %%mm3              \n\t"\
                "punpcklwd %%mm4, %%mm0         \n\t"\
                "punpckhwd %%mm4, %%mm3         \n\t"\
                "movq 8(%%"REG_d"), %%mm4       \n\t" /* filterCoeff */\
                "pmaddwd %%mm4, %%mm0           \n\t"\
                "pmaddwd %%mm4, %%mm3           \n\t"\
                "paddd %%mm0, %%mm1             \n\t"\
                "paddd %%mm3, %%mm5             \n\t"\
                "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
                "mov 16(%%"REG_d"), %%"REG_S"   \n\t"\
                "add $16, %%"REG_d"             \n\t"\
                "test %%"REG_S", %%"REG_S"      \n\t"\
                "movq %%mm2, %%mm0              \n\t"\
                "punpcklwd %%mm3, %%mm2         \n\t"\
                "punpckhwd %%mm3, %%mm0         \n\t"\
                "pmaddwd %%mm4, %%mm2           \n\t"\
                "pmaddwd %%mm4, %%mm0           \n\t"\
                "paddd %%mm2, %%mm7             \n\t"\
                "paddd %%mm0, %%mm6             \n\t"\
                " jnz 2b                        \n\t"\
                "psrad $16, %%mm1               \n\t"\
                "psrad $16, %%mm5               \n\t"\
                "psrad $16, %%mm7               \n\t"\
                "psrad $16, %%mm6               \n\t"\
                "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
                "packssdw %%mm5, %%mm1          \n\t"\
                "packssdw %%mm6, %%mm7          \n\t"\
                "paddw %%mm0, %%mm1             \n\t"\
                "paddw %%mm0, %%mm7             \n\t"\
                "movq  "U_TEMP"(%0), %%mm3      \n\t"\
                "movq  "V_TEMP"(%0), %%mm4      \n\t"\

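/*
 * YSCALEYUV2RGBX: converts the vertically scaled samples (Y1 in %%mm1,
 * Y2 in %%mm7, U in %%mm3, V in %%mm4) into 8 RGB pixels: subtract the
 * U/V and Y offsets, scale by the per-context coefficients with pmulhw,
 * form B = Y + ub*U, G = Y + ug*U + vg*V, R = Y + vr*V, and pack to bytes.
 * It leaves B/G/R in %%mm2/%%mm4/%%mm5 with %%mm7 = 0, the layout the
 * WRITE* output macros expect.
 */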
#define YSCALEYUV2RGBX \
                "psubw "U_OFFSET"(%0), %%mm3    \n\t" /* (U-128)8*/\
                "psubw "V_OFFSET"(%0), %%mm4    \n\t" /* (V-128)8*/\
                "movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
                "movq %%mm4, %%mm5              \n\t" /* (V-128)8*/\
                "pmulhw "UG_COEFF"(%0), %%mm3   \n\t"\
                "pmulhw "VG_COEFF"(%0), %%mm4   \n\t"\
        /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
                "pmulhw "UB_COEFF"(%0), %%mm2   \n\t"\
                "pmulhw "VR_COEFF"(%0), %%mm5   \n\t"\
                "psubw "Y_OFFSET"(%0), %%mm1    \n\t" /* 8(Y-16)*/\
                "psubw "Y_OFFSET"(%0), %%mm7    \n\t" /* 8(Y-16)*/\
                "pmulhw "Y_COEFF"(%0), %%mm1    \n\t"\
                "pmulhw "Y_COEFF"(%0), %%mm7    \n\t"\
        /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
                "paddw %%mm3, %%mm4             \n\t"\
                "movq %%mm2, %%mm0              \n\t"\
                "movq %%mm5, %%mm6              \n\t"\
                "movq %%mm4, %%mm3              \n\t"\
                "punpcklwd %%mm2, %%mm2         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm4, %%mm4         \n\t"\
                "paddw %%mm1, %%mm2             \n\t"\
                "paddw %%mm1, %%mm5             \n\t"\
                "paddw %%mm1, %%mm4             \n\t"\
                "punpckhwd %%mm0, %%mm0         \n\t"\
                "punpckhwd %%mm6, %%mm6         \n\t"\
                "punpckhwd %%mm3, %%mm3         \n\t"\
                "paddw %%mm7, %%mm0             \n\t"\
                "paddw %%mm7, %%mm6             \n\t"\
                "paddw %%mm7, %%mm3             \n\t"\
                /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
                "packuswb %%mm0, %%mm2          \n\t"\
                "packuswb %%mm6, %%mm5          \n\t"\
                "packuswb %%mm3, %%mm4          \n\t"\
                "pxor %%mm7, %%mm7              \n\t"
#if 0
#define FULL_YSCALEYUV2RGB \
                "pxor %%mm7, %%mm7              \n\t"\
                "movd %6, %%mm6                 \n\t" /*yalpha1*/\
                "punpcklwd %%mm6, %%mm6         \n\t"\
                "punpcklwd %%mm6, %%mm6         \n\t"\
                "movd %7, %%mm5                 \n\t" /*uvalpha1*/\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "xor %%"REG_a", %%"REG_a"       \n\t"\
                ASMALIGN(4)\
                "1:                             \n\t"\
                "movq (%0, %%"REG_a", 2), %%mm0 \n\t" /*buf0[eax]*/\
                "movq (%1, %%"REG_a", 2), %%mm1 \n\t" /*buf1[eax]*/\
                "movq (%2, %%"REG_a",2), %%mm2  \n\t" /* uvbuf0[eax]*/\
                "movq (%3, %%"REG_a",2), %%mm3  \n\t" /* uvbuf1[eax]*/\
                "psubw %%mm1, %%mm0             \n\t" /* buf0[eax] - buf1[eax]*/\
                "psubw %%mm3, %%mm2             \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
                "pmulhw %%mm6, %%mm0            \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
                "pmulhw %%mm5, %%mm2            \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
                "psraw $4, %%mm1                \n\t" /* buf0[eax] - buf1[eax] >>4*/\
                "movq 4096(%2, %%"REG_a",2), %%mm4      \n\t" /* uvbuf0[eax+2048]*/\
                "psraw $4, %%mm3                \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
                "paddw %%mm0, %%mm1             \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
                "movq 4096(%3, %%"REG_a",2), %%mm0      \n\t" /* uvbuf1[eax+2048]*/\
                "paddw %%mm2, %%mm3             \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
                "psubw %%mm0, %%mm4             \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
                "psubw "MANGLE(w80)", %%mm1     \n\t" /* 8(Y-16)*/\
                "psubw "MANGLE(w400)", %%mm3    \n\t" /* 8(U-128)*/\
                "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
\
\
                "pmulhw %%mm5, %%mm4            \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
                "movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
                "pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\
                "psraw $4, %%mm0                \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
                "pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\
                "paddw %%mm4, %%mm0             \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
                "psubw "MANGLE(w400)", %%mm0    \n\t" /* (V-128)8*/\
\
\
                "movq %%mm0, %%mm4              \n\t" /* (V-128)8*/\
                "pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\
                "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
                "paddw %%mm1, %%mm3             \n\t" /* B*/\
                "paddw %%mm1, %%mm0             \n\t" /* R*/\
                "packuswb %%mm3, %%mm3          \n\t"\
\
                "packuswb %%mm0, %%mm0          \n\t"\
                "paddw %%mm4, %%mm2             \n\t"\
                "paddw %%mm2, %%mm1             \n\t" /* G*/\
\
                "packuswb %%mm1, %%mm1          \n\t"
#endif

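/*
 * The 2-tap (bilinear) variants: instead of a full filter list they blend
 * exactly two source lines, buf0/buf1 and uvbuf0/uvbuf1, using the yalpha1
 * and uvalpha1 weights stored at LUM_MMX_FILTER_OFFSET+8 and
 * CHR_MMX_FILTER_OFFSET+8 in the context.  YSCALEYUV2PACKED keeps the
 * blended Y/U/V for YUY2 output (it pre-shifts the weights by 3 first);
 * YSCALEYUV2RGB additionally performs the same YUV->RGB math as
 * YSCALEYUV2RGBX above.
 */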
#define REAL_YSCALEYUV2PACKED(index, c) \
                "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
                "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\
                "psraw $3, %%mm0                \n\t"\
                "psraw $3, %%mm1                \n\t"\
                "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\
                "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\
                "xor "#index", "#index"         \n\t"\
                ASMALIGN(4)\
                "1:                             \n\t"\
                "movq (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
                "movq (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
                "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
                "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
                "psubw %%mm3, %%mm2             \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
                "psubw %%mm4, %%mm5             \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
                "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
                "pmulhw %%mm0, %%mm2            \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
                "pmulhw %%mm0, %%mm5            \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
                "psraw $7, %%mm3                \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
                "psraw $7, %%mm4                \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
                "paddw %%mm2, %%mm3             \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
                "paddw %%mm5, %%mm4             \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
                "movq (%0, "#index", 2), %%mm0  \n\t" /*buf0[eax]*/\
                "movq (%1, "#index", 2), %%mm1  \n\t" /*buf1[eax]*/\
                "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
                "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
                "psubw %%mm1, %%mm0             \n\t" /* buf0[eax] - buf1[eax]*/\
                "psubw %%mm7, %%mm6             \n\t" /* buf0[eax] - buf1[eax]*/\
                "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
                "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
                "psraw $7, %%mm1                \n\t" /* buf0[eax] - buf1[eax] >>4*/\
                "psraw $7, %%mm7                \n\t" /* buf0[eax] - buf1[eax] >>4*/\
                "paddw %%mm0, %%mm1             \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
                "paddw %%mm6, %%mm7             \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\

#define YSCALEYUV2PACKED(index, c)  REAL_YSCALEYUV2PACKED(index, c)

#define REAL_YSCALEYUV2RGB(index, c) \
                "xor "#index", "#index" \n\t"\
                ASMALIGN(4)\
                "1:                             \n\t"\
                "movq (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
                "movq (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
                "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
                "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
                "psubw %%mm3, %%mm2             \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
                "psubw %%mm4, %%mm5             \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
                "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
                "pmulhw %%mm0, %%mm2            \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
                "pmulhw %%mm0, %%mm5            \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
                "psraw $4, %%mm3                \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
                "psraw $4, %%mm4                \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
                "paddw %%mm2, %%mm3             \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
                "paddw %%mm5, %%mm4             \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
                "psubw "U_OFFSET"("#c"), %%mm3  \n\t" /* (U-128)8*/\
                "psubw "V_OFFSET"("#c"), %%mm4  \n\t" /* (V-128)8*/\
                "movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
                "movq %%mm4, %%mm5              \n\t" /* (V-128)8*/\
                "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
                "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
        /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
                "movq (%0, "#index", 2), %%mm0  \n\t" /*buf0[eax]*/\
                "movq (%1, "#index", 2), %%mm1  \n\t" /*buf1[eax]*/\
                "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
                "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
                "psubw %%mm1, %%mm0             \n\t" /* buf0[eax] - buf1[eax]*/\
                "psubw %%mm7, %%mm6             \n\t" /* buf0[eax] - buf1[eax]*/\
                "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
                "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
                "psraw $4, %%mm1                \n\t" /* buf0[eax] - buf1[eax] >>4*/\
                "psraw $4, %%mm7                \n\t" /* buf0[eax] - buf1[eax] >>4*/\
                "paddw %%mm0, %%mm1             \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
                "paddw %%mm6, %%mm7             \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
                "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
                "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
                "psubw "Y_OFFSET"("#c"), %%mm1  \n\t" /* 8(Y-16)*/\
                "psubw "Y_OFFSET"("#c"), %%mm7  \n\t" /* 8(Y-16)*/\
                "pmulhw "Y_COEFF"("#c"), %%mm1  \n\t"\
                "pmulhw "Y_COEFF"("#c"), %%mm7  \n\t"\
        /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
                "paddw %%mm3, %%mm4             \n\t"\
                "movq %%mm2, %%mm0              \n\t"\
                "movq %%mm5, %%mm6              \n\t"\
                "movq %%mm4, %%mm3              \n\t"\
                "punpcklwd %%mm2, %%mm2         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm4, %%mm4         \n\t"\
                "paddw %%mm1, %%mm2             \n\t"\
                "paddw %%mm1, %%mm5             \n\t"\
                "paddw %%mm1, %%mm4             \n\t"\
                "punpckhwd %%mm0, %%mm0         \n\t"\
                "punpckhwd %%mm6, %%mm6         \n\t"\
                "punpckhwd %%mm3, %%mm3         \n\t"\
                "paddw %%mm7, %%mm0             \n\t"\
                "paddw %%mm7, %%mm6             \n\t"\
                "paddw %%mm7, %%mm3             \n\t"\
                /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
                "packuswb %%mm0, %%mm2          \n\t"\
                "packuswb %%mm6, %%mm5          \n\t"\
                "packuswb %%mm3, %%mm4          \n\t"\
                "pxor %%mm7, %%mm7              \n\t"
#define YSCALEYUV2RGB(index, c)  REAL_YSCALEYUV2RGB(index, c)

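/*
 * The "1" variants are used when only a single input line is needed (the
 * second line's weight is negligible), so they just shift buf0/uvbuf0 down
 * without interpolating; the "1b" variants below average uvbuf0 and uvbuf1
 * with equal weights instead.
 */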
#define REAL_YSCALEYUV2PACKED1(index, c) \
                "xor "#index", "#index"         \n\t"\
                ASMALIGN(4)\
                "1:                             \n\t"\
                "movq (%2, "#index"), %%mm3     \n\t" /* uvbuf0[eax]*/\
                "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
                "psraw $7, %%mm3                \n\t" \
                "psraw $7, %%mm4                \n\t" \
                "movq (%0, "#index", 2), %%mm1  \n\t" /*buf0[eax]*/\
                "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
                "psraw $7, %%mm1                \n\t" \
                "psraw $7, %%mm7                \n\t" \

#define YSCALEYUV2PACKED1(index, c)  REAL_YSCALEYUV2PACKED1(index, c)

#define REAL_YSCALEYUV2RGB1(index, c) \
                "xor "#index", "#index" \n\t"\
                ASMALIGN(4)\
                "1:                             \n\t"\
                "movq (%2, "#index"), %%mm3     \n\t" /* uvbuf0[eax]*/\
                "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
                "psraw $4, %%mm3                \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
                "psraw $4, %%mm4                \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
                "psubw "U_OFFSET"("#c"), %%mm3  \n\t" /* (U-128)8*/\
                "psubw "V_OFFSET"("#c"), %%mm4  \n\t" /* (V-128)8*/\
                "movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
                "movq %%mm4, %%mm5              \n\t" /* (V-128)8*/\
                "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
                "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
        /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
                "movq (%0, "#index", 2), %%mm1  \n\t" /*buf0[eax]*/\
                "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
                "psraw $4, %%mm1                \n\t" /* buf0[eax] - buf1[eax] >>4*/\
                "psraw $4, %%mm7                \n\t" /* buf0[eax] - buf1[eax] >>4*/\
                "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
                "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
                "psubw "Y_OFFSET"("#c"), %%mm1  \n\t" /* 8(Y-16)*/\
                "psubw "Y_OFFSET"("#c"), %%mm7  \n\t" /* 8(Y-16)*/\
                "pmulhw "Y_COEFF"("#c"), %%mm1  \n\t"\
                "pmulhw "Y_COEFF"("#c"), %%mm7  \n\t"\
        /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
                "paddw %%mm3, %%mm4             \n\t"\
                "movq %%mm2, %%mm0              \n\t"\
                "movq %%mm5, %%mm6              \n\t"\
                "movq %%mm4, %%mm3              \n\t"\
                "punpcklwd %%mm2, %%mm2         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm4, %%mm4         \n\t"\
                "paddw %%mm1, %%mm2             \n\t"\
                "paddw %%mm1, %%mm5             \n\t"\
                "paddw %%mm1, %%mm4             \n\t"\
                "punpckhwd %%mm0, %%mm0         \n\t"\
                "punpckhwd %%mm6, %%mm6         \n\t"\
                "punpckhwd %%mm3, %%mm3         \n\t"\
                "paddw %%mm7, %%mm0             \n\t"\
                "paddw %%mm7, %%mm6             \n\t"\
                "paddw %%mm7, %%mm3             \n\t"\
                /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
                "packuswb %%mm0, %%mm2          \n\t"\
                "packuswb %%mm6, %%mm5          \n\t"\
                "packuswb %%mm3, %%mm4          \n\t"\
                "pxor %%mm7, %%mm7              \n\t"
#define YSCALEYUV2RGB1(index, c)  REAL_YSCALEYUV2RGB1(index, c)

#define REAL_YSCALEYUV2PACKED1b(index, c) \
                "xor "#index", "#index"         \n\t"\
                ASMALIGN(4)\
                "1:                             \n\t"\
                "movq (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
                "movq (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
                "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
                "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
                "paddw %%mm2, %%mm3             \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
                "paddw %%mm5, %%mm4             \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
                "psrlw $8, %%mm3                \n\t" \
                "psrlw $8, %%mm4                \n\t" \
                "movq (%0, "#index", 2), %%mm1  \n\t" /*buf0[eax]*/\
                "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
                "psraw $7, %%mm1                \n\t" \
                "psraw $7, %%mm7                \n\t"
#define YSCALEYUV2PACKED1b(index, c)  REAL_YSCALEYUV2PACKED1b(index, c)

// do vertical chrominance interpolation
#define REAL_YSCALEYUV2RGB1b(index, c) \
                "xor "#index", "#index"         \n\t"\
                ASMALIGN(4)\
                "1:                             \n\t"\
                "movq (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
                "movq (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
                "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
                "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
                "paddw %%mm2, %%mm3             \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
                "paddw %%mm5, %%mm4             \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
                "psrlw $5, %%mm3                \n\t" /*FIXME might overflow*/\
                "psrlw $5, %%mm4                \n\t" /*FIXME might overflow*/\
                "psubw "U_OFFSET"("#c"), %%mm3  \n\t" /* (U-128)8*/\
                "psubw "V_OFFSET"("#c"), %%mm4  \n\t" /* (V-128)8*/\
                "movq %%mm3, %%mm2              \n\t" /* (U-128)8*/\
                "movq %%mm4, %%mm5              \n\t" /* (V-128)8*/\
                "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
                "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
        /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
                "movq (%0, "#index", 2), %%mm1  \n\t" /*buf0[eax]*/\
                "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
                "psraw $4, %%mm1                \n\t" /* buf0[eax] - buf1[eax] >>4*/\
                "psraw $4, %%mm7                \n\t" /* buf0[eax] - buf1[eax] >>4*/\
                "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
                "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
                "psubw "Y_OFFSET"("#c"), %%mm1  \n\t" /* 8(Y-16)*/\
                "psubw "Y_OFFSET"("#c"), %%mm7  \n\t" /* 8(Y-16)*/\
                "pmulhw "Y_COEFF"("#c"), %%mm1  \n\t"\
                "pmulhw "Y_COEFF"("#c"), %%mm7  \n\t"\
        /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
                "paddw %%mm3, %%mm4             \n\t"\
                "movq %%mm2, %%mm0              \n\t"\
                "movq %%mm5, %%mm6              \n\t"\
                "movq %%mm4, %%mm3              \n\t"\
                "punpcklwd %%mm2, %%mm2         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm4, %%mm4         \n\t"\
                "paddw %%mm1, %%mm2             \n\t"\
                "paddw %%mm1, %%mm5             \n\t"\
                "paddw %%mm1, %%mm4             \n\t"\
                "punpckhwd %%mm0, %%mm0         \n\t"\
                "punpckhwd %%mm6, %%mm6         \n\t"\
                "punpckhwd %%mm3, %%mm3         \n\t"\
                "paddw %%mm7, %%mm0             \n\t"\
                "paddw %%mm7, %%mm6             \n\t"\
                "paddw %%mm7, %%mm3             \n\t"\
                /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
                "packuswb %%mm0, %%mm2          \n\t"\
                "packuswb %%mm6, %%mm5          \n\t"\
                "packuswb %%mm3, %%mm4          \n\t"\
                "pxor %%mm7, %%mm7              \n\t"
#define YSCALEYUV2RGB1b(index, c)  REAL_YSCALEYUV2RGB1b(index, c)

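/*
 * WRITEBGR32: takes the packed bytes left by the conversion macros
 * (B in %%mm2, G in %%mm4, R in %%mm5, %%mm7 = 0) and interleaves them
 * into 4-byte pixels (B,G,R,0 in memory), storing 8 pixels per iteration
 * with MOVNTQ.
 */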
#define REAL_WRITEBGR32(dst, dstw, index) \
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
                        "movq %%mm2, %%mm1              \n\t" /* B */\
                        "movq %%mm5, %%mm6              \n\t" /* R */\
                        "punpcklbw %%mm4, %%mm2         \n\t" /* GBGBGBGB 0 */\
                        "punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R 0 */\
                        "punpckhbw %%mm4, %%mm1         \n\t" /* GBGBGBGB 2 */\
                        "punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R 2 */\
                        "movq %%mm2, %%mm0              \n\t" /* GBGBGBGB 0 */\
                        "movq %%mm1, %%mm3              \n\t" /* GBGBGBGB 2 */\
                        "punpcklwd %%mm5, %%mm0         \n\t" /* 0RGB0RGB 0 */\
                        "punpckhwd %%mm5, %%mm2         \n\t" /* 0RGB0RGB 1 */\
                        "punpcklwd %%mm6, %%mm1         \n\t" /* 0RGB0RGB 2 */\
                        "punpckhwd %%mm6, %%mm3         \n\t" /* 0RGB0RGB 3 */\
\
                        MOVNTQ(%%mm0, (dst, index, 4))\
                        MOVNTQ(%%mm2, 8(dst, index, 4))\
                        MOVNTQ(%%mm1, 16(dst, index, 4))\
                        MOVNTQ(%%mm3, 24(dst, index, 4))\
\
                        "add $8, "#index"               \n\t"\
                        "cmp "#dstw", "#index"          \n\t"\
                        " jb 1b                         \n\t"
#define WRITEBGR32(dst, dstw, index)  REAL_WRITEBGR32(dst, dstw, index)

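/*
 * WRITEBGR16: masks B/G/R down to 5/6/5 bits and merges them into 16-bit
 * 5:6:5 pixels (B in the low bits), 4 pixels per MOVNTQ.
 */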
#define REAL_WRITEBGR16(dst, dstw, index) \
                        "pand "MANGLE(bF8)", %%mm2      \n\t" /* B */\
                        "pand "MANGLE(bFC)", %%mm4      \n\t" /* G */\
                        "pand "MANGLE(bF8)", %%mm5      \n\t" /* R */\
                        "psrlq $3, %%mm2                \n\t"\
\
                        "movq %%mm2, %%mm1              \n\t"\
                        "movq %%mm4, %%mm3              \n\t"\
\
                        "punpcklbw %%mm7, %%mm3         \n\t"\
                        "punpcklbw %%mm5, %%mm2         \n\t"\
                        "punpckhbw %%mm7, %%mm4         \n\t"\
                        "punpckhbw %%mm5, %%mm1         \n\t"\
\
                        "psllq $3, %%mm3                \n\t"\
                        "psllq $3, %%mm4                \n\t"\
\
                        "por %%mm3, %%mm2               \n\t"\
                        "por %%mm4, %%mm1               \n\t"\
\
                        MOVNTQ(%%mm2, (dst, index, 2))\
                        MOVNTQ(%%mm1, 8(dst, index, 2))\
\
                        "add $8, "#index"               \n\t"\
                        "cmp "#dstw", "#index"          \n\t"\
                        " jb 1b                         \n\t"
#define WRITEBGR16(dst, dstw, index)  REAL_WRITEBGR16(dst, dstw, index)

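/*
 * WRITEBGR15: same idea with a 5:5:5 layout; bit 15 is left clear.
 */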
#define REAL_WRITEBGR15(dst, dstw, index) \
                        "pand "MANGLE(bF8)", %%mm2      \n\t" /* B */\
                        "pand "MANGLE(bF8)", %%mm4      \n\t" /* G */\
                        "pand "MANGLE(bF8)", %%mm5      \n\t" /* R */\
                        "psrlq $3, %%mm2                \n\t"\
                        "psrlq $1, %%mm5                \n\t"\
\
                        "movq %%mm2, %%mm1              \n\t"\
                        "movq %%mm4, %%mm3              \n\t"\
\
                        "punpcklbw %%mm7, %%mm3         \n\t"\
                        "punpcklbw %%mm5, %%mm2         \n\t"\
                        "punpckhbw %%mm7, %%mm4         \n\t"\
                        "punpckhbw %%mm5, %%mm1         \n\t"\
\
                        "psllq $2, %%mm3                \n\t"\
                        "psllq $2, %%mm4                \n\t"\
\
                        "por %%mm3, %%mm2               \n\t"\
                        "por %%mm4, %%mm1               \n\t"\
\
                        MOVNTQ(%%mm2, (dst, index, 2))\
                        MOVNTQ(%%mm1, 8(dst, index, 2))\
\
                        "add $8, "#index"               \n\t"\
                        "cmp "#dstw", "#index"          \n\t"\
                        " jb 1b                         \n\t"
#define WRITEBGR15(dst, dstw, index)  REAL_WRITEBGR15(dst, dstw, index)

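/*
 * 24-bit output is awkward with 8-byte MMX stores (3 bytes per pixel), so
 * there are three writers: the shift/mask WRITEBGR24OLD, the plain-MMX
 * WRITEBGR24MMX, and WRITEBGR24MMX2 which uses pshufw; the #ifdef after
 * them picks the MMX2 version when available.
 */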
#define WRITEBGR24OLD(dst, dstw, index) \
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
                        "movq %%mm2, %%mm1              \n\t" /* B */\
                        "movq %%mm5, %%mm6              \n\t" /* R */\
                        "punpcklbw %%mm4, %%mm2         \n\t" /* GBGBGBGB 0 */\
                        "punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R 0 */\
                        "punpckhbw %%mm4, %%mm1         \n\t" /* GBGBGBGB 2 */\
                        "punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R 2 */\
                        "movq %%mm2, %%mm0              \n\t" /* GBGBGBGB 0 */\
                        "movq %%mm1, %%mm3              \n\t" /* GBGBGBGB 2 */\
                        "punpcklwd %%mm5, %%mm0         \n\t" /* 0RGB0RGB 0 */\
                        "punpckhwd %%mm5, %%mm2         \n\t" /* 0RGB0RGB 1 */\
                        "punpcklwd %%mm6, %%mm1         \n\t" /* 0RGB0RGB 2 */\
                        "punpckhwd %%mm6, %%mm3         \n\t" /* 0RGB0RGB 3 */\
\
                        "movq %%mm0, %%mm4              \n\t" /* 0RGB0RGB 0 */\
                        "psrlq $8, %%mm0                \n\t" /* 00RGB0RG 0 */\
                        "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\
                        "pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\
                        "por %%mm4, %%mm0               \n\t" /* 00RGBRGB 0 */\
                        "movq %%mm2, %%mm4              \n\t" /* 0RGB0RGB 1 */\
                        "psllq $48, %%mm2               \n\t" /* GB000000 1 */\
                        "por %%mm2, %%mm0               \n\t" /* GBRGBRGB 0 */\
\
                        "movq %%mm4, %%mm2              \n\t" /* 0RGB0RGB 1 */\
                        "psrld $16, %%mm4               \n\t" /* 000R000R 1 */\
                        "psrlq $24, %%mm2               \n\t" /* 0000RGB0 1.5 */\
                        "por %%mm4, %%mm2               \n\t" /* 000RRGBR 1 */\
                        "pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\
                        "movq %%mm1, %%mm4              \n\t" /* 0RGB0RGB 2 */\
                        "psrlq $8, %%mm1                \n\t" /* 00RGB0RG 2 */\
                        "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\
                        "pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\
                        "por %%mm4, %%mm1               \n\t" /* 00RGBRGB 2 */\
                        "movq %%mm1, %%mm4              \n\t" /* 00RGBRGB 2 */\
                        "psllq $32, %%mm1               \n\t" /* BRGB0000 2 */\
                        "por %%mm1, %%mm2               \n\t" /* BRGBRGBR 1 */\
\
                        "psrlq $32, %%mm4               \n\t" /* 000000RG 2.5 */\
                        "movq %%mm3, %%mm5              \n\t" /* 0RGB0RGB 3 */\
                        "psrlq $8, %%mm3                \n\t" /* 00RGB0RG 3 */\
                        "pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\
                        "pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\
                        "por %%mm5, %%mm3               \n\t" /* 00RGBRGB 3 */\
                        "psllq $16, %%mm3               \n\t" /* RGBRGB00 3 */\
                        "por %%mm4, %%mm3               \n\t" /* RGBRGBRG 2.5 */\
\
                        MOVNTQ(%%mm0, (dst))\
                        MOVNTQ(%%mm2, 8(dst))\
                        MOVNTQ(%%mm3, 16(dst))\
                        "add $24, "#dst"                \n\t"\
\
                        "add $8, "#index"               \n\t"\
                        "cmp "#dstw", "#index"          \n\t"\
                        " jb 1b                         \n\t"

#define WRITEBGR24MMX(dst, dstw, index) \
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
                        "movq %%mm2, %%mm1              \n\t" /* B */\
                        "movq %%mm5, %%mm6              \n\t" /* R */\
                        "punpcklbw %%mm4, %%mm2         \n\t" /* GBGBGBGB 0 */\
                        "punpcklbw %%mm7, %%mm5         \n\t" /* 0R0R0R0R 0 */\
                        "punpckhbw %%mm4, %%mm1         \n\t" /* GBGBGBGB 2 */\
                        "punpckhbw %%mm7, %%mm6         \n\t" /* 0R0R0R0R 2 */\
                        "movq %%mm2, %%mm0              \n\t" /* GBGBGBGB 0 */\
                        "movq %%mm1, %%mm3              \n\t" /* GBGBGBGB 2 */\
                        "punpcklwd %%mm5, %%mm0         \n\t" /* 0RGB0RGB 0 */\
                        "punpckhwd %%mm5, %%mm2         \n\t" /* 0RGB0RGB 1 */\
                        "punpcklwd %%mm6, %%mm1         \n\t" /* 0RGB0RGB 2 */\
                        "punpckhwd %%mm6, %%mm3         \n\t" /* 0RGB0RGB 3 */\
\
                        "movq %%mm0, %%mm4              \n\t" /* 0RGB0RGB 0 */\
                        "movq %%mm2, %%mm6              \n\t" /* 0RGB0RGB 1 */\
                        "movq %%mm1, %%mm5              \n\t" /* 0RGB0RGB 2 */\
                        "movq %%mm3, %%mm7              \n\t" /* 0RGB0RGB 3 */\
\
                        "psllq $40, %%mm0               \n\t" /* RGB00000 0 */\
                        "psllq $40, %%mm2               \n\t" /* RGB00000 1 */\
                        "psllq $40, %%mm1               \n\t" /* RGB00000 2 */\
                        "psllq $40, %%mm3               \n\t" /* RGB00000 3 */\
\
                        "punpckhdq %%mm4, %%mm0         \n\t" /* 0RGBRGB0 0 */\
                        "punpckhdq %%mm6, %%mm2         \n\t" /* 0RGBRGB0 1 */\
                        "punpckhdq %%mm5, %%mm1         \n\t" /* 0RGBRGB0 2 */\
                        "punpckhdq %%mm7, %%mm3         \n\t" /* 0RGBRGB0 3 */\
\
                        "psrlq $8, %%mm0                \n\t" /* 00RGBRGB 0 */\
                        "movq %%mm2, %%mm6              \n\t" /* 0RGBRGB0 1 */\
                        "psllq $40, %%mm2               \n\t" /* GB000000 1 */\
                        "por %%mm2, %%mm0               \n\t" /* GBRGBRGB 0 */\
                        MOVNTQ(%%mm0, (dst))\
\
                        "psrlq $24, %%mm6               \n\t" /* 0000RGBR 1 */\
                        "movq %%mm1, %%mm5              \n\t" /* 0RGBRGB0 2 */\
                        "psllq $24, %%mm1               \n\t" /* BRGB0000 2 */\
                        "por %%mm1, %%mm6               \n\t" /* BRGBRGBR 1 */\
                        MOVNTQ(%%mm6, 8(dst))\
\
                        "psrlq $40, %%mm5               \n\t" /* 000000RG 2 */\
                        "psllq $8, %%mm3                \n\t" /* RGBRGB00 3 */\
                        "por %%mm3, %%mm5               \n\t" /* RGBRGBRG 2 */\
                        MOVNTQ(%%mm5, 16(dst))\
\
                        "add $24, "#dst"                \n\t"\
\
                        "add $8, "#index"               \n\t"\
                        "cmp "#dstw", "#index"          \n\t"\
                        " jb 1b                         \n\t"

#define WRITEBGR24MMX2(dst, dstw, index) \
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
                        "movq "MANGLE(M24A)", %%mm0     \n\t"\
                        "movq "MANGLE(M24C)", %%mm7     \n\t"\
                        "pshufw $0x50, %%mm2, %%mm1     \n\t" /* B3 B2 B3 B2  B1 B0 B1 B0 */\
                        "pshufw $0x50, %%mm4, %%mm3     \n\t" /* G3 G2 G3 G2  G1 G0 G1 G0 */\
                        "pshufw $0x00, %%mm5, %%mm6     \n\t" /* R1 R0 R1 R0  R1 R0 R1 R0 */\
\
                        "pand %%mm0, %%mm1              \n\t" /*    B2        B1       B0 */\
                        "pand %%mm0, %%mm3              \n\t" /*    G2        G1       G0 */\
                        "pand %%mm7, %%mm6              \n\t" /*       R1        R0       */\
\
                        "psllq $8, %%mm3                \n\t" /* G2        G1       G0    */\
                        "por %%mm1, %%mm6               \n\t"\
                        "por %%mm3, %%mm6               \n\t"\
                        MOVNTQ(%%mm6, (dst))\
\
                        "psrlq $8, %%mm4                \n\t" /* 00 G7 G6 G5  G4 G3 G2 G1 */\
                        "pshufw $0xA5, %%mm2, %%mm1     \n\t" /* B5 B4 B5 B4  B3 B2 B3 B2 */\
                        "pshufw $0x55, %%mm4, %%mm3     \n\t" /* G4 G3 G4 G3  G4 G3 G4 G3 */\
                        "pshufw $0xA5, %%mm5, %%mm6     \n\t" /* R5 R4 R5 R4  R3 R2 R3 R2 */\
\
                        "pand "MANGLE(M24B)", %%mm1     \n\t" /* B5       B4        B3    */\
                        "pand %%mm7, %%mm3              \n\t" /*       G4        G3       */\
                        "pand %%mm0, %%mm6              \n\t" /*    R4        R3       R2 */\
\
                        "por %%mm1, %%mm3               \n\t" /* B5    G4 B4     G3 B3    */\
                        "por %%mm3, %%mm6               \n\t"\
                        MOVNTQ(%%mm6, 8(dst))\
\
                        "pshufw $0xFF, %%mm2, %%mm1     \n\t" /* B7 B6 B7 B6  B7 B6 B6 B7 */\
                        "pshufw $0xFA, %%mm4, %%mm3     \n\t" /* 00 G7 00 G7  G6 G5 G6 G5 */\
                        "pshufw $0xFA, %%mm5, %%mm6     \n\t" /* R7 R6 R7 R6  R5 R4 R5 R4 */\
\
                        "pand %%mm7, %%mm1              \n\t" /*       B7        B6       */\
                        "pand %%mm0, %%mm3              \n\t" /*    G7        G6       G5 */\
                        "pand "MANGLE(M24B)", %%mm6     \n\t" /* R7       R6        R5    */\
\
                        "por %%mm1, %%mm3               \n\t"\
                        "por %%mm3, %%mm6               \n\t"\
                        MOVNTQ(%%mm6, 16(dst))\
\
                        "add $24, "#dst"                \n\t"\
\
                        "add $8, "#index"               \n\t"\
                        "cmp "#dstw", "#index"          \n\t"\
                        " jb 1b                         \n\t"

#ifdef HAVE_MMX2
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX2(dst, dstw, index)
#else
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX(dst, dstw, index)
#endif

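/*
 * WRITEYUY2: interleaves the 8 luma samples (in %%mm1/%%mm7) with the 4+4
 * chroma samples (U in %%mm3, V in %%mm4) into YUYV (YUY2) order and stores
 * 16 bytes per iteration.
 */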
#define REAL_WRITEYUY2(dst, dstw, index) \
                        "packuswb %%mm3, %%mm3          \n\t"\
                        "packuswb %%mm4, %%mm4          \n\t"\
                        "packuswb %%mm7, %%mm1          \n\t"\
                        "punpcklbw %%mm4, %%mm3         \n\t"\
                        "movq %%mm1, %%mm7              \n\t"\
                        "punpcklbw %%mm3, %%mm1         \n\t"\
                        "punpckhbw %%mm3, %%mm7         \n\t"\
\
                        MOVNTQ(%%mm1, (dst, index, 2))\
                        MOVNTQ(%%mm7, 8(dst, index, 2))\
\
                        "add $8, "#index"               \n\t"\
                        "cmp "#dstw", "#index"          \n\t"\
                        " jb 1b                         \n\t"
#define WRITEYUY2(dst, dstw, index)  REAL_WRITEYUY2(dst, dstw, index)


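/**
 * vertical scale; writes planar YUV (YV12) output.
 * Dispatches to the MMX macros above, the AltiVec implementation, or the
 * plain C fallback, depending on what was compiled in.
 */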
930 static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
931                                     int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
932                                     uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
933 {
934 #ifdef HAVE_MMX
935         if(c->flags & SWS_ACCURATE_RND){
936                 if(uDest){
937                         YSCALEYUV2YV12X_ACCURATE(   0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
938                         YSCALEYUV2YV12X_ACCURATE(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
939                 }
940
941                 YSCALEYUV2YV12X_ACCURATE(0, LUM_MMX_FILTER_OFFSET, dest, dstW)
942         }else{
943                 if(uDest){
944                         YSCALEYUV2YV12X(   0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
945                         YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
946                 }
947
948                 YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET, dest, dstW)
949         }
950 #else
951 #ifdef HAVE_ALTIVEC
952         yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
953                               chrFilter, chrSrc, chrFilterSize,
954                               dest, uDest, vDest, dstW, chrDstW);
955 #else //HAVE_ALTIVEC
956         yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
957                     chrFilter, chrSrc, chrFilterSize,
958                     dest, uDest, vDest, dstW, chrDstW);
959 #endif //!HAVE_ALTIVEC
960 #endif
961 }
962
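/**
 * vertical scale YV12 to NV12/NV21 (interleaved chroma); plain C path only
 */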
963 static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
964                                      int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
965                                      uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
966 {
967         yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
968                      chrFilter, chrSrc, chrFilterSize,
969                      dest, uDest, dstW, chrDstW, dstFormat);
970 }
971
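/**
 * unscaled vertical pass for a single source line: descale (>>7) and clip
 * the 16-bit intermediate to planar YV12
 */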
972 static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
973                                     uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
974 {
975 #ifdef HAVE_MMX
976         if(uDest != NULL)
977         {
978                 asm volatile(
979                                 YSCALEYUV2YV121
980                                 :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
981                                 "g" (-chrDstW)
982                                 : "%"REG_a
983                         );
984
985                 asm volatile(
986                                 YSCALEYUV2YV121
987                                 :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
988                                 "g" (-chrDstW)
989                                 : "%"REG_a
990                         );
991         }
992
993         asm volatile(
994                 YSCALEYUV2YV121
995                 :: "r" (lumSrc + dstW), "r" (dest + dstW),
996                 "g" (-dstW)
997                 : "%"REG_a
998         );
999 #else
1000         int i;
1001         for(i=0; i<dstW; i++)
1002         {
1003                 int val= lumSrc[i]>>7;
1004                 
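                // fast clip: bit 8 is set for 256..511 and for small negative values,
                // i.e. whenever the (bounded) intermediate falls outside 0..255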
1005                 if(val&256){
1006                         if(val<0) val=0;
1007                         else      val=255;
1008                 }
1009
1010                 dest[i]= val;
1011         }
1012
1013         if(uDest != NULL)
1014                 for(i=0; i<chrDstW; i++)
1015                 {
1016                         int u=chrSrc[i]>>7;
1017                         int v=chrSrc[i + 2048]>>7;
1018
1019                         if((u|v)&256){
1020                                 if(u<0)         u=0;
1021                                 else if (u>255) u=255;
1022                                 if(v<0)         v=0;
1023                                 else if (v>255) v=255;
1024                         }
1025
1026                         uDest[i]= u;
1027                         vDest[i]= v;
1028                 }
1029 #endif
1030 }
1031
1032
1033 /**
1034  * vertical scale YV12 to RGB
1035  */
1036 static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
1037                                     int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
1038                             uint8_t *dest, long dstW, long dstY)
1039 {
1040 #ifdef HAVE_MMX
1041     long dummy=0;
1042     if(c->flags & SWS_ACCURATE_RND){
1043                 switch(c->dstFormat){
1044                 case PIX_FMT_RGB32:
1045                                 YSCALEYUV2PACKEDX_ACCURATE
1046                                 YSCALEYUV2RGBX
1047                                 WRITEBGR32(%4, %5, %%REGa)
1048
1049                                 YSCALEYUV2PACKEDX_END
1050                         return;
1051                 case PIX_FMT_BGR24:
1052                                 YSCALEYUV2PACKEDX_ACCURATE
1053                                 YSCALEYUV2RGBX
1054                                 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
1055                                 "add %4, %%"REG_c"                      \n\t"
1056                                 WRITEBGR24(%%REGc, %5, %%REGa)
1057
1058
1059                         :: "r" (&c->redDither), 
1060                            "m" (dummy), "m" (dummy), "m" (dummy),
1061                            "r" (dest), "m" (dstW)
1062                         : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
1063                         );
1064                         return;
1065                 case PIX_FMT_BGR555:
1066                                 YSCALEYUV2PACKEDX_ACCURATE
1067                                 YSCALEYUV2RGBX
1068                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1069 #ifdef DITHER1XBPP
1070                                 "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
1071                                 "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
1072                                 "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
1073 #endif
1074
1075                                 WRITEBGR15(%4, %5, %%REGa)
1076                                 YSCALEYUV2PACKEDX_END
1077                         return;
1078                 case PIX_FMT_BGR565:
1079                                 YSCALEYUV2PACKEDX_ACCURATE
1080                                 YSCALEYUV2RGBX
1081                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1082 #ifdef DITHER1XBPP
1083                                 "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
1084                                 "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
1085                                 "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
1086 #endif
1087
1088                                 WRITEBGR16(%4, %5, %%REGa)
1089                                 YSCALEYUV2PACKEDX_END
1090                         return;
1091                 case PIX_FMT_YUYV422:
1092                                 YSCALEYUV2PACKEDX_ACCURATE
1093                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1094
1095                                 "psraw $3, %%mm3                \n\t"
1096                                 "psraw $3, %%mm4                \n\t"
1097                                 "psraw $3, %%mm1                \n\t"
1098                                 "psraw $3, %%mm7                \n\t"
1099                                 WRITEYUY2(%4, %5, %%REGa)
1100                                 YSCALEYUV2PACKEDX_END
1101                         return;
1102                 }
1103     }else{
1104         switch(c->dstFormat)
1105         {
1106         case PIX_FMT_RGB32:
1107                                 YSCALEYUV2PACKEDX
1108                                 YSCALEYUV2RGBX
1109                                 WRITEBGR32(%4, %5, %%REGa)
1110                                 YSCALEYUV2PACKEDX_END
1111                 return;
1112         case PIX_FMT_BGR24:
1113                                 YSCALEYUV2PACKEDX
1114                                 YSCALEYUV2RGBX
1115                                 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
1116                                 "add %4, %%"REG_c"                      \n\t"
1117                                 WRITEBGR24(%%REGc, %5, %%REGa)
1118
1119                         :: "r" (&c->redDither), 
1120                            "m" (dummy), "m" (dummy), "m" (dummy),
1121                            "r" (dest), "m" (dstW)
1122                         : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
1123                         );
1124                 return;
1125         case PIX_FMT_BGR555:
1126                                 YSCALEYUV2PACKEDX
1127                                 YSCALEYUV2RGBX
1128                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1129 #ifdef DITHER1XBPP
1130                                 "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
1131                                 "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
1132                                 "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
1133 #endif
1134
1135                                 WRITEBGR15(%4, %5, %%REGa)
1136                                 YSCALEYUV2PACKEDX_END
1137                 return;
1138         case PIX_FMT_BGR565:
1139                                 YSCALEYUV2PACKEDX
1140                                 YSCALEYUV2RGBX
1141                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1142 #ifdef DITHER1XBPP
1143                                 "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
1144                                 "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
1145                                 "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
1146 #endif
1147
1148                                 WRITEBGR16(%4, %5, %%REGa)
1149                                 YSCALEYUV2PACKEDX_END
1150                 return;
1151         case PIX_FMT_YUYV422:
1152                                 YSCALEYUV2PACKEDX
1153                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1154
1155                                 "psraw $3, %%mm3                \n\t"
1156                                 "psraw $3, %%mm4                \n\t"
1157                                 "psraw $3, %%mm1                \n\t"
1158                                 "psraw $3, %%mm7                \n\t"
1159                                 WRITEYUY2(%4, %5, %%REGa)
1160                                 YSCALEYUV2PACKEDX_END
1161                 return;
1162         }
1163     }
1164 #endif
1165 #ifdef HAVE_ALTIVEC
1166                 /* The following list of supported dstFormat values should
1167                    match what's found in the body of altivec_yuv2packedX() */
1168                 if(c->dstFormat==PIX_FMT_ABGR  || c->dstFormat==PIX_FMT_BGRA  ||
1169                    c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
1170                    c->dstFormat==PIX_FMT_RGBA  || c->dstFormat==PIX_FMT_ARGB)
1171                         altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
1172                                     chrFilter, chrSrc, chrFilterSize,
1173                                     dest, dstW, dstY);
1174                 else
1175 #endif
1176                         yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
1177                                     chrFilter, chrSrc, chrFilterSize,
1178                                     dest, dstW, dstY);
1179 }
1180
1181 /**
1182  * vertical bilinear scale YV12 to RGB
1183  */
1184 static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
1185                             uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
1186 {
1187         int yalpha1=yalpha^4095;
1188         int uvalpha1=uvalpha^4095;
1189         int i;
1190
1191 #if 0 //isn't used
1192         if(flags&SWS_FULL_CHR_H_INT)
1193         {
1194                 switch(dstFormat)
1195                 {
1196 #ifdef HAVE_MMX
1197                 case PIX_FMT_RGB32:
1198                         asm volatile(
1199
1200
1201 FULL_YSCALEYUV2RGB
1202                         "punpcklbw %%mm1, %%mm3         \n\t" // BGBGBGBG
1203                         "punpcklbw %%mm7, %%mm0         \n\t" // R0R0R0R0
1204
1205                         "movq %%mm3, %%mm1              \n\t"
1206                         "punpcklwd %%mm0, %%mm3         \n\t" // BGR0BGR0
1207                         "punpckhwd %%mm0, %%mm1         \n\t" // BGR0BGR0
1208
1209                         MOVNTQ(%%mm3, (%4, %%REGa, 4))
1210                         MOVNTQ(%%mm1, 8(%4, %%REGa, 4))
1211
1212                         "add $4, %%"REG_a"              \n\t"
1213                         "cmp %5, %%"REG_a"              \n\t"
1214                         " jb 1b                         \n\t"
1215
1216
1217                         :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW),
1218                         "m" (yalpha1), "m" (uvalpha1)
1219                         : "%"REG_a
1220                         );
1221                         break;
1222                 case PIX_FMT_BGR24:
1223                         asm volatile(
1224
1225 FULL_YSCALEYUV2RGB
1226
1227                                                                 // lsb ... msb
1228                         "punpcklbw %%mm1, %%mm3         \n\t" // BGBGBGBG
1229                         "punpcklbw %%mm7, %%mm0         \n\t" // R0R0R0R0
1230
1231                         "movq %%mm3, %%mm1              \n\t"
1232                         "punpcklwd %%mm0, %%mm3         \n\t" // BGR0BGR0
1233                         "punpckhwd %%mm0, %%mm1         \n\t" // BGR0BGR0
1234
1235                         "movq %%mm3, %%mm2              \n\t" // BGR0BGR0
1236                         "psrlq $8, %%mm3                \n\t" // GR0BGR00
1237                         "pand "MANGLE(bm00000111)", %%mm2\n\t" // BGR00000
1238                         "pand "MANGLE(bm11111000)", %%mm3\n\t" // 000BGR00
1239                         "por %%mm2, %%mm3               \n\t" // BGRBGR00
1240                         "movq %%mm1, %%mm2              \n\t"
1241                         "psllq $48, %%mm1               \n\t" // 000000BG
1242                         "por %%mm1, %%mm3               \n\t" // BGRBGRBG
1243
1244                         "movq %%mm2, %%mm1              \n\t" // BGR0BGR0
1245                         "psrld $16, %%mm2               \n\t" // R000R000
1246                         "psrlq $24, %%mm1               \n\t" // 0BGR0000
1247                         "por %%mm2, %%mm1               \n\t" // RBGRR000
1248
1249                         "mov %4, %%"REG_b"              \n\t"
1250                         "add %%"REG_a", %%"REG_b"       \n\t"
1251
1252 #ifdef HAVE_MMX2
1253                         //FIXME Alignment
1254                         "movntq %%mm3, (%%"REG_b", %%"REG_a", 2)\n\t"
1255                         "movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2)\n\t"
1256 #else
1257                         "movd %%mm3, (%%"REG_b", %%"REG_a", 2)  \n\t"
1258                         "psrlq $32, %%mm3               \n\t"
1259                         "movd %%mm3, 4(%%"REG_b", %%"REG_a", 2) \n\t"
1260                         "movd %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
1261 #endif
1262                         "add $4, %%"REG_a"              \n\t"
1263                         "cmp %5, %%"REG_a"              \n\t"
1264                         " jb 1b                         \n\t"
1265
1266                         :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
1267                         "m" (yalpha1), "m" (uvalpha1)
1268                         : "%"REG_a, "%"REG_b
1269                         );
1270                         break;
1271                 case PIX_FMT_BGR555:
1272                         asm volatile(
1273
1274 FULL_YSCALEYUV2RGB
1275 #ifdef DITHER1XBPP
1276                         "paddusb "MANGLE(g5Dither)", %%mm1\n\t"
1277                         "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
1278                         "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
1279 #endif
1280                         "punpcklbw %%mm7, %%mm1         \n\t" // 0G0G0G0G
1281                         "punpcklbw %%mm7, %%mm3         \n\t" // 0B0B0B0B
1282                         "punpcklbw %%mm7, %%mm0         \n\t" // 0R0R0R0R
1283
1284                         "psrlw $3, %%mm3                \n\t"
1285                         "psllw $2, %%mm1                \n\t"
1286                         "psllw $7, %%mm0                \n\t"
1287                         "pand "MANGLE(g15Mask)", %%mm1  \n\t"
1288                         "pand "MANGLE(r15Mask)", %%mm0  \n\t"
1289
1290                         "por %%mm3, %%mm1               \n\t"
1291                         "por %%mm1, %%mm0               \n\t"
1292
1293                         MOVNTQ(%%mm0, (%4, %%REGa, 2))
1294
1295                         "add $4, %%"REG_a"              \n\t"
1296                         "cmp %5, %%"REG_a"              \n\t"
1297                         " jb 1b                         \n\t"
1298
1299                         :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
1300                         "m" (yalpha1), "m" (uvalpha1)
1301                         : "%"REG_a
1302                         );
1303                         break;
1304                 case PIX_FMT_BGR565:
1305                         asm volatile(
1306
1307 FULL_YSCALEYUV2RGB
1308 #ifdef DITHER1XBPP
1309                         "paddusb "MANGLE(g6Dither)", %%mm1\n\t"
1310                         "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
1311                         "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
1312 #endif
1313                         "punpcklbw %%mm7, %%mm1         \n\t" // 0G0G0G0G
1314                         "punpcklbw %%mm7, %%mm3         \n\t" // 0B0B0B0B
1315                         "punpcklbw %%mm7, %%mm0         \n\t" // 0R0R0R0R
1316
1317                         "psrlw $3, %%mm3                \n\t"
1318                         "psllw $3, %%mm1                \n\t"
1319                         "psllw $8, %%mm0                \n\t"
1320                         "pand "MANGLE(g16Mask)", %%mm1  \n\t"
1321                         "pand "MANGLE(r16Mask)", %%mm0  \n\t"
1322
1323                         "por %%mm3, %%mm1               \n\t"
1324                         "por %%mm1, %%mm0               \n\t"
1325
1326                         MOVNTQ(%%mm0, (%4, %%REGa, 2))
1327
1328                         "add $4, %%"REG_a"              \n\t"
1329                         "cmp %5, %%"REG_a"              \n\t"
1330                         " jb 1b                         \n\t"
1331
1332                         :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
1333                         "m" (yalpha1), "m" (uvalpha1)
1334                         : "%"REG_a
1335                         );
1336                 break;
1337 #endif
1338                 case PIX_FMT_BGR32:
1339 #ifndef HAVE_MMX
1340                 case PIX_FMT_RGB32:
1341 #endif
1342                 if(dstFormat==PIX_FMT_RGB32)
1343                 {
1344                         int i;
1345 #ifdef WORDS_BIGENDIAN
1346                         dest++;
1347 #endif
1348                         for(i=0;i<dstW;i++){
1349                                 // vertical linear interpolation && yuv2rgb in a single step:
1350                                 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
1351                                 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
1352                                 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
1353                                 dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
1354                                 dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
1355                                 dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
1356                                 dest+= 4;
1357                         }
1358                 }
1359                 else if(dstFormat==PIX_FMT_BGR24)
1360                 {
1361                         int i;
1362                         for(i=0;i<dstW;i++){
1363                                 // vertical linear interpolation && yuv2rgb in a single step:
1364                                 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
1365                                 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
1366                                 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
1367                                 dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
1368                                 dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
1369                                 dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
1370                                 dest+= 3;
1371                         }
1372                 }
1373                 else if(dstFormat==PIX_FMT_BGR565)
1374                 {
1375                         int i;
1376                         for(i=0;i<dstW;i++){
1377                                 // vertical linear interpolation && yuv2rgb in a single step:
1378                                 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
1379                                 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
1380                                 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
1381
1382                                 ((uint16_t*)dest)[i] =
1383                                         clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
1384                                         clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
1385                                         clip_table16r[(Y + yuvtab_3343[V]) >>13];
1386                         }
1387                 }
1388                 else if(dstFormat==PIX_FMT_BGR555)
1389                 {
1390                         int i;
1391                         for(i=0;i<dstW;i++){
1392                                 // vertical linear interpolation && yuv2rgb in a single step:
1393                                 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
1394                                 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
1395                                 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
1396
1397                                 ((uint16_t*)dest)[i] =
1398                                         clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
1399                                         clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
1400                                         clip_table15r[(Y + yuvtab_3343[V]) >>13];
1401                         }
1402                 }
1403         }//FULL_UV_IPOL
1404         else
1405         {
1406 #endif // if 0
1407 #ifdef HAVE_MMX
1408         switch(c->dstFormat)
1409         {
1410 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1411         case PIX_FMT_RGB32:
1412                         asm volatile(
1413                                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1414                                 "mov %4, %%"REG_b"                      \n\t"
1415                                 "push %%"REG_BP"                        \n\t"
1416                                 YSCALEYUV2RGB(%%REGBP, %5)
1417                                 WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
1418                                 "pop %%"REG_BP"                         \n\t"
1419                                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1420
1421                         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1422                         "a" (&c->redDither)
1423                         );
1424                         return;
1425         case PIX_FMT_BGR24:
1426                         asm volatile(
1427                                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1428                                 "mov %4, %%"REG_b"                      \n\t"
1429                                 "push %%"REG_BP"                        \n\t"
1430                                 YSCALEYUV2RGB(%%REGBP, %5)
1431                                 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1432                                 "pop %%"REG_BP"                         \n\t"
1433                                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1434                         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1435                         "a" (&c->redDither)
1436                         );
1437                         return;
1438         case PIX_FMT_BGR555:
1439                         asm volatile(
1440                                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1441                                 "mov %4, %%"REG_b"                      \n\t"
1442                                 "push %%"REG_BP"                        \n\t"
1443                                 YSCALEYUV2RGB(%%REGBP, %5)
1444                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1445 #ifdef DITHER1XBPP
1446                                 "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
1447                                 "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
1448                                 "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
1449 #endif
1450
1451                                 WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
1452                                 "pop %%"REG_BP"                         \n\t"
1453                                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1454
1455                         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1456                         "a" (&c->redDither)
1457                         );
1458                         return;
1459         case PIX_FMT_BGR565:
1460                         asm volatile(
1461                                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1462                                 "mov %4, %%"REG_b"                      \n\t"
1463                                 "push %%"REG_BP"                        \n\t"
1464                                 YSCALEYUV2RGB(%%REGBP, %5)
1465                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1466 #ifdef DITHER1XBPP
1467                                 "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
1468                                 "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
1469                                 "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
1470 #endif
1471
1472                                 WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
1473                                 "pop %%"REG_BP"                         \n\t"
1474                                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1475                         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1476                         "a" (&c->redDither)
1477                         );
1478                         return;
1479         case PIX_FMT_YUYV422:
1480                         asm volatile(
1481                                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1482                                 "mov %4, %%"REG_b"                      \n\t"
1483                                 "push %%"REG_BP"                        \n\t"
1484                                 YSCALEYUV2PACKED(%%REGBP, %5)
1485                                 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1486                                 "pop %%"REG_BP"                         \n\t"
1487                                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1488                         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1489                         "a" (&c->redDither)
1490                         );
1491                         return;
1492         default: break;
1493         }
1494 #endif //HAVE_MMX
1495 YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C)
1496 }
1497
1498 /**
1499  * YV12 to RGB without scaling or interpolating
1500  */
1501 static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
1502                             uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
1503 {
1504         const int yalpha1=0;
1505         int i;
1506         
1507         uint16_t *buf1= buf0; //FIXME needed for the rgb1/bgr1
1508         const int yalpha= 4096; //FIXME ...
1509
1510         if(flags&SWS_FULL_CHR_H_INT)
1511         {
1512                 RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
1513                 return;
1514         }
1515
1516 #ifdef HAVE_MMX
1517         if( uvalpha < 2048 ) // note this is not correct (it shifts chrominance by 0.5 pixels) but it is a bit faster
1518         {
1519                 switch(dstFormat)
1520                 {
1521                 case PIX_FMT_RGB32:
1522                         asm volatile(
1523                                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1524                                 "mov %4, %%"REG_b"                      \n\t"
1525                                 "push %%"REG_BP"                        \n\t"
1526                                 YSCALEYUV2RGB1(%%REGBP, %5)
1527                                 WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
1528                                 "pop %%"REG_BP"                         \n\t"
1529                                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1530
1531                         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1532                         "a" (&c->redDither)
1533                         );
1534                         return;
1535                 case PIX_FMT_BGR24:
1536                         asm volatile(
1537                                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1538                                 "mov %4, %%"REG_b"                      \n\t"
1539                                 "push %%"REG_BP"                        \n\t"
1540                                 YSCALEYUV2RGB1(%%REGBP, %5)
1541                                 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1542                                 "pop %%"REG_BP"                         \n\t"
1543                                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1544
1545                         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1546                         "a" (&c->redDither)
1547                         );
1548                         return;
1549                 case PIX_FMT_BGR555:
1550                         asm volatile(
1551                                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1552                                 "mov %4, %%"REG_b"                      \n\t"
1553                                 "push %%"REG_BP"                        \n\t"
1554                                 YSCALEYUV2RGB1(%%REGBP, %5)
1555                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1556 #ifdef DITHER1XBPP
1557                                 "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
1558                                 "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
1559                                 "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
1560 #endif
1561                                 WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
1562                                 "pop %%"REG_BP"                         \n\t"
1563                                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1564
1565                         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1566                         "a" (&c->redDither)
1567                         );
1568                         return;
1569                 case PIX_FMT_BGR565:
1570                         asm volatile(
1571                                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1572                                 "mov %4, %%"REG_b"                      \n\t"
1573                                 "push %%"REG_BP"                        \n\t"
1574                                 YSCALEYUV2RGB1(%%REGBP, %5)
1575                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1576 #ifdef DITHER1XBPP
1577                                 "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
1578                                 "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
1579                                 "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
1580 #endif
1581
1582                                 WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
1583                                 "pop %%"REG_BP"                         \n\t"
1584                                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1585
1586                         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1587                         "a" (&c->redDither)
1588                         );
1589                         return;
1590                 case PIX_FMT_YUYV422:
1591                         asm volatile(
1592                                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1593                                 "mov %4, %%"REG_b"                      \n\t"
1594                                 "push %%"REG_BP"                        \n\t"
1595                                 YSCALEYUV2PACKED1(%%REGBP, %5)
1596                                 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1597                                 "pop %%"REG_BP"                         \n\t"
1598                                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1599
1600                         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1601                         "a" (&c->redDither)
1602                         );
1603                         return;
1604                 }
1605         }
1606         else
1607         {
1608                 switch(dstFormat)
1609                 {
1610                 case PIX_FMT_RGB32:
1611                         asm volatile(
1612                                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1613                                 "mov %4, %%"REG_b"                      \n\t"
1614                                 "push %%"REG_BP"                        \n\t"
1615                                 YSCALEYUV2RGB1b(%%REGBP, %5)
1616                                 WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
1617                                 "pop %%"REG_BP"                         \n\t"
1618                                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1619
1620                         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1621                         "a" (&c->redDither)
1622                         );
1623                         return;
1624                 case PIX_FMT_BGR24:
1625                         asm volatile(
1626                                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1627                                 "mov %4, %%"REG_b"                      \n\t"
1628                                 "push %%"REG_BP"                        \n\t"
1629                                 YSCALEYUV2RGB1b(%%REGBP, %5)
1630                                 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1631                                 "pop %%"REG_BP"                         \n\t"
1632                                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1633
1634                         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1635                         "a" (&c->redDither)
1636                         );
1637                         return;
1638                 case PIX_FMT_BGR555:
1639                         asm volatile(
1640                                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1641                                 "mov %4, %%"REG_b"                      \n\t"
1642                                 "push %%"REG_BP"                        \n\t"
1643                                 YSCALEYUV2RGB1b(%%REGBP, %5)
1644                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1645 #ifdef DITHER1XBPP
1646                                 "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
1647                                 "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
1648                                 "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
1649 #endif
1650                                 WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
1651                                 "pop %%"REG_BP"                         \n\t"
1652                                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1653
1654                         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1655                         "a" (&c->redDither)
1656                         );
1657                         return;
1658                 case PIX_FMT_BGR565:
1659                         asm volatile(
1660                                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1661                                 "mov %4, %%"REG_b"                      \n\t"
1662                                 "push %%"REG_BP"                        \n\t"
1663                                 YSCALEYUV2RGB1b(%%REGBP, %5)
1664                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1665 #ifdef DITHER1XBPP
1666                                 "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
1667                                 "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
1668                                 "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
1669 #endif
1670
1671                                 WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
1672                                 "pop %%"REG_BP"                         \n\t"
1673                                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1674
1675                         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1676                         "a" (&c->redDither)
1677                         );
1678                         return;
1679                 case PIX_FMT_YUYV422:
1680                         asm volatile(
1681                                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1682                                 "mov %4, %%"REG_b"                      \n\t"
1683                                 "push %%"REG_BP"                        \n\t"
1684                                 YSCALEYUV2PACKED1b(%%REGBP, %5)
1685                                 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1686                                 "pop %%"REG_BP"                         \n\t"
1687                                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1688
1689                         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1690                         "a" (&c->redDither)
1691                         );
1692                         return;
1693                 }
1694         }
1695 #endif
1696         if( uvalpha < 2048 )
1697         {
1698                 YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C)
1699         }else{
1700                 YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C)
1701         }
1702 }
1703
1704 //FIXME yuy2* can read up to 7 samples too many
1705
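/* packed YUYV/UYVY input readers: extract the luma plane and the chroma
   planes (chroma is averaged over the two given source lines) */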
1706 static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width)
1707 {
1708 #ifdef HAVE_MMX
1709         asm volatile(
1710                 "movq "MANGLE(bm01010101)", %%mm2\n\t"
1711                 "mov %0, %%"REG_a"              \n\t"
1712                 "1:                             \n\t"
1713                 "movq (%1, %%"REG_a",2), %%mm0  \n\t"
1714                 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
1715                 "pand %%mm2, %%mm0              \n\t"
1716                 "pand %%mm2, %%mm1              \n\t"
1717                 "packuswb %%mm1, %%mm0          \n\t"
1718                 "movq %%mm0, (%2, %%"REG_a")    \n\t"
1719                 "add $8, %%"REG_a"              \n\t"
1720                 " js 1b                         \n\t"
1721                 : : "g" (-width), "r" (src+width*2), "r" (dst+width)
1722                 : "%"REG_a
1723         );
1724 #else
1725         int i;
1726         for(i=0; i<width; i++)
1727                 dst[i]= src[2*i];
1728 #endif
1729 }
1730
1731 static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
1732 {
1733 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
1734         asm volatile(
1735                 "movq "MANGLE(bm01010101)", %%mm4\n\t"
1736                 "mov %0, %%"REG_a"              \n\t"
1737                 "1:                             \n\t"
1738                 "movq (%1, %%"REG_a",4), %%mm0  \n\t"
1739                 "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
1740                 "movq (%2, %%"REG_a",4), %%mm2  \n\t"
1741                 "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
1742                 PAVGB(%%mm2, %%mm0)
1743                 PAVGB(%%mm3, %%mm1)
1744                 "psrlw $8, %%mm0                \n\t"
1745                 "psrlw $8, %%mm1                \n\t"
1746                 "packuswb %%mm1, %%mm0          \n\t"
1747                 "movq %%mm0, %%mm1              \n\t"
1748                 "psrlw $8, %%mm0                \n\t"
1749                 "pand %%mm4, %%mm1              \n\t"
1750                 "packuswb %%mm0, %%mm0          \n\t"
1751                 "packuswb %%mm1, %%mm1          \n\t"
1752                 "movd %%mm0, (%4, %%"REG_a")    \n\t"
1753                 "movd %%mm1, (%3, %%"REG_a")    \n\t"
1754                 "add $4, %%"REG_a"              \n\t"
1755                 " js 1b                         \n\t"
1756                 : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
1757                 : "%"REG_a
1758         );
1759 #else
1760         int i;
1761         for(i=0; i<width; i++)
1762         {
1763                 dstU[i]= (src1[4*i + 1] + src2[4*i + 1])>>1;
1764                 dstV[i]= (src1[4*i + 3] + src2[4*i + 3])>>1;
1765         }
1766 #endif
1767 }
1768
1769 //this is almost identical to the previous one and exists only because yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses
1770 static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width)
1771 {
1772 #ifdef HAVE_MMX
1773         asm volatile(
1774                 "mov %0, %%"REG_a"              \n\t"
1775                 "1:                             \n\t"
1776                 "movq (%1, %%"REG_a",2), %%mm0  \n\t"
1777                 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
1778                 "psrlw $8, %%mm0                \n\t"
1779                 "psrlw $8, %%mm1                \n\t"
1780                 "packuswb %%mm1, %%mm0          \n\t"
1781                 "movq %%mm0, (%2, %%"REG_a")    \n\t"
1782                 "add $8, %%"REG_a"              \n\t"
1783                 " js 1b                         \n\t"
1784                 : : "g" (-width), "r" (src+width*2), "r" (dst+width)
1785                 : "%"REG_a
1786         );
1787 #else
1788         int i;
1789         for(i=0; i<width; i++)
1790                 dst[i]= src[2*i+1];
1791 #endif
1792 }
1793
1794 static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
1795 {
1796 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
1797         asm volatile(
1798                 "movq "MANGLE(bm01010101)", %%mm4\n\t"
1799                 "mov %0, %%"REG_a"              \n\t"
1800                 "1:                             \n\t"
1801                 "movq (%1, %%"REG_a",4), %%mm0  \n\t"
1802                 "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
1803                 "movq (%2, %%"REG_a",4), %%mm2  \n\t"
1804                 "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
1805                 PAVGB(%%mm2, %%mm0)
1806                 PAVGB(%%mm3, %%mm1)
1807                 "pand %%mm4, %%mm0              \n\t"
1808                 "pand %%mm4, %%mm1              \n\t"
1809                 "packuswb %%mm1, %%mm0          \n\t"
1810                 "movq %%mm0, %%mm1              \n\t"
1811                 "psrlw $8, %%mm0                \n\t"
1812                 "pand %%mm4, %%mm1              \n\t"
1813                 "packuswb %%mm0, %%mm0          \n\t"
1814                 "packuswb %%mm1, %%mm1          \n\t"
1815                 "movd %%mm0, (%4, %%"REG_a")    \n\t"
1816                 "movd %%mm1, (%3, %%"REG_a")    \n\t"
1817                 "add $4, %%"REG_a"              \n\t"
1818                 " js 1b                         \n\t"
1819                 : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
1820                 : "%"REG_a
1821         );
1822 #else
1823         int i;
1824         for(i=0; i<width; i++)
1825         {
1826                 dstU[i]= (src1[4*i + 0] + src2[4*i + 0])>>1;
1827                 dstV[i]= (src1[4*i + 2] + src2[4*i + 2])>>1;
1828         }
1829 #endif
1830 }
1831
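/* packed RGB/BGR input readers: convert to the Y plane and to 2x2
   box-filtered U/V planes */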
1832 static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width)
1833 {
1834         int i;
1835         for(i=0; i<width; i++)
1836         {
1837                 int b=  ((uint32_t*)src)[i]&0xFF;
1838                 int g= (((uint32_t*)src)[i]>>8)&0xFF;
1839                 int r= (((uint32_t*)src)[i]>>16)&0xFF;
1840
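                // 33<<(RGB2YUV_SHIFT-1) == 16.5 in fixed point: the +16 luma offset plus 0.5 for rounding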
1841                 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
1842         }
1843 }
1844
1845 static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
1846 {
1847         int i;
1848         for(i=0; i<width; i++)
1849         {
1850                 const int a= ((uint32_t*)src1)[2*i+0];
1851                 const int e= ((uint32_t*)src1)[2*i+1];
1852                 const int c= ((uint32_t*)src2)[2*i+0];
1853                 const int d= ((uint32_t*)src2)[2*i+1];
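                // sum the 2x2 block: R and B accumulate in l, G in h; each per-channel
                // sum is at most 4*255, so the packed fields cannot overflow into each other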
1854                 const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
1855                 const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
1856                 const int b=  l&0x3FF;
1857                 const int g=  h>>8;
1858                 const int r=  l>>16;
1859
1860                 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
1861                 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
1862         }
1863 }
1864
1865 static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width)
1866 {
1867 #ifdef HAVE_MMX
1868         asm volatile(
1869                 "mov %2, %%"REG_a"              \n\t"
1870                 "movq "MANGLE(bgr2YCoeff)", %%mm6               \n\t"
1871                 "movq "MANGLE(w1111)", %%mm5            \n\t"
1872                 "pxor %%mm7, %%mm7              \n\t"
1873                 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d"\n\t"
1874                 ASMALIGN(4)
1875                 "1:                             \n\t"
1876                 PREFETCH" 64(%0, %%"REG_d")     \n\t"
1877                 "movd (%0, %%"REG_d"), %%mm0    \n\t"
1878                 "movd 3(%0, %%"REG_d"), %%mm1   \n\t"
1879                 "punpcklbw %%mm7, %%mm0         \n\t"
1880                 "punpcklbw %%mm7, %%mm1         \n\t"
1881                 "movd 6(%0, %%"REG_d"), %%mm2   \n\t"
1882                 "movd 9(%0, %%"REG_d"), %%mm3   \n\t"
1883                 "punpcklbw %%mm7, %%mm2         \n\t"
1884                 "punpcklbw %%mm7, %%mm3         \n\t"
1885                 "pmaddwd %%mm6, %%mm0           \n\t"
1886                 "pmaddwd %%mm6, %%mm1           \n\t"
1887                 "pmaddwd %%mm6, %%mm2           \n\t"
1888                 "pmaddwd %%mm6, %%mm3           \n\t"
1889 #ifndef FAST_BGR2YV12
1890                 "psrad $8, %%mm0                \n\t"
1891                 "psrad $8, %%mm1                \n\t"
1892                 "psrad $8, %%mm2                \n\t"
1893                 "psrad $8, %%mm3                \n\t"
1894 #endif
1895                 "packssdw %%mm1, %%mm0          \n\t"
1896                 "packssdw %%mm3, %%mm2          \n\t"
1897                 "pmaddwd %%mm5, %%mm0           \n\t"
1898                 "pmaddwd %%mm5, %%mm2           \n\t"
1899                 "packssdw %%mm2, %%mm0          \n\t"
1900                 "psraw $7, %%mm0                \n\t"
1901
1902                 "movd 12(%0, %%"REG_d"), %%mm4  \n\t"
1903                 "movd 15(%0, %%"REG_d"), %%mm1  \n\t"
1904                 "punpcklbw %%mm7, %%mm4         \n\t"
1905                 "punpcklbw %%mm7, %%mm1         \n\t"
1906                 "movd 18(%0, %%"REG_d"), %%mm2  \n\t"
1907                 "movd 21(%0, %%"REG_d"), %%mm3  \n\t"
1908                 "punpcklbw %%mm7, %%mm2         \n\t"
1909                 "punpcklbw %%mm7, %%mm3         \n\t"
1910                 "pmaddwd %%mm6, %%mm4           \n\t"
1911                 "pmaddwd %%mm6, %%mm1           \n\t"
1912                 "pmaddwd %%mm6, %%mm2           \n\t"
1913                 "pmaddwd %%mm6, %%mm3           \n\t"
1914 #ifndef FAST_BGR2YV12
1915                 "psrad $8, %%mm4                \n\t"
1916                 "psrad $8, %%mm1                \n\t"
1917                 "psrad $8, %%mm2                \n\t"
1918                 "psrad $8, %%mm3                \n\t"
1919 #endif
1920                 "packssdw %%mm1, %%mm4          \n\t"
1921                 "packssdw %%mm3, %%mm2          \n\t"
1922                 "pmaddwd %%mm5, %%mm4           \n\t"
1923                 "pmaddwd %%mm5, %%mm2           \n\t"
1924                 "add $24, %%"REG_d"             \n\t"
1925                 "packssdw %%mm2, %%mm4          \n\t"
1926                 "psraw $7, %%mm4                \n\t"
1927
1928                 "packuswb %%mm4, %%mm0          \n\t"
1929                 "paddusb "MANGLE(bgr2YOffset)", %%mm0   \n\t"
1930
1931                 "movq %%mm0, (%1, %%"REG_a")    \n\t"
1932                 "add $8, %%"REG_a"              \n\t"
1933                 " js 1b                         \n\t"
1934                 : : "r" (src+width*3), "r" (dst+width), "g" (-width)
1935                 : "%"REG_a, "%"REG_d
1936         );
1937 #else
1938         int i;
1939         for(i=0; i<width; i++)
1940         {
1941                 int b= src[i*3+0];
1942                 int g= src[i*3+1];
1943                 int r= src[i*3+2];
1944
1945                 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
1946         }
1947 #endif
1948 }
1949
1950 static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
1951 {
1952 #ifdef HAVE_MMX
1953         asm volatile(
1954                 "mov %4, %%"REG_a"              \n\t"
1955                 "movq "MANGLE(w1111)", %%mm5            \n\t"
1956                 "movq "MANGLE(bgr2UCoeff)", %%mm6               \n\t"
1957                 "pxor %%mm7, %%mm7              \n\t"
1958                 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d"       \n\t"
1959                 "add %%"REG_d", %%"REG_d"       \n\t"
1960                 ASMALIGN(4)
1961                 "1:                             \n\t"
1962                 PREFETCH" 64(%0, %%"REG_d")     \n\t"
1963                 PREFETCH" 64(%1, %%"REG_d")     \n\t"
1964 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
1965                 "movq (%0, %%"REG_d"), %%mm0    \n\t"
1966                 "movq (%1, %%"REG_d"), %%mm1    \n\t"
1967                 "movq 6(%0, %%"REG_d"), %%mm2   \n\t"
1968                 "movq 6(%1, %%"REG_d"), %%mm3   \n\t"
1969                 PAVGB(%%mm1, %%mm0)
1970                 PAVGB(%%mm3, %%mm2)
1971                 "movq %%mm0, %%mm1              \n\t"
1972                 "movq %%mm2, %%mm3              \n\t"
1973                 "psrlq $24, %%mm0               \n\t"
1974                 "psrlq $24, %%mm2               \n\t"
1975                 PAVGB(%%mm1, %%mm0)
1976                 PAVGB(%%mm3, %%mm2)
1977                 "punpcklbw %%mm7, %%mm0         \n\t"
1978                 "punpcklbw %%mm7, %%mm2         \n\t"
1979 #else
1980                 "movd (%0, %%"REG_d"), %%mm0    \n\t"
1981                 "movd (%1, %%"REG_d"), %%mm1    \n\t"
1982                 "movd 3(%0, %%"REG_d"), %%mm2   \n\t"
1983                 "movd 3(%1, %%"REG_d"), %%mm3   \n\t"
1984                 "punpcklbw %%mm7, %%mm0         \n\t"
1985                 "punpcklbw %%mm7, %%mm1         \n\t"
1986                 "punpcklbw %%mm7, %%mm2         \n\t"
1987                 "punpcklbw %%mm7, %%mm3         \n\t"
1988                 "paddw %%mm1, %%mm0             \n\t"
1989                 "paddw %%mm3, %%mm2             \n\t"
1990                 "paddw %%mm2, %%mm0             \n\t"
1991                 "movd 6(%0, %%"REG_d"), %%mm4   \n\t"
1992                 "movd 6(%1, %%"REG_d"), %%mm1   \n\t"
1993                 "movd 9(%0, %%"REG_d"), %%mm2   \n\t"
1994                 "movd 9(%1, %%"REG_d"), %%mm3   \n\t"
1995                 "punpcklbw %%mm7, %%mm4         \n\t"
1996                 "punpcklbw %%mm7, %%mm1         \n\t"
1997                 "punpcklbw %%mm7, %%mm2         \n\t"
1998                 "punpcklbw %%mm7, %%mm3         \n\t"
1999                 "paddw %%mm1, %%mm4             \n\t"
2000                 "paddw %%mm3, %%mm2             \n\t"
2001                 "paddw %%mm4, %%mm2             \n\t"
2002                 "psrlw $2, %%mm0                \n\t"
2003                 "psrlw $2, %%mm2                \n\t"
2004 #endif
2005                 "movq "MANGLE(bgr2VCoeff)", %%mm1               \n\t"
2006                 "movq "MANGLE(bgr2VCoeff)", %%mm3               \n\t"
2007                 
2008                 "pmaddwd %%mm0, %%mm1           \n\t"
2009                 "pmaddwd %%mm2, %%mm3           \n\t"
2010                 "pmaddwd %%mm6, %%mm0           \n\t"
2011                 "pmaddwd %%mm6, %%mm2           \n\t"
2012 #ifndef FAST_BGR2YV12
2013                 "psrad $8, %%mm0                \n\t"
2014                 "psrad $8, %%mm1                \n\t"
2015                 "psrad $8, %%mm2                \n\t"
2016                 "psrad $8, %%mm3                \n\t"
2017 #endif
2018                 "packssdw %%mm2, %%mm0          \n\t"
2019                 "packssdw %%mm3, %%mm1          \n\t"
2020                 "pmaddwd %%mm5, %%mm0           \n\t"
2021                 "pmaddwd %%mm5, %%mm1           \n\t"
2022                 "packssdw %%mm1, %%mm0          \n\t" // V1 V0 U1 U0
2023                 "psraw $7, %%mm0                \n\t"
2024
2025 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
2026                 "movq 12(%0, %%"REG_d"), %%mm4  \n\t"
2027                 "movq 12(%1, %%"REG_d"), %%mm1  \n\t"
2028                 "movq 18(%0, %%"REG_d"), %%mm2  \n\t"
2029                 "movq 18(%1, %%"REG_d"), %%mm3  \n\t"
2030                 PAVGB(%%mm1, %%mm4)
2031                 PAVGB(%%mm3, %%mm2)
2032                 "movq %%mm4, %%mm1              \n\t"
2033                 "movq %%mm2, %%mm3              \n\t"
2034                 "psrlq $24, %%mm4               \n\t"
2035                 "psrlq $24, %%mm2               \n\t"
2036                 PAVGB(%%mm1, %%mm4)
2037                 PAVGB(%%mm3, %%mm2)
2038                 "punpcklbw %%mm7, %%mm4         \n\t"
2039                 "punpcklbw %%mm7, %%mm2         \n\t"
2040 #else
2041                 "movd 12(%0, %%"REG_d"), %%mm4  \n\t"
2042                 "movd 12(%1, %%"REG_d"), %%mm1  \n\t"
2043                 "movd 15(%0, %%"REG_d"), %%mm2  \n\t"
2044                 "movd 15(%1, %%"REG_d"), %%mm3  \n\t"
2045                 "punpcklbw %%mm7, %%mm4         \n\t"
2046                 "punpcklbw %%mm7, %%mm1         \n\t"
2047                 "punpcklbw %%mm7, %%mm2         \n\t"
2048                 "punpcklbw %%mm7, %%mm3         \n\t"
2049                 "paddw %%mm1, %%mm4             \n\t"
2050                 "paddw %%mm3, %%mm2             \n\t"
2051                 "paddw %%mm2, %%mm4             \n\t"
2052                 "movd 18(%0, %%"REG_d"), %%mm5  \n\t"
2053                 "movd 18(%1, %%"REG_d"), %%mm1  \n\t"
2054                 "movd 21(%0, %%"REG_d"), %%mm2  \n\t"
2055                 "movd 21(%1, %%"REG_d"), %%mm3  \n\t"
2056                 "punpcklbw %%mm7, %%mm5         \n\t"
2057                 "punpcklbw %%mm7, %%mm1         \n\t"
2058                 "punpcklbw %%mm7, %%mm2         \n\t"
2059                 "punpcklbw %%mm7, %%mm3         \n\t"
2060                 "paddw %%mm1, %%mm5             \n\t"
2061                 "paddw %%mm3, %%mm2             \n\t"
2062                 "paddw %%mm5, %%mm2             \n\t"
2063                 "movq "MANGLE(w1111)", %%mm5            \n\t"
2064                 "psrlw $2, %%mm4                \n\t"
2065                 "psrlw $2, %%mm2                \n\t"
2066 #endif
2067                 "movq "MANGLE(bgr2VCoeff)", %%mm1               \n\t"
2068                 "movq "MANGLE(bgr2VCoeff)", %%mm3               \n\t"
2069                 
2070                 "pmaddwd %%mm4, %%mm1           \n\t"
2071                 "pmaddwd %%mm2, %%mm3           \n\t"
2072                 "pmaddwd %%mm6, %%mm4           \n\t"
2073                 "pmaddwd %%mm6, %%mm2           \n\t"
2074 #ifndef FAST_BGR2YV12
2075                 "psrad $8, %%mm4                \n\t"
2076                 "psrad $8, %%mm1                \n\t"
2077                 "psrad $8, %%mm2                \n\t"
2078                 "psrad $8, %%mm3                \n\t"
2079 #endif
2080                 "packssdw %%mm2, %%mm4          \n\t"
2081                 "packssdw %%mm3, %%mm1          \n\t"
2082                 "pmaddwd %%mm5, %%mm4           \n\t"
2083                 "pmaddwd %%mm5, %%mm1           \n\t"
2084                 "add $24, %%"REG_d"             \n\t"
2085                 "packssdw %%mm1, %%mm4          \n\t" // V3 V2 U3 U2
2086                 "psraw $7, %%mm4                \n\t"
2087                 
2088                 "movq %%mm0, %%mm1              \n\t"
2089                 "punpckldq %%mm4, %%mm0         \n\t"
2090                 "punpckhdq %%mm4, %%mm1         \n\t"
2091                 "packsswb %%mm1, %%mm0          \n\t"
2092                 "paddb "MANGLE(bgr2UVOffset)", %%mm0    \n\t"
2093
2094                 "movd %%mm0, (%2, %%"REG_a")    \n\t"
2095                 "punpckhdq %%mm0, %%mm0         \n\t"
2096                 "movd %%mm0, (%3, %%"REG_a")    \n\t"
2097                 "add $4, %%"REG_a"              \n\t"
2098                 " js 1b                         \n\t"
2099                 : : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
2100                 : "%"REG_a, "%"REG_d
2101         );
2102 #else
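        /* C fallback: sum a 2x2 block (two adjacent BGR24 pixels from each of the two
           source lines) and convert the summed components to U/V; the extra +2 in the
           shift divides the four-sample sum back down. */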
2103         int i;
2104         for(i=0; i<width; i++)
2105         {
2106                 int b= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
2107                 int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
2108                 int r= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
2109
2110                 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
2111                 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
2112         }
2113 #endif
2114 }
2115
2116 static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width)
2117 {
2118         int i;
2119         for(i=0; i<width; i++)
2120         {
2121                 int d= ((uint16_t*)src)[i];
2122                 int b= d&0x1F;
2123                 int g= (d>>5)&0x3F;
2124                 int r= (d>>11)&0x1F;
2125
2126                 dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
2127         }
2128 }
2129
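/* bgr16ToUV averages a 2x2 block of RGB565 pixels without unpacking them: each 32-bit
   load holds two pixels, and the mask/shift/add steps below sum corresponding colour
   fields in parallel (SIMD within a register), leaving per-component sums of the four
   samples that the weighted conversion then turns into U and V. */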
2130 static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
2131 {
2132         int i;
2133         for(i=0; i<width; i++)
2134         {
2135                 int d0= ((uint32_t*)src1)[i];
2136                 int d1= ((uint32_t*)src2)[i];
2137                 
2138                 int dl= (d0&0x07E0F81F) + (d1&0x07E0F81F);
2139                 int dh= ((d0>>5)&0x07C0F83F) + ((d1>>5)&0x07C0F83F);
2140
2141                 int dh2= (dh>>11) + (dh<<21);
2142                 int d= dh2 + dl;
2143
2144                 int b= d&0x7F;
2145                 int r= (d>>11)&0x7F;
2146                 int g= d>>21;
2147                 dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
2148                 dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
2149         }
2150 }
2151
2152 static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width)
2153 {
2154         int i;
2155         for(i=0; i<width; i++)
2156         {
2157                 int d= ((uint16_t*)src)[i];
2158                 int b= d&0x1F;
2159                 int g= (d>>5)&0x1F;
2160                 int r= (d>>10)&0x1F;
2161
2162                 dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
2163         }
2164 }
2165
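/* Same 2x2 averaging trick as bgr16ToUV above, with the masks adjusted for the 5-5-5
   field layout of RGB555 (bits 0-4 blue, 5-9 green, 10-14 red). */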
2166 static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
2167 {
2168         int i;
2169         for(i=0; i<width; i++)
2170         {
2171                 int d0= ((uint32_t*)src1)[i];
2172                 int d1= ((uint32_t*)src2)[i];
2173                 
2174                 int dl= (d0&0x03E07C1F) + (d1&0x03E07C1F);
2175                 int dh= ((d0>>5)&0x03E0F81F) + ((d1>>5)&0x03E0F81F);
2176
2177                 int dh2= (dh>>11) + (dh<<21);
2178                 int d= dh2 + dl;
2179
2180                 int b= d&0x7F;
2181                 int r= (d>>10)&0x7F;
2182                 int g= d>>21;
2183                 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
2184                 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
2185         }
2186 }
2187
2188
2189 static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width)
2190 {
2191         int i;
2192         for(i=0; i<width; i++)
2193         {
2194                 int r=  ((uint32_t*)src)[i]&0xFF;
2195                 int g= (((uint32_t*)src)[i]>>8)&0xFF;
2196                 int b= (((uint32_t*)src)[i]>>16)&0xFF;
2197
2198                 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
2199         }
2200 }
2201
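/* rgb32ToUV sums a 2x2 block of 32-bit pixels two components at a time: 'l' holds the
   r and b byte sums in separate 16-bit halves (mask 0xFF00FF), 'h' the g byte sums;
   four 8-bit samples add up to at most 1020, hence the 10-bit extraction below. */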
2202 static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
2203 {
2204         int i;
2205         for(i=0; i<width; i++)
2206         {
2207                 const int a= ((uint32_t*)src1)[2*i+0];
2208                 const int e= ((uint32_t*)src1)[2*i+1];
2209                 const int c= ((uint32_t*)src2)[2*i+0];
2210                 const int d= ((uint32_t*)src2)[2*i+1];
2211                 const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
2212                 const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
2213                 const int r=  l&0x3FF;
2214                 const int g=  h>>8;
2215                 const int b=  l>>16;
2216
2217                 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
2218                 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
2219         }
2220 }
2221
2222 static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width)
2223 {
2224         int i;
2225         for(i=0; i<width; i++)
2226         {
2227                 int r= src[i*3+0];
2228                 int g= src[i*3+1];
2229                 int b= src[i*3+2];
2230
2231                 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
2232         }
2233 }
2234
2235 static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
2236 {
2237         int i;
2238         for(i=0; i<width; i++)
2239         {
2240                 int r= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
2241                 int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
2242                 int b= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
2243
2244                 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
2245                 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
2246         }
2247 }
2248
2249
2250 // Bilinear / Bicubic scaling
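// For each output pixel i this computes the FIR sum
//   dst[i] = clip( (sum over j of src[filterPos[i]+j] * filter[i*filterSize+j]) >> 7 )
// The MMX paths below specialize filterSize 4 and 8 plus a generic multiple-of-4 case;
// the plain C loop at the end is the reference implementation.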
2251 static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
2252                                   int16_t *filter, int16_t *filterPos, long filterSize)
2253 {
2254 #ifdef HAVE_MMX
2255         assert(filterSize % 4 == 0 && filterSize>0);
2256         if(filterSize==4) // always true for upscaling, sometimes for downscaling too
2257         {
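                /* Negative-counter idiom: counter runs from -2*dstW up to 0 in steps of
                   4 bytes (two int16 outputs per iteration); filter, filterPos and dst
                   are pre-biased so that (pointer + counter) indexes from their start,
                   and the loop exits once the add sets the carry flag (jnc). */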
2258                 long counter= -2*dstW;
2259                 filter-= counter*2;
2260                 filterPos-= counter/2;
2261                 dst-= counter/2;
2262                 asm volatile(
2263 #if defined(PIC)
2264                         "push %%"REG_b"                 \n\t"
2265 #endif
2266                         "pxor %%mm7, %%mm7              \n\t"
2267                         "movq "MANGLE(w02)", %%mm6      \n\t"
2268                         "push %%"REG_BP"                \n\t" // we use 7 regs here ...
2269                         "mov %%"REG_a", %%"REG_BP"      \n\t"
2270                         ASMALIGN(4)
2271                         "1:                             \n\t"
2272                         "movzwl (%2, %%"REG_BP"), %%eax \n\t"
2273                         "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
2274                         "movq (%1, %%"REG_BP", 4), %%mm1\n\t"
2275                         "movq 8(%1, %%"REG_BP", 4), %%mm3\n\t"
2276                         "movd (%3, %%"REG_a"), %%mm0    \n\t"
2277                         "movd (%3, %%"REG_b"), %%mm2    \n\t"
2278                         "punpcklbw %%mm7, %%mm0         \n\t"
2279                         "punpcklbw %%mm7, %%mm2         \n\t"
2280                         "pmaddwd %%mm1, %%mm0           \n\t"
2281                         "pmaddwd %%mm2, %%mm3           \n\t"
2282                         "psrad $8, %%mm0                \n\t"
2283                         "psrad $8, %%mm3                \n\t"
2284                         "packssdw %%mm3, %%mm0          \n\t"
2285                         "pmaddwd %%mm6, %%mm0           \n\t"
2286                         "packssdw %%mm0, %%mm0          \n\t"
2287                         "movd %%mm0, (%4, %%"REG_BP")   \n\t"
2288                         "add $4, %%"REG_BP"             \n\t"
2289                         " jnc 1b                        \n\t"
2290
2291                         "pop %%"REG_BP"                 \n\t"
2292 #if defined(PIC)
2293                         "pop %%"REG_b"                  \n\t"
2294 #endif
2295                         : "+a" (counter)
2296                         : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
2297 #if !defined(PIC)
2298                         : "%"REG_b
2299 #endif
2300                 );
2301         }
2302         else if(filterSize==8)
2303         {
2304                 long counter= -2*dstW;
2305                 filter-= counter*4;
2306                 filterPos-= counter/2;
2307                 dst-= counter/2;
2308                 asm volatile(
2309 #if defined(PIC)
2310                         "push %%"REG_b"                 \n\t"
2311 #endif
2312                         "pxor %%mm7, %%mm7              \n\t"
2313                         "movq "MANGLE(w02)", %%mm6      \n\t"
2314                         "push %%"REG_BP"                \n\t" // we use 7 regs here ...
2315                         "mov %%"REG_a", %%"REG_BP"      \n\t"
2316                         ASMALIGN(4)
2317                         "1:                             \n\t"
2318                         "movzwl (%2, %%"REG_BP"), %%eax \n\t"
2319                         "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
2320                         "movq (%1, %%"REG_BP", 8), %%mm1\n\t"
2321                         "movq 16(%1, %%"REG_BP", 8), %%mm3\n\t"
2322                         "movd (%3, %%"REG_a"), %%mm0    \n\t"
2323                         "movd (%3, %%"REG_b"), %%mm2    \n\t"
2324                         "punpcklbw %%mm7, %%mm0         \n\t"
2325                         "punpcklbw %%mm7, %%mm2         \n\t"
2326                         "pmaddwd %%mm1, %%mm0           \n\t"
2327                         "pmaddwd %%mm2, %%mm3           \n\t"
2328
2329                         "movq 8(%1, %%"REG_BP", 8), %%mm1\n\t"
2330                         "movq 24(%1, %%"REG_BP", 8), %%mm5\n\t"
2331                         "movd 4(%3, %%"REG_a"), %%mm4   \n\t"
2332                         "movd 4(%3, %%"REG_b"), %%mm2   \n\t"
2333                         "punpcklbw %%mm7, %%mm4         \n\t"
2334                         "punpcklbw %%mm7, %%mm2         \n\t"
2335                         "pmaddwd %%mm1, %%mm4           \n\t"
2336                         "pmaddwd %%mm2, %%mm5           \n\t"
2337                         "paddd %%mm4, %%mm0             \n\t"
2338                         "paddd %%mm5, %%mm3             \n\t"
2339                                                 
2340                         "psrad $8, %%mm0                \n\t"
2341                         "psrad $8, %%mm3                \n\t"
2342                         "packssdw %%mm3, %%mm0          \n\t"
2343                         "pmaddwd %%mm6, %%mm0           \n\t"
2344                         "packssdw %%mm0, %%mm0          \n\t"
2345                         "movd %%mm0, (%4, %%"REG_BP")   \n\t"
2346                         "add $4, %%"REG_BP"             \n\t"
2347                         " jnc 1b                        \n\t"
2348
2349                         "pop %%"REG_BP"                 \n\t"
2350 #if defined(PIC)
2351                         "pop %%"REG_b"                  \n\t"
2352 #endif
2353                         : "+a" (counter)
2354                         : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
2355 #if !defined(PIC)
2356                         : "%"REG_b
2357 #endif
2358                 );
2359         }
2360         else
2361         {
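                /* Generic multiple-of-4 filterSize: the outer loop (label 1) produces two
                   output pixels per pass, while the inner loop (label 2) walks the filter
                   four taps at a time until the running source pointer reaches
                   src+filterSize. */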
2362                 uint8_t *offset = src+filterSize;
2363                 long counter= -2*dstW;
2364 //              filter-= counter*filterSize/2;
2365                 filterPos-= counter/2;
2366                 dst-= counter/2;
2367                 asm volatile(
2368                         "pxor %%mm7, %%mm7              \n\t"
2369                         "movq "MANGLE(w02)", %%mm6      \n\t"
2370                         ASMALIGN(4)
2371                         "1:                             \n\t"
2372                         "mov %2, %%"REG_c"              \n\t"
2373                         "movzwl (%%"REG_c", %0), %%eax  \n\t"
2374                         "movzwl 2(%%"REG_c", %0), %%edx \n\t"
2375                         "mov %5, %%"REG_c"              \n\t"
2376                         "pxor %%mm4, %%mm4              \n\t"
2377                         "pxor %%mm5, %%mm5              \n\t"
2378                         "2:                             \n\t"
2379                         "movq (%1), %%mm1               \n\t"
2380                         "movq (%1, %6), %%mm3           \n\t"
2381                         "movd (%%"REG_c", %%"REG_a"), %%mm0\n\t"
2382                         "movd (%%"REG_c", %%"REG_d"), %%mm2\n\t"
2383                         "punpcklbw %%mm7, %%mm0         \n\t"
2384                         "punpcklbw %%mm7, %%mm2         \n\t"
2385                         "pmaddwd %%mm1, %%mm0           \n\t"
2386                         "pmaddwd %%mm2, %%mm3           \n\t"
2387                         "paddd %%mm3, %%mm5             \n\t"
2388                         "paddd %%mm0, %%mm4             \n\t"
2389                         "add $8, %1                     \n\t"
2390                         "add $4, %%"REG_c"              \n\t"
2391                         "cmp %4, %%"REG_c"              \n\t"
2392                         " jb 2b                         \n\t"
2393                         "add %6, %1                     \n\t"
2394                         "psrad $8, %%mm4                \n\t"
2395                         "psrad $8, %%mm5                \n\t"
2396                         "packssdw %%mm5, %%mm4          \n\t"
2397                         "pmaddwd %%mm6, %%mm4           \n\t"
2398                         "packssdw %%mm4, %%mm4          \n\t"
2399                         "mov %3, %%"REG_a"              \n\t"
2400                         "movd %%mm4, (%%"REG_a", %0)    \n\t"
2401                         "add $4, %0                     \n\t"
2402                         " jnc 1b                        \n\t"
2403
2404                         : "+r" (counter), "+r" (filter)
2405                         : "m" (filterPos), "m" (dst), "m"(offset),
2406                           "m" (src), "r" (filterSize*2)
2407                         : "%"REG_a, "%"REG_c, "%"REG_d
2408                 );
2409         }
2410 #else
2411 #ifdef HAVE_ALTIVEC
2412         hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
2413 #else
2414         int i;
2415         for(i=0; i<dstW; i++)
2416         {
2417                 int j;
2418                 int srcPos= filterPos[i];
2419                 int val=0;
2420 //              printf("filterPos: %d\n", filterPos[i]);
2421                 for(j=0; j<filterSize; j++)
2422                 {
2423 //                      printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
2424                         val += ((int)src[srcPos + j])*filter[filterSize*i + j];
2425                 }
2426 //              filter += hFilterSize;
2427                 dst[i] = FFMIN(FFMAX(0, val>>7), (1<<15)-1); // the cubic equation does overflow ...
2428 //              dst[i] = val>>7;
2429         }
2430 #endif
2431 #endif
2432 }
2433       // *** horizontal scale Y line to temp buffer
2434 static inline void RENAME(hyscale)(uint16_t *dst, long dstWidth, uint8_t *src, int srcW, int xInc,
2435                                    int flags, int canMMX2BeUsed, int16_t *hLumFilter,
2436                                    int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode, 
2437                                    int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
2438                                    int32_t *mmx2FilterPos)
2439 {
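    /* If the input is not already 8-bit planar luma, convert one line of it into
       formatConvBuffer first; then either run the generic hScale FIR or, for
       SWS_FAST_BILINEAR, one of the fast bilinear paths below. */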
2440     if(srcFormat==PIX_FMT_YUYV422 || srcFormat==PIX_FMT_GRAY16BE)
2441     {
2442         RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
2443         src= formatConvBuffer;
2444     }
2445     else if(srcFormat==PIX_FMT_UYVY422 || srcFormat==PIX_FMT_GRAY16LE)
2446     {
2447         RENAME(uyvyToY)(formatConvBuffer, src, srcW);
2448         src= formatConvBuffer;
2449     }
2450     else if(srcFormat==PIX_FMT_RGB32)
2451     {
2452         RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
2453         src= formatConvBuffer;
2454     }
2455     else if(srcFormat==PIX_FMT_BGR24)
2456     {
2457         RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
2458         src= formatConvBuffer;
2459     }
2460     else if(srcFormat==PIX_FMT_BGR565)
2461     {
2462         RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
2463         src= formatConvBuffer;
2464     }
2465     else if(srcFormat==PIX_FMT_BGR555)
2466     {
2467         RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
2468         src= formatConvBuffer;
2469     }
2470     else if(srcFormat==PIX_FMT_BGR32)
2471     {
2472         RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
2473         src= formatConvBuffer;
2474     }
2475     else if(srcFormat==PIX_FMT_RGB24)
2476     {
2477         RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
2478         src= formatConvBuffer;
2479     }
2480
2481 #ifdef HAVE_MMX
2482         // use the new MMX scaler if MMX2 can't be used (it's faster than the x86 asm one)
2483     if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
2484 #else
2485     if(!(flags&SWS_FAST_BILINEAR))
2486 #endif
2487     {
2488         RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
2489     }
2490     else // Fast Bilinear upscale / crap downscale
2491     {
2492 #if defined(ARCH_X86) || defined(ARCH_X86_64)
2493 #ifdef HAVE_MMX2
2494         int i;
2495 #if defined(PIC)
2496         uint64_t ebxsave __attribute__((aligned(8)));
2497 #endif
2498         if(canMMX2BeUsed)
2499         {
2500                 asm volatile(
2501 #if defined(PIC)
2502                         "mov %%"REG_b", %5    \n\t"
2503 #endif
2504                         "pxor %%mm7, %%mm7              \n\t"
2505                         "mov %0, %%"REG_c"              \n\t"
2506                         "mov %1, %%"REG_D"              \n\t"
2507                         "mov %2, %%"REG_d"              \n\t"
2508                         "mov %3, %%"REG_b"              \n\t"
2509                         "xor %%"REG_a", %%"REG_a"       \n\t" // i
2510                         PREFETCH" (%%"REG_c")           \n\t"
2511                         PREFETCH" 32(%%"REG_c")         \n\t"
2512                         PREFETCH" 64(%%"REG_c")         \n\t"
2513
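                        /* FUNNY_Y_CODE calls funnyYCode, a block of executable horizontal
                           scaler code set up elsewhere in swscale for this context; each
                           invocation handles one chunk of the output line, after which the
                           source and destination pointers are advanced. */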
2514 #ifdef ARCH_X86_64
2515
2516 #define FUNNY_Y_CODE \
2517                         "movl (%%"REG_b"), %%esi        \n\t"\
2518                         "call *%4                       \n\t"\
2519                         "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
2520                         "add %%"REG_S", %%"REG_c"       \n\t"\
2521                         "add %%"REG_a", %%"REG_D"       \n\t"\
2522                         "xor %%"REG_a", %%"REG_a"       \n\t"\
2523
2524 #else
2525
2526 #define FUNNY_Y_CODE \
2527                         "movl (%%"REG_b"), %%esi        \n\t"\
2528                         "call *%4                       \n\t"\
2529                         "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
2530                         "add %%"REG_a", %%"REG_D"       \n\t"\
2531                         "xor %%"REG_a", %%"REG_a"       \n\t"\
2532
2533 #endif
2534
2535 FUNNY_Y_CODE
2536 FUNNY_Y_CODE
2537 FUNNY_Y_CODE
2538 FUNNY_Y_CODE
2539 FUNNY_Y_CODE
2540 FUNNY_Y_CODE
2541 FUNNY_Y_CODE
2542 FUNNY_Y_CODE
2543
2544 #if defined(PIC)
2545                         "mov %5, %%"REG_b"    \n\t"
2546 #endif
2547                         :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
2548                         "m" (funnyYCode)
2549 #if defined(PIC)
2550                         ,"m" (ebxsave)
2551 #endif
2552                         : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
2553 #if !defined(PIC)
2554                         ,"%"REG_b
2555 #endif
2556                 );
2557                 for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
2558         }
2559         else
2560         {
2561 #endif
2562         long xInc_shr16 = xInc >> 16;
2563         uint16_t xInc_mask = xInc & 0xffff;
2564         //NO MMX just normal asm ...
2565         asm volatile(
2566                 "xor %%"REG_a", %%"REG_a"       \n\t" // i
2567                 "xor %%"REG_d", %%"REG_d"       \n\t" // xx
2568                 "xorl %%ecx, %%ecx              \n\t" // 2*xalpha
2569                 ASMALIGN(4)
2570                 "1:                             \n\t"
2571                 "movzbl  (%0, %%"REG_d"), %%edi \n\t" //src[xx]
2572                 "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
2573                 "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
2574                 "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
2575                 "shll $16, %%edi                \n\t"
2576                 "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
2577                 "mov %1, %%"REG_D"              \n\t"
2578                 "shrl $9, %%esi                 \n\t"
2579                 "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
2580                 "addw %4, %%cx                  \n\t" //2*xalpha += xInc&0xFFFF
2581                 "adc %3, %%"REG_d"              \n\t" //xx+= xInc>>16 + carry
2582
2583                 "movzbl (%0, %%"REG_d"), %%edi  \n\t" //src[xx]
2584                 "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
2585                 "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
2586                 "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
2587                 "shll $16, %%edi                \n\t"
2588                 "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
2589                 "mov %1, %%"REG_D"              \n\t"
2590                 "shrl $9, %%esi                 \n\t"
2591                 "movw %%si, 2(%%"REG_D", %%"REG_a", 2)\n\t"
2592                 "addw %4, %%cx                  \n\t" //2*xalpha += xInc&0xFFFF
2593                 "adc %3, %%"REG_d"              \n\t" //xx+= xInc>>16 + carry
2594
2595
2596                 "add $2, %%"REG_a"              \n\t"
2597                 "cmp %2, %%"REG_a"              \n\t"
2598                 " jb 1b                         \n\t"
2599
2600
2601                 :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
2602                 : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
2603                 );
2604 #ifdef HAVE_MMX2
2605         } //if MMX2 can't be used
2606 #endif
2607 #else
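        /* Portable fast-bilinear path: xpos is a 16.16 fixed-point source position and
           xalpha its top 7 fraction bits; the interpolated sample is kept scaled by 128
           (7 extra bits of precision) for the vertical scaler. */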
2608         int i;
2609         unsigned int xpos=0;
2610         for(i=0;i<dstWidth;i++)
2611         {
2612                 register unsigned int xx=xpos>>16;
2613                 register unsigned int xalpha=(xpos&0xFFFF)>>9;
2614                 dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
2615                 xpos+=xInc;
2616         }
2617 #endif
2618     }
2619 }
2620
2621 inline static void RENAME(hcscale)(uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2,
2622                                    int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
2623                                    int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
2624                                    int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
2625                                    int32_t *mmx2FilterPos)
2626 {
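    /* Same structure as hyscale, but for the two chroma planes: the second plane's
       converted/scaled data lives at an offset of 2048 from the first
       (formatConvBuffer+2048, dst+2048). */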
2627     if(srcFormat==PIX_FMT_YUYV422)
2628     {
2629         RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
2630         src1= formatConvBuffer;
2631         src2= formatConvBuffer+2048;
2632     }
2633     else if(srcFormat==PIX_FMT_UYVY422)
2634     {
2635         RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
2636         src1= formatConvBuffer;
2637         src2= formatConvBuffer+2048;
2638     }
2639     else if(srcFormat==PIX_FMT_RGB32)
2640     {
2641         RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
2642         src1= formatConvBuffer;
2643         src2= formatConvBuffer+2048;
2644     }
2645     else if(srcFormat==PIX_FMT_BGR24)
2646     {
2647         RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
2648         src1= formatConvBuffer;
2649         src2= formatConvBuffer+2048;
2650     }
2651     else if(srcFormat==PIX_FMT_BGR565)
2652     {
2653         RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
2654         src1= formatConvBuffer;
2655         src2= formatConvBuffer+2048;
2656     }
2657     else if(srcFormat==PIX_FMT_BGR555)
2658     {
2659         RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
2660         src1= formatConvBuffer;
2661         src2= formatConvBuffer+2048;
2662     }
2663     else if(srcFormat==PIX_FMT_BGR32)
2664     {
2665         RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
2666         src1= formatConvBuffer;
2667         src2= formatConvBuffer+2048;
2668     }
2669     else if(srcFormat==PIX_FMT_RGB24)
2670     {
2671         RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
2672         src1= formatConvBuffer;
2673         src2= formatConvBuffer+2048;
2674     }
2675     else if(isGray(srcFormat))
2676     {
2677         return;
2678     }
2679
2680 #ifdef HAVE_MMX
2681         // use the new MMX scaler if MMX2 can't be used (it's faster than the x86 asm one)
2682     if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
2683 #else
2684     if(!(flags&SWS_FAST_BILINEAR))
2685 #endif
2686     {
2687         RENAME(hScale)(dst     , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
2688         RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
2689     }
2690     else // Fast Bilinear upscale / crap downscale
2691     {
2692 #if defined(ARCH_X86) || defined(ARCH_X86_64)
2693 #ifdef HAVE_MMX2
2694         int i;
2695 #if defined(PIC)
2696         uint64_t ebxsave __attribute__((aligned(8)));
2697 #endif
2698         if(canMMX2BeUsed)
2699         {
2700                 asm volatile(
2701 #if defined(PIC)
2702                         "mov %%"REG_b", %6    \n\t"
2703 #endif
2704                         "pxor %%mm7, %%mm7              \n\t"
2705                         "mov %0, %%"REG_c"              \n\t"
2706                         "mov %1, %%"REG_D"              \n\t"
2707                         "mov %2, %%"REG_d"              \n\t"
2708                         "mov %3, %%"REG_b"              \n\t"
2709                         "xor %%"REG_a", %%"REG_a"       \n\t" // i
2710                         PREFETCH" (%%"REG_c")           \n\t"
2711                         PREFETCH" 32(%%"REG_c")         \n\t"
2712                         PREFETCH" 64(%%"REG_c")         \n\t"
2713
2714 #ifdef ARCH_X86_64
2715
2716 #define FUNNY_UV_CODE \
2717                         "movl (%%"REG_b"), %%esi        \n\t"\
2718                         "call *%4                       \n\t"\
2719                         "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
2720                         "add %%"REG_S", %%"REG_c"       \n\t"\
2721                         "add %%"REG_a", %%"REG_D"       \n\t"\
2722                         "xor %%"REG_a", %%"REG_a"       \n\t"\
2723
2724 #else
2725
2726 #define FUNNY_UV_CODE \
2727                         "movl (%%"REG_b"), %%esi        \n\t"\
2728                         "call *%4                       \n\t"\
2729                         "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
2730                         "add %%"REG_a", %%"REG_D"       \n\t"\
2731                         "xor %%"REG_a", %%"REG_a"       \n\t"\
2732
2733 #endif
2734
2735 FUNNY_UV_CODE
2736 FUNNY_UV_CODE
2737 FUNNY_UV_CODE
2738 FUNNY_UV_CODE
2739                         "xor %%"REG_a", %%"REG_a"       \n\t" // i
2740                         "mov %5, %%"REG_c"              \n\t" // src
2741                         "mov %1, %%"REG_D"              \n\t" // buf1
2742                         "add $4096, %%"REG_D"           \n\t"
2743                         PREFETCH" (%%"REG_c")           \n\t"
2744                         PREFETCH" 32(%%"REG_c")         \n\t"
2745                         PREFETCH" 64(%%"REG_c")         \n\t"
2746
2747 FUNNY_UV_CODE
2748 FUNNY_UV_CODE
2749 FUNNY_UV_CODE
2750 FUNNY_UV_CODE
2751
2752 #if defined(PIC)
2753                         "mov %6, %%"REG_b"    \n\t"
2754 #endif
2755                         :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
2756                         "m" (funnyUVCode), "m" (src2)
2757 #if defined(PIC)
2758                         ,"m" (ebxsave)
2759 #endif
2760                         : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
2761 #if !defined(PIC)
2762                          ,"%"REG_b
2763 #endif
2764                 );
2765                 for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
2766                 {
2767 //                      printf("%d %d %d\n", dstWidth, i, srcW);
2768                         dst[i] = src1[srcW-1]*128;
2769                         dst[i+2048] = src2[srcW-1]*128;
2770                 }
2771         }
2772         else
2773         {
2774 #endif
2775         long xInc_shr16 = (long) (xInc >> 16);
2776         uint16_t xInc_mask = xInc & 0xffff; 
2777         asm volatile(
2778                 "xor %%"REG_a", %%"REG_a"       \n\t" // i
2779                 "xor %%"REG_d", %%"REG_d"               \n\t" // xx
2780                 "xorl %%ecx, %%ecx              \n\t" // 2*xalpha
2781                 ASMALIGN(4)
2782                 "1:                             \n\t"
2783                 "mov %0, %%"REG_S"              \n\t"
2784                 "movzbl  (%%"REG_S", %%"REG_d"), %%edi  \n\t" //src[xx]
2785                 "movzbl 1(%%"REG_S", %%"REG_d"), %%esi  \n\t" //src[xx+1]
2786                 "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
2787                 "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
2788                 "shll $16, %%edi                \n\t"
2789                 "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
2790                 "mov %1, %%"REG_D"              \n\t"
2791                 "shrl $9, %%esi                 \n\t"
2792                 "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
2793
2794                 "movzbl  (%5, %%"REG_d"), %%edi \n\t" //src[xx]
2795                 "movzbl 1(%5, %%"REG_d"), %%esi \n\t" //src[xx+1]
2796                 "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
2797                 "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
2798                 "shll $16, %%edi                \n\t"
2799                 "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
2800                 "mov %1, %%"REG_D"              \n\t"
2801                 "shrl $9, %%esi                 \n\t"
2802                 "movw %%si, 4096(%%"REG_D", %%"REG_a", 2)\n\t"
2803
2804                 "addw %4, %%cx                  \n\t" //2*xalpha += xInc&0xFFFF
2805                 "adc %3, %%"REG_d"              \n\t" //xx+= xInc>>16 + carry
2806                 "add $1, %%"REG_a"              \n\t"
2807                 "cmp %2, %%"REG_a"              \n\t"
2808                 " jb 1b                         \n\t"
2809
2810 /* GCC-3.3 makes MPlayer crash on IA-32 machines when using "g" operand here,
2811    which is needed to support GCC-4.0 */
2812 #if defined(ARCH_X86_64) && ((__GNUC__ > 3) || ( __GNUC__ == 3 && __GNUC_MINOR__ >= 4))
2813                 :: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
2814 #else
2815                 :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
2816 #endif
2817                 "r" (src2)
2818                 : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
2819                 );
2820 #ifdef HAVE_MMX2
2821         } //if MMX2 can't be used
2822 #endif
2823 #else
2824         int i;
2825         unsigned int xpos=0;
2826         for(i=0;i<dstWidth;i++)
2827         {
2828                 register unsigned int xx=xpos>>16;
2829                 register unsigned int xalpha=(xpos&0xFFFF)>>9;
2830                 dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
2831                 dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
2832 /* slower
2833           dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
2834           dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
2835 */
2836                 xpos+=xInc;
2837         }
2838 #endif
2839    }
2840 }
2841
2842 static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
2843              int srcSliceH, uint8_t* dst[], int dstStride[]){
2844
2845         /* load a few things into local vars to make the code more readable and faster */
2846         const int srcW= c->srcW;
2847         const int dstW= c->dstW;
2848         const int dstH= c->dstH;
2849         const int chrDstW= c->chrDstW;
2850         const int chrSrcW= c->chrSrcW;
2851         const int lumXInc= c->lumXInc;
2852         const int chrXInc= c->chrXInc;
2853         const int dstFormat= c->dstFormat;
2854         const int srcFormat= c->srcFormat;
2855         const int flags= c->flags;
2856         const int canMMX2BeUsed= c->canMMX2BeUsed;
2857         int16_t *vLumFilterPos= c->vLumFilterPos;
2858         int16_t *vChrFilterPos= c->vChrFilterPos;
2859         int16_t *hLumFilterPos= c->hLumFilterPos;
2860         int16_t *hChrFilterPos= c->hChrFilterPos;
2861         int16_t *vLumFilter= c->vLumFilter;
2862         int16_t *vChrFilter= c->vChrFilter;
2863         int16_t *hLumFilter= c->hLumFilter;
2864         int16_t *hChrFilter= c->hChrFilter;
2865         int32_t *lumMmxFilter= c->lumMmxFilter;
2866         int32_t *chrMmxFilter= c->chrMmxFilter;
2867         const int vLumFilterSize= c->vLumFilterSize;
2868         const int vChrFilterSize= c->vChrFilterSize;
2869         const int hLumFilterSize= c->hLumFilterSize;
2870         const int hChrFilterSize= c->hChrFilterSize;
2871         int16_t **lumPixBuf= c->lumPixBuf;
2872         int16_t **chrPixBuf= c->chrPixBuf;
2873         const int vLumBufSize= c->vLumBufSize;
2874         const int vChrBufSize= c->vChrBufSize;
2875         uint8_t *funnyYCode= c->funnyYCode;
2876         uint8_t *funnyUVCode= c->funnyUVCode;
2877         uint8_t *formatConvBuffer= c->formatConvBuffer;
2878         const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
2879         const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
2880         int lastDstY;
2881
2882         /* vars which will change and which we need to store back into the context */
2883         int dstY= c->dstY;
2884         int lumBufIndex= c->lumBufIndex;
2885         int chrBufIndex= c->chrBufIndex;
2886         int lastInLumBuf= c->lastInLumBuf;
2887         int lastInChrBuf= c->lastInChrBuf;
2888         
2889         if(isPacked(c->srcFormat)){
2890                 src[0]=
2891                 src[1]=
2892                 src[2]= src[0];
2893                 srcStride[0]=
2894                 srcStride[1]=
2895                 srcStride[2]= srcStride[0];
2896         }
2897         srcStride[1]<<= c->vChrDrop;
2898         srcStride[2]<<= c->vChrDrop;
2899
2900 //      printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
2901 //              (int)dst[0], (int)dst[1], (int)dst[2]);
2902
2903 #if 0 //self test FIXME move to a vfilter or something
2904 {
2905 static volatile int i=0;
2906 i++;
2907 if(srcFormat==PIX_FMT_YUV420P && i==1 && srcSliceH>= c->srcH)
2908         selfTest(src, srcStride, c->srcW, c->srcH);
2909 i--;
2910 }
2911 #endif
2912
2913 //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
2914 //dstStride[0],dstStride[1],dstStride[2]);
2915
2916         if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
2917         {
2918                 static int firstTime=1; //FIXME move this into the context perhaps
2919                 if(flags & SWS_PRINT_INFO && firstTime)
2920                 {
2921                         MSG_WARN("SwScaler: Warning: dstStride is not aligned!\n"
2922                                         "SwScaler:          ->cannot do aligned memory accesses anymore\n");
2923                         firstTime=0;
2924                 }
2925         }
2926
2927         /* Note: the user might start scaling in the middle of the picture, so this will not get executed.
2928            This is not really intended, but it currently works, so people might do it. */
2929         if(srcSliceY ==0){
2930                 lumBufIndex=0;
2931                 chrBufIndex=0;
2932                 dstY=0; 
2933                 lastInLumBuf= -1;
2934                 lastInChrBuf= -1;
2935         }
2936
2937         lastDstY= dstY;
2938
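        /* Main loop: for every output line, work out which source lines the vertical
           filter needs (vLumFilterPos/vChrFilterPos), horizontally scale any that are
           not yet in the lumPixBuf/chrPixBuf ring buffers, then vertically scale them
           into the destination. If the current slice does not yet contain enough source
           lines, buffer what is available and break out until the next slice arrives. */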
2939         for(;dstY < dstH; dstY++){
2940                 unsigned char *dest =dst[0]+dstStride[0]*dstY;
2941                 const int chrDstY= dstY>>c->chrDstVSubSample;
2942                 unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
2943                 unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
2944
2945                 const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
2946                 const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
2947                 const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
2948                 const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
2949
2950 //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
2951 // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize,  c->chrSrcVSubSample);
2952                 //handle holes (FAST_BILINEAR & weird filters)
2953                 if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
2954                 if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
2955 //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
2956                 ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1)
2957                 ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1)
2958
2959                 // Do we have enough lines in this slice to output the dstY line
2960                 if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
2961                 {
2962                         //Do horizontal scaling
2963                         while(lastInLumBuf < lastLumSrcY)
2964                         {
2965                                 uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
2966                                 lumBufIndex++;
2967 //                              printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf,  lastLumSrcY);
2968                                 ASSERT(lumBufIndex < 2*vLumBufSize)
2969                                 ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
2970                                 ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
2971 //                              printf("%d %d\n", lumBufIndex, vLumBufSize);
2972                                 RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
2973                                                 flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
2974                                                 funnyYCode, c->srcFormat, formatConvBuffer, 
2975                                                 c->lumMmx2Filter, c->lumMmx2FilterPos);
2976                                 lastInLumBuf++;
2977                         }
2978                         while(lastInChrBuf < lastChrSrcY)
2979                         {
2980                                 uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
2981                                 uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
2982                                 chrBufIndex++;
2983                                 ASSERT(chrBufIndex < 2*vChrBufSize)
2984                                 ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH))
2985                                 ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
2986                                 //FIXME replace parameters through context struct (some at least)
2987
2988                                 if(!(isGray(srcFormat) || isGray(dstFormat)))
2989                                         RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
2990                                                 flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
2991                                                 funnyUVCode, c->srcFormat, formatConvBuffer, 
2992                                                 c->chrMmx2Filter, c->chrMmx2FilterPos);
2993                                 lastInChrBuf++;
2994                         }
2995                         //wrap buf index around to stay inside the ring buffer
2996                         if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
2997                         if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
2998                 }
2999                 else // not enough lines left in this slice -> load the rest in the buffer
3000                 {
3001 /*              printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
3002                         firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
3003                         lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
3004                         vChrBufSize, vLumBufSize);*/
3005
3006                         //Do horizontal scaling
3007                         while(lastInLumBuf+1 < srcSliceY + srcSliceH)
3008                         {
3009                                 uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
3010                                 lumBufIndex++;
3011                                 ASSERT(lumBufIndex < 2*vLumBufSize)
3012                                 ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
3013                                 ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
3014                                 RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
3015                                                 flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
3016                                                 funnyYCode, c->srcFormat, formatConvBuffer, 
3017                                                 c->lumMmx2Filter, c->lumMmx2FilterPos);
3018                                 lastInLumBuf++;
3019                         }
3020                         while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
3021                         {
3022                                 uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
3023                                 uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
3024                                 chrBufIndex++;
3025                                 ASSERT(chrBufIndex < 2*vChrBufSize)
3026                                 ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH)
3027                                 ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
3028
3029                                 if(!(isGray(srcFormat) || isGray(dstFormat)))
3030                                         RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
3031                                                 flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
3032                                                 funnyUVCode, c->srcFormat, formatConvBuffer, 
3033                                                 c->chrMmx2Filter, c->chrMmx2FilterPos);
3034                                 lastInChrBuf++;
3035                         }
3036                         //wrap buf index around to stay inside the ring buffer
3037                         if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
3038                         if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
3039                         break; //we can't output a dstY line so let's try with the next slice
3040                 }
3041
3042 #ifdef HAVE_MMX
3043                 b5Dither= dither8[dstY&1];
3044                 g6Dither= dither4[dstY&1];
3045                 g5Dither= dither8[dstY&1];
3046                 r5Dither= dither8[(dstY+1)&1];
3047 #endif
3048             if(dstY < dstH-2)
3049             {
3050                 int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
3051                 int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
3052 #ifdef HAVE_MMX
3053                 int i;
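            /* Pack the vertical filter for the MMX code: each entry stores source line
               pointers followed by their 16-bit coefficients packed into 32-bit words
               (replicated via *0x10001, or paired for SWS_ACCURATE_RND), so the MMX
               vertical scaler can fetch pointer/coefficient pairs with simple loads. */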
3054             if(flags & SWS_ACCURATE_RND){
3055                         for(i=0; i<vLumFilterSize; i+=2){
3056                                 lumMmxFilter[2*i+0]= lumSrcPtr[i  ];
3057                                 lumMmxFilter[2*i+1]= lumSrcPtr[i+(vLumFilterSize>1)];
3058                                 lumMmxFilter[2*i+2]=
3059                                 lumMmxFilter[2*i+3]= vLumFilter[dstY*vLumFilterSize + i    ]
3060                                                 + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
3061                         }
3062                         for(i=0; i<vChrFilterSize; i+=2){
3063                                 chrMmxFilter[2*i+0]= chrSrcPtr[i  ];
3064                                 chrMmxFilter[2*i+1]= chrSrcPtr[i+(vChrFilterSize>1)];
3065                                 chrMmxFilter[2*i+2]=
3066                                 chrMmxFilter[2*i+3]= vChrFilter[chrDstY*vChrFilterSize + i    ]
3067                                                 + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
3068                         }
3069             }else{
3070                 for(i=0; i<vLumFilterSize; i++)
3071                 {
3072                         lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
3073                         lumMmxFilter[4*i+2]= 
3074                         lumMmxFilter[4*i+3]= 
3075                                 ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
3076                 }
3077                 for(i=0; i<vChrFilterSize; i++)
3078                 {
3079                         chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
3080                         chrMmxFilter[4*i+2]= 
3081                         chrMmxFilter[4*i+3]= 
3082                                 ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
3083                 }
3084             }
3085 #endif
3086                 if(dstFormat == PIX_FMT_NV12 || dstFormat&