/*
 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#undef REAL_MOVNTQ
#undef MOVNTQ
#undef PREFETCH

#if COMPILE_TEMPLATE_MMX2
#define PREFETCH "prefetchnta"
#else
#define PREFETCH  " # nop"
#endif

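/* With MMX2 we can use a non-temporal prefetch and store (prefetchnta/movntq)
 * to stream pixels past the cache; on plain MMX both fall back to a nop and
 * an ordinary movq. */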
#if COMPILE_TEMPLATE_MMX2
#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif
#define MOVNTQ(a,b)  REAL_MOVNTQ(a,b)

#define YSCALEYUV2YV12X(offset, dest, end, pos) \
    __asm__ volatile(\
        "movq             "VROUNDER_OFFSET"(%0), %%mm3      \n\t"\
        "movq                             %%mm3, %%mm4      \n\t"\
        "lea                     " offset "(%0), %%"REG_d"  \n\t"\
        "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
        ".p2align                             4             \n\t" /* FIXME Unroll? */\
        "1:                                                 \n\t"\
        "movq                      8(%%"REG_d"), %%mm0      \n\t" /* filterCoeff */\
        "movq                (%%"REG_S", %3, 2), %%mm2      \n\t" /* srcData */\
        "movq               8(%%"REG_S", %3, 2), %%mm5      \n\t" /* srcData */\
        "add                                $16, %%"REG_d"  \n\t"\
        "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
        "test                         %%"REG_S", %%"REG_S"  \n\t"\
        "pmulhw                           %%mm0, %%mm2      \n\t"\
        "pmulhw                           %%mm0, %%mm5      \n\t"\
        "paddw                            %%mm2, %%mm3      \n\t"\
        "paddw                            %%mm5, %%mm4      \n\t"\
        " jnz                                1b             \n\t"\
        "psraw                               $3, %%mm3      \n\t"\
        "psraw                               $3, %%mm4      \n\t"\
        "packuswb                         %%mm4, %%mm3      \n\t"\
        MOVNTQ(%%mm3, (%1, %3))\
        "add                                 $8, %3         \n\t"\
        "cmp                                 %2, %3         \n\t"\
        "movq             "VROUNDER_OFFSET"(%0), %%mm3      \n\t"\
        "movq                             %%mm3, %%mm4      \n\t"\
        "lea                     " offset "(%0), %%"REG_d"  \n\t"\
        "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
        "jb                                  1b             \n\t"\
        :: "r" (&c->redDither),\
           "r" (dest), "g" ((x86_reg)(end)), "r"((x86_reg)(pos))\
        : "%"REG_d, "%"REG_S\
    );

static void RENAME(yuv2yuvX)(SwsContext *c, const int16_t *lumFilter,
                             const int16_t **lumSrc, int lumFilterSize,
                             const int16_t *chrFilter, const int16_t **chrUSrc,
                             const int16_t **chrVSrc,
                             int chrFilterSize, const int16_t **alpSrc,
                             uint8_t *dest[4], int dstW, int chrDstW)
{
    uint8_t *yDest = dest[0], *uDest = dest[1], *vDest = dest[2],
            *aDest = CONFIG_SWSCALE_ALPHA ? dest[3] : NULL;

    if (uDest) {
        x86_reg uv_off = c->uv_offx2 >> 1;
        YSCALEYUV2YV12X(CHR_MMX_FILTER_OFFSET, uDest, chrDstW, 0)
        YSCALEYUV2YV12X(CHR_MMX_FILTER_OFFSET, vDest - uv_off, chrDstW + uv_off, uv_off)
    }
    if (CONFIG_SWSCALE_ALPHA && aDest) {
        YSCALEYUV2YV12X(ALP_MMX_FILTER_OFFSET, aDest, dstW, 0)
    }

    YSCALEYUV2YV12X(LUM_MMX_FILTER_OFFSET, yDest, dstW, 0)
}
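
/* For reference, a minimal scalar sketch of what YSCALEYUV2YV12X vectorizes:
 * an N-tap vertical filter over int16_t line buffers, accumulated in fixed
 * point, rounded and clipped to 8 bits.  Kept out of the build; the rounding
 * bias and shift below are illustrative assumptions, not the exact constants
 * used by the MMX path. */
#if 0
static void vertical_filter_sketch(uint8_t *dst, const int16_t **src,
                                   const int16_t *filter, int filterSize,
                                   int width)
{
    int i, j;
    for (i = 0; i < width; i++) {
        int val = 1 << 18;                    /* rounding bias (assumed scale) */
        for (j = 0; j < filterSize; j++)
            val += src[j][i] * filter[j];     /* accumulate the filter taps */
        val >>= 19;                           /* back to the 8-bit range */
        dst[i] = val < 0 ? 0 : val > 255 ? 255 : val;
    }
}
#endif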

#define YSCALEYUV2YV12X_ACCURATE(offset, dest, end, pos) \
    __asm__ volatile(\
        "lea                     " offset "(%0), %%"REG_d"  \n\t"\
        "pxor                             %%mm4, %%mm4      \n\t"\
        "pxor                             %%mm5, %%mm5      \n\t"\
        "pxor                             %%mm6, %%mm6      \n\t"\
        "pxor                             %%mm7, %%mm7      \n\t"\
        "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
        ".p2align                             4             \n\t"\
        "1:                                                 \n\t"\
        "movq                (%%"REG_S", %3, 2), %%mm0      \n\t" /* srcData */\
        "movq               8(%%"REG_S", %3, 2), %%mm2      \n\t" /* srcData */\
        "mov        "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S"  \n\t"\
        "movq                (%%"REG_S", %3, 2), %%mm1      \n\t" /* srcData */\
        "movq                             %%mm0, %%mm3      \n\t"\
        "punpcklwd                        %%mm1, %%mm0      \n\t"\
        "punpckhwd                        %%mm1, %%mm3      \n\t"\
        "movq       "STR(APCK_COEF)"(%%"REG_d"), %%mm1      \n\t" /* filterCoeff */\
        "pmaddwd                          %%mm1, %%mm0      \n\t"\
        "pmaddwd                          %%mm1, %%mm3      \n\t"\
        "paddd                            %%mm0, %%mm4      \n\t"\
        "paddd                            %%mm3, %%mm5      \n\t"\
        "movq               8(%%"REG_S", %3, 2), %%mm3      \n\t" /* srcData */\
        "mov        "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S"  \n\t"\
        "add                  $"STR(APCK_SIZE)", %%"REG_d"  \n\t"\
        "test                         %%"REG_S", %%"REG_S"  \n\t"\
        "movq                             %%mm2, %%mm0      \n\t"\
        "punpcklwd                        %%mm3, %%mm2      \n\t"\
        "punpckhwd                        %%mm3, %%mm0      \n\t"\
        "pmaddwd                          %%mm1, %%mm2      \n\t"\
        "pmaddwd                          %%mm1, %%mm0      \n\t"\
        "paddd                            %%mm2, %%mm6      \n\t"\
        "paddd                            %%mm0, %%mm7      \n\t"\
        " jnz                                1b             \n\t"\
        "psrad                              $16, %%mm4      \n\t"\
        "psrad                              $16, %%mm5      \n\t"\
        "psrad                              $16, %%mm6      \n\t"\
        "psrad                              $16, %%mm7      \n\t"\
        "movq             "VROUNDER_OFFSET"(%0), %%mm0      \n\t"\
        "packssdw                         %%mm5, %%mm4      \n\t"\
        "packssdw                         %%mm7, %%mm6      \n\t"\
        "paddw                            %%mm0, %%mm4      \n\t"\
        "paddw                            %%mm0, %%mm6      \n\t"\
        "psraw                               $3, %%mm4      \n\t"\
        "psraw                               $3, %%mm6      \n\t"\
        "packuswb                         %%mm6, %%mm4      \n\t"\
        MOVNTQ(%%mm4, (%1, %3))\
        "add                                 $8, %3         \n\t"\
        "cmp                                 %2, %3         \n\t"\
        "lea                     " offset "(%0), %%"REG_d"  \n\t"\
        "pxor                             %%mm4, %%mm4      \n\t"\
        "pxor                             %%mm5, %%mm5      \n\t"\
        "pxor                             %%mm6, %%mm6      \n\t"\
        "pxor                             %%mm7, %%mm7      \n\t"\
        "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
        "jb                                  1b             \n\t"\
        :: "r" (&c->redDither),\
        "r" (dest), "g" ((x86_reg)(end)), "r"((x86_reg)(pos))\
        : "%"REG_a, "%"REG_d, "%"REG_S\
    );

static void RENAME(yuv2yuvX_ar)(SwsContext *c, const int16_t *lumFilter,
                                const int16_t **lumSrc, int lumFilterSize,
                                const int16_t *chrFilter, const int16_t **chrUSrc,
                                const int16_t **chrVSrc,
                                int chrFilterSize, const int16_t **alpSrc,
                                uint8_t *dest[4], int dstW, int chrDstW)
{
    uint8_t *yDest = dest[0], *uDest = dest[1], *vDest = dest[2],
            *aDest = CONFIG_SWSCALE_ALPHA ? dest[3] : NULL;

    if (uDest) {
        x86_reg uv_off = c->uv_offx2 >> 1;
        YSCALEYUV2YV12X_ACCURATE(CHR_MMX_FILTER_OFFSET, uDest, chrDstW, 0)
        YSCALEYUV2YV12X_ACCURATE(CHR_MMX_FILTER_OFFSET, vDest - uv_off, chrDstW + uv_off, uv_off)
    }
    if (CONFIG_SWSCALE_ALPHA && aDest) {
        YSCALEYUV2YV12X_ACCURATE(ALP_MMX_FILTER_OFFSET, aDest, dstW, 0)
    }

    YSCALEYUV2YV12X_ACCURATE(LUM_MMX_FILTER_OFFSET, yDest, dstW, 0)
}

static void RENAME(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc,
                             const int16_t *chrUSrc, const int16_t *chrVSrc,
                             const int16_t *alpSrc,
                             uint8_t *dst[4], int dstW, int chrDstW)
{
    int p= 4;
    const int16_t *src[4]= {
        lumSrc + dstW,     chrUSrc + chrDstW,
        chrVSrc + chrDstW, alpSrc + dstW
    };
    x86_reg counter[4]= { dstW, chrDstW, chrDstW, dstW };

    while (p--) {
        if (dst[p]) {
            __asm__ volatile(
                "mov %2, %%"REG_a"                    \n\t"
                ".p2align               4             \n\t" /* FIXME Unroll? */
                "1:                                   \n\t"
                "movq  (%0, %%"REG_a", 2), %%mm0      \n\t"
                "movq 8(%0, %%"REG_a", 2), %%mm1      \n\t"
                "psraw                 $7, %%mm0      \n\t"
                "psraw                 $7, %%mm1      \n\t"
                "packuswb           %%mm1, %%mm0      \n\t"
                MOVNTQ(%%mm0, (%1, %%REGa))
                "add                   $8, %%"REG_a"  \n\t"
                "jnc                   1b             \n\t"
                :: "r" (src[p]), "r" (dst[p] + counter[p]),
                   "g" (-counter[p])
                : "%"REG_a
            );
        }
    }
}

static void RENAME(yuv2yuv1_ar)(SwsContext *c, const int16_t *lumSrc,
                                const int16_t *chrUSrc, const int16_t *chrVSrc,
                                const int16_t *alpSrc,
                                uint8_t *dst[4], int dstW, int chrDstW)
{
    int p= 4;
    const int16_t *src[4]= {
        lumSrc + dstW,     chrUSrc + chrDstW,
        chrVSrc + chrDstW, alpSrc + dstW
    };
    x86_reg counter[4]= { dstW, chrDstW, chrDstW, dstW };

    while (p--) {
        if (dst[p]) {
            __asm__ volatile(
                "mov %2, %%"REG_a"                    \n\t"
                "pcmpeqw %%mm7, %%mm7                 \n\t"
                "psrlw                 $15, %%mm7     \n\t"
                "psllw                  $6, %%mm7     \n\t"
                ".p2align                4            \n\t" /* FIXME Unroll? */
                "1:                                   \n\t"
                "movq  (%0, %%"REG_a", 2), %%mm0      \n\t"
                "movq 8(%0, %%"REG_a", 2), %%mm1      \n\t"
                "paddsw             %%mm7, %%mm0      \n\t"
                "paddsw             %%mm7, %%mm1      \n\t"
                "psraw                 $7, %%mm0      \n\t"
                "psraw                 $7, %%mm1      \n\t"
                "packuswb           %%mm1, %%mm0      \n\t"
                MOVNTQ(%%mm0, (%1, %%REGa))
                "add                   $8, %%"REG_a"  \n\t"
                "jnc                   1b             \n\t"
                :: "r" (src[p]), "r" (dst[p] + counter[p]),
                   "g" (-counter[p])
                : "%"REG_a
            );
        }
    }
}
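
/* Scalar sketch of the unscaled plane store above: yuv2yuv1 truncates the
 * intermediate int16_t values back to 8 bits, while the _ar variant adds half
 * of 1 << 7 first (the 0x0040 constant built in %%mm7) for correct rounding.
 * Illustration only, not part of the build. */
#if 0
static void write_plane_sketch(uint8_t *dst, const int16_t *src, int width,
                               int accurate)
{
    int i;
    for (i = 0; i < width; i++) {
        int val = (accurate ? src[i] + 64 : src[i]) >> 7;
        dst[i] = val < 0 ? 0 : val > 255 ? 255 : val;
    }
}
#endif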

#define YSCALEYUV2PACKEDX_UV \
    __asm__ volatile(\
        "xor                   %%"REG_a", %%"REG_a"     \n\t"\
        ".p2align                      4                \n\t"\
        "nop                                            \n\t"\
        "1:                                             \n\t"\
        "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"     \n\t"\
        "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
        "movq      "VROUNDER_OFFSET"(%0), %%mm3         \n\t"\
        "movq                      %%mm3, %%mm4         \n\t"\
        ".p2align                      4                \n\t"\
        "2:                                             \n\t"\
        "movq               8(%%"REG_d"), %%mm0         \n\t" /* filterCoeff */\
        "movq     (%%"REG_S", %%"REG_a"), %%mm2         \n\t" /* UsrcData */\
        "add                          %6, %%"REG_S"     \n\t" \
        "movq     (%%"REG_S", %%"REG_a"), %%mm5         \n\t" /* VsrcData */\
        "add                         $16, %%"REG_d"     \n\t"\
        "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
        "pmulhw                    %%mm0, %%mm2         \n\t"\
        "pmulhw                    %%mm0, %%mm5         \n\t"\
        "paddw                     %%mm2, %%mm3         \n\t"\
        "paddw                     %%mm5, %%mm4         \n\t"\
        "test                  %%"REG_S", %%"REG_S"     \n\t"\
        " jnz                         2b                \n\t"\

#define YSCALEYUV2PACKEDX_YA(offset,coeff,src1,src2,dst1,dst2) \
    "lea                "offset"(%0), %%"REG_d"     \n\t"\
    "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
    "movq      "VROUNDER_OFFSET"(%0), "#dst1"       \n\t"\
    "movq                    "#dst1", "#dst2"       \n\t"\
    ".p2align                      4                \n\t"\
    "2:                                             \n\t"\
    "movq               8(%%"REG_d"), "#coeff"      \n\t" /* filterCoeff */\
    "movq  (%%"REG_S", %%"REG_a", 2), "#src1"       \n\t" /* Y1srcData */\
    "movq 8(%%"REG_S", %%"REG_a", 2), "#src2"       \n\t" /* Y2srcData */\
    "add                         $16, %%"REG_d"            \n\t"\
    "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
    "pmulhw                 "#coeff", "#src1"       \n\t"\
    "pmulhw                 "#coeff", "#src2"       \n\t"\
    "paddw                   "#src1", "#dst1"       \n\t"\
    "paddw                   "#src2", "#dst2"       \n\t"\
    "test                  %%"REG_S", %%"REG_S"     \n\t"\
    " jnz                         2b                \n\t"\

#define YSCALEYUV2PACKEDX \
    YSCALEYUV2PACKEDX_UV \
    YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET,%%mm0,%%mm2,%%mm5,%%mm1,%%mm7) \

#define YSCALEYUV2PACKEDX_END                     \
        :: "r" (&c->redDither),                   \
            "m" (dummy), "m" (dummy), "m" (dummy),\
            "r" (dest), "m" (dstW_reg), "m"(uv_off) \
        : "%"REG_a, "%"REG_d, "%"REG_S            \
    );

#define YSCALEYUV2PACKEDX_ACCURATE_UV \
    __asm__ volatile(\
        "xor %%"REG_a", %%"REG_a"                       \n\t"\
        ".p2align                      4                \n\t"\
        "nop                                            \n\t"\
        "1:                                             \n\t"\
        "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"     \n\t"\
        "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
        "pxor                      %%mm4, %%mm4         \n\t"\
        "pxor                      %%mm5, %%mm5         \n\t"\
        "pxor                      %%mm6, %%mm6         \n\t"\
        "pxor                      %%mm7, %%mm7         \n\t"\
        ".p2align                      4                \n\t"\
        "2:                                             \n\t"\
        "movq     (%%"REG_S", %%"REG_a"), %%mm0         \n\t" /* UsrcData */\
        "add                          %6, %%"REG_S"      \n\t" \
        "movq     (%%"REG_S", %%"REG_a"), %%mm2         \n\t" /* VsrcData */\
        "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S"     \n\t"\
        "movq     (%%"REG_S", %%"REG_a"), %%mm1         \n\t" /* UsrcData */\
        "movq                      %%mm0, %%mm3         \n\t"\
        "punpcklwd                 %%mm1, %%mm0         \n\t"\
        "punpckhwd                 %%mm1, %%mm3         \n\t"\
        "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1         \n\t" /* filterCoeff */\
        "pmaddwd                   %%mm1, %%mm0         \n\t"\
        "pmaddwd                   %%mm1, %%mm3         \n\t"\
        "paddd                     %%mm0, %%mm4         \n\t"\
        "paddd                     %%mm3, %%mm5         \n\t"\
        "add                          %6, %%"REG_S"      \n\t" \
        "movq     (%%"REG_S", %%"REG_a"), %%mm3         \n\t" /* VsrcData */\
        "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S"     \n\t"\
        "add           $"STR(APCK_SIZE)", %%"REG_d"     \n\t"\
        "test                  %%"REG_S", %%"REG_S"     \n\t"\
        "movq                      %%mm2, %%mm0         \n\t"\
        "punpcklwd                 %%mm3, %%mm2         \n\t"\
        "punpckhwd                 %%mm3, %%mm0         \n\t"\
        "pmaddwd                   %%mm1, %%mm2         \n\t"\
        "pmaddwd                   %%mm1, %%mm0         \n\t"\
        "paddd                     %%mm2, %%mm6         \n\t"\
        "paddd                     %%mm0, %%mm7         \n\t"\
        " jnz                         2b                \n\t"\
        "psrad                       $16, %%mm4         \n\t"\
        "psrad                       $16, %%mm5         \n\t"\
        "psrad                       $16, %%mm6         \n\t"\
        "psrad                       $16, %%mm7         \n\t"\
        "movq      "VROUNDER_OFFSET"(%0), %%mm0         \n\t"\
        "packssdw                  %%mm5, %%mm4         \n\t"\
        "packssdw                  %%mm7, %%mm6         \n\t"\
        "paddw                     %%mm0, %%mm4         \n\t"\
        "paddw                     %%mm0, %%mm6         \n\t"\
        "movq                      %%mm4, "U_TEMP"(%0)  \n\t"\
        "movq                      %%mm6, "V_TEMP"(%0)  \n\t"\

#define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \
    "lea                "offset"(%0), %%"REG_d"     \n\t"\
    "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
    "pxor                      %%mm1, %%mm1         \n\t"\
    "pxor                      %%mm5, %%mm5         \n\t"\
    "pxor                      %%mm7, %%mm7         \n\t"\
    "pxor                      %%mm6, %%mm6         \n\t"\
    ".p2align                      4                \n\t"\
    "2:                                             \n\t"\
    "movq  (%%"REG_S", %%"REG_a", 2), %%mm0         \n\t" /* Y1srcData */\
    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2         \n\t" /* Y2srcData */\
    "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S"     \n\t"\
    "movq  (%%"REG_S", %%"REG_a", 2), %%mm4         \n\t" /* Y1srcData */\
    "movq                      %%mm0, %%mm3         \n\t"\
    "punpcklwd                 %%mm4, %%mm0         \n\t"\
    "punpckhwd                 %%mm4, %%mm3         \n\t"\
    "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4         \n\t" /* filterCoeff */\
    "pmaddwd                   %%mm4, %%mm0         \n\t"\
    "pmaddwd                   %%mm4, %%mm3         \n\t"\
    "paddd                     %%mm0, %%mm1         \n\t"\
    "paddd                     %%mm3, %%mm5         \n\t"\
    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3         \n\t" /* Y2srcData */\
    "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S"     \n\t"\
    "add           $"STR(APCK_SIZE)", %%"REG_d"     \n\t"\
    "test                  %%"REG_S", %%"REG_S"     \n\t"\
    "movq                      %%mm2, %%mm0         \n\t"\
    "punpcklwd                 %%mm3, %%mm2         \n\t"\
    "punpckhwd                 %%mm3, %%mm0         \n\t"\
    "pmaddwd                   %%mm4, %%mm2         \n\t"\
    "pmaddwd                   %%mm4, %%mm0         \n\t"\
    "paddd                     %%mm2, %%mm7         \n\t"\
    "paddd                     %%mm0, %%mm6         \n\t"\
    " jnz                         2b                \n\t"\
    "psrad                       $16, %%mm1         \n\t"\
    "psrad                       $16, %%mm5         \n\t"\
    "psrad                       $16, %%mm7         \n\t"\
    "psrad                       $16, %%mm6         \n\t"\
    "movq      "VROUNDER_OFFSET"(%0), %%mm0         \n\t"\
    "packssdw                  %%mm5, %%mm1         \n\t"\
    "packssdw                  %%mm6, %%mm7         \n\t"\
    "paddw                     %%mm0, %%mm1         \n\t"\
    "paddw                     %%mm0, %%mm7         \n\t"\
    "movq               "U_TEMP"(%0), %%mm3         \n\t"\
    "movq               "V_TEMP"(%0), %%mm4         \n\t"\

#define YSCALEYUV2PACKEDX_ACCURATE \
    YSCALEYUV2PACKEDX_ACCURATE_UV \
    YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET)

#define YSCALEYUV2RGBX \
    "psubw  "U_OFFSET"(%0), %%mm3       \n\t" /* (U-128)8*/\
    "psubw  "V_OFFSET"(%0), %%mm4       \n\t" /* (V-128)8*/\
    "movq            %%mm3, %%mm2       \n\t" /* (U-128)8*/\
    "movq            %%mm4, %%mm5       \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"(%0), %%mm3       \n\t"\
    "pmulhw "VG_COEFF"(%0), %%mm4       \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "pmulhw "UB_COEFF"(%0), %%mm2       \n\t"\
    "pmulhw "VR_COEFF"(%0), %%mm5       \n\t"\
    "psubw  "Y_OFFSET"(%0), %%mm1       \n\t" /* 8(Y-16)*/\
    "psubw  "Y_OFFSET"(%0), %%mm7       \n\t" /* 8(Y-16)*/\
    "pmulhw  "Y_COEFF"(%0), %%mm1       \n\t"\
    "pmulhw  "Y_COEFF"(%0), %%mm7       \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw           %%mm3, %%mm4       \n\t"\
    "movq            %%mm2, %%mm0       \n\t"\
    "movq            %%mm5, %%mm6       \n\t"\
    "movq            %%mm4, %%mm3       \n\t"\
    "punpcklwd       %%mm2, %%mm2       \n\t"\
    "punpcklwd       %%mm5, %%mm5       \n\t"\
    "punpcklwd       %%mm4, %%mm4       \n\t"\
    "paddw           %%mm1, %%mm2       \n\t"\
    "paddw           %%mm1, %%mm5       \n\t"\
    "paddw           %%mm1, %%mm4       \n\t"\
    "punpckhwd       %%mm0, %%mm0       \n\t"\
    "punpckhwd       %%mm6, %%mm6       \n\t"\
    "punpckhwd       %%mm3, %%mm3       \n\t"\
    "paddw           %%mm7, %%mm0       \n\t"\
    "paddw           %%mm7, %%mm6       \n\t"\
    "paddw           %%mm7, %%mm3       \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb        %%mm0, %%mm2       \n\t"\
    "packuswb        %%mm6, %%mm5       \n\t"\
    "packuswb        %%mm3, %%mm4       \n\t"\

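/* Rough scalar sketch of the conversion YSCALEYUV2RGBX performs in 16-bit
 * fixed point: offset Y/U/V, scale by the per-context coefficients and form
 * B = Y' + ub, G = Y' + ug + vg, R = Y' + vr before packing to bytes.  The
 * BT.601 constants below are illustrative assumptions; the real values come
 * from the UB/UG/VG/VR/Y_COEFF tables in the context. */
#if 0
static void yuv2rgb_pixel_sketch(int y, int u, int v, int *r, int *g, int *b)
{
    int y1 = (y - 16) * 298;                        /* ~1.164 in 8.8 fixed point */
    int cu = u - 128, cv = v - 128;
    int B  = (y1 + 516 * cu            + 128) >> 8; /* ~ 2.017 * Cb             */
    int G  = (y1 - 100 * cu - 208 * cv + 128) >> 8; /* ~-0.392*Cb - 0.813*Cr    */
    int R  = (y1            + 409 * cv + 128) >> 8; /* ~ 1.596 * Cr             */
    *b = B < 0 ? 0 : B > 255 ? 255 : B;
    *g = G < 0 ? 0 : G > 255 ? 255 : G;
    *r = R < 0 ? 0 : R > 255 ? 255 : R;
}
#endif
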
#define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \
    "movq       "#b", "#q2"     \n\t" /* B */\
    "movq       "#r", "#t"      \n\t" /* R */\
    "punpcklbw  "#g", "#b"      \n\t" /* GBGBGBGB 0 */\
    "punpcklbw  "#a", "#r"      \n\t" /* ARARARAR 0 */\
    "punpckhbw  "#g", "#q2"     \n\t" /* GBGBGBGB 2 */\
    "punpckhbw  "#a", "#t"      \n\t" /* ARARARAR 2 */\
    "movq       "#b", "#q0"     \n\t" /* GBGBGBGB 0 */\
    "movq      "#q2", "#q3"     \n\t" /* GBGBGBGB 2 */\
    "punpcklwd  "#r", "#q0"     \n\t" /* ARGBARGB 0 */\
    "punpckhwd  "#r", "#b"      \n\t" /* ARGBARGB 1 */\
    "punpcklwd  "#t", "#q2"     \n\t" /* ARGBARGB 2 */\
    "punpckhwd  "#t", "#q3"     \n\t" /* ARGBARGB 3 */\
\
    MOVNTQ(   q0,   (dst, index, 4))\
    MOVNTQ(    b,  8(dst, index, 4))\
    MOVNTQ(   q2, 16(dst, index, 4))\
    MOVNTQ(   q3, 24(dst, index, 4))\
\
    "add      $8, "#index"      \n\t"\
    "cmp "#dstw", "#index"      \n\t"\
    " jb      1b                \n\t"
#define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)  REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)

static void RENAME(yuv2rgb32_X_ar)(SwsContext *c, const int16_t *lumFilter,
                                   const int16_t **lumSrc, int lumFilterSize,
                                   const int16_t *chrFilter, const int16_t **chrUSrc,
                                   const int16_t **chrVSrc,
                                   int chrFilterSize, const int16_t **alpSrc,
                                   uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_offx2;

    if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
        YSCALEYUV2PACKEDX_ACCURATE
        YSCALEYUV2RGBX
        "movq                      %%mm2, "U_TEMP"(%0)  \n\t"
        "movq                      %%mm4, "V_TEMP"(%0)  \n\t"
        "movq                      %%mm5, "Y_TEMP"(%0)  \n\t"
        YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET)
        "movq               "Y_TEMP"(%0), %%mm5         \n\t"
        "psraw                        $3, %%mm1         \n\t"
        "psraw                        $3, %%mm7         \n\t"
        "packuswb                  %%mm7, %%mm1         \n\t"
        WRITEBGR32(%4, %5, %%REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6)
        YSCALEYUV2PACKEDX_END
    } else {
        YSCALEYUV2PACKEDX_ACCURATE
        YSCALEYUV2RGBX
        "pcmpeqd %%mm7, %%mm7 \n\t"
        WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
        YSCALEYUV2PACKEDX_END
    }
}

static void RENAME(yuv2rgb32_X)(SwsContext *c, const int16_t *lumFilter,
                                const int16_t **lumSrc, int lumFilterSize,
                                const int16_t *chrFilter, const int16_t **chrUSrc,
                                const int16_t **chrVSrc,
                                int chrFilterSize, const int16_t **alpSrc,
                                uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_offx2;

    if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
        YSCALEYUV2PACKEDX
        YSCALEYUV2RGBX
        YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
        "psraw                        $3, %%mm1         \n\t"
        "psraw                        $3, %%mm7         \n\t"
        "packuswb                  %%mm7, %%mm1         \n\t"
        WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
        YSCALEYUV2PACKEDX_END
    } else {
        YSCALEYUV2PACKEDX
        YSCALEYUV2RGBX
        "pcmpeqd %%mm7, %%mm7 \n\t"
        WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
        YSCALEYUV2PACKEDX_END
    }
}

#define REAL_WRITERGB16(dst, dstw, index) \
    "pand "MANGLE(bF8)", %%mm2  \n\t" /* B */\
    "pand "MANGLE(bFC)", %%mm4  \n\t" /* G */\
    "pand "MANGLE(bF8)", %%mm5  \n\t" /* R */\
    "psrlq           $3, %%mm2  \n\t"\
\
    "movq         %%mm2, %%mm1  \n\t"\
    "movq         %%mm4, %%mm3  \n\t"\
\
    "punpcklbw    %%mm7, %%mm3  \n\t"\
    "punpcklbw    %%mm5, %%mm2  \n\t"\
    "punpckhbw    %%mm7, %%mm4  \n\t"\
    "punpckhbw    %%mm5, %%mm1  \n\t"\
\
    "psllq           $3, %%mm3  \n\t"\
    "psllq           $3, %%mm4  \n\t"\
\
    "por          %%mm3, %%mm2  \n\t"\
    "por          %%mm4, %%mm1  \n\t"\
\
    MOVNTQ(%%mm2,  (dst, index, 2))\
    MOVNTQ(%%mm1, 8(dst, index, 2))\
\
    "add             $8, "#index"   \n\t"\
    "cmp        "#dstw", "#index"   \n\t"\
    " jb             1b             \n\t"
#define WRITERGB16(dst, dstw, index)  REAL_WRITERGB16(dst, dstw, index)
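
/* A scalar sketch of the RGB565 packing done by WRITERGB16 (the MMX code does
 * this for eight pixels per iteration, after the optional ordered dither):
 * keep the top 5/6/5 bits of R/G/B and merge them into one 16-bit pixel. */
#if 0
static uint16_t pack_rgb565_sketch(uint8_t r, uint8_t g, uint8_t b)
{
    return (uint16_t)(((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3));
}
#endif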

static void RENAME(yuv2rgb565_X_ar)(SwsContext *c, const int16_t *lumFilter,
                                    const int16_t **lumSrc, int lumFilterSize,
                                    const int16_t *chrFilter, const int16_t **chrUSrc,
                                    const int16_t **chrVSrc,
                                    int chrFilterSize, const int16_t **alpSrc,
                                    uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_offx2;

    YSCALEYUV2PACKEDX_ACCURATE
    YSCALEYUV2RGBX
    "pxor %%mm7, %%mm7 \n\t"
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
    "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
    "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
    "paddusb "RED_DITHER"(%0), %%mm5\n\t"
#endif
    WRITERGB16(%4, %5, %%REGa)
    YSCALEYUV2PACKEDX_END
}

static void RENAME(yuv2rgb565_X)(SwsContext *c, const int16_t *lumFilter,
                                 const int16_t **lumSrc, int lumFilterSize,
                                 const int16_t *chrFilter, const int16_t **chrUSrc,
                                 const int16_t **chrVSrc,
                                 int chrFilterSize, const int16_t **alpSrc,
                                 uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_offx2;

    YSCALEYUV2PACKEDX
    YSCALEYUV2RGBX
    "pxor %%mm7, %%mm7 \n\t"
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
    "paddusb "BLUE_DITHER"(%0), %%mm2  \n\t"
    "paddusb "GREEN_DITHER"(%0), %%mm4  \n\t"
    "paddusb "RED_DITHER"(%0), %%mm5  \n\t"
#endif
    WRITERGB16(%4, %5, %%REGa)
    YSCALEYUV2PACKEDX_END
}

#define REAL_WRITERGB15(dst, dstw, index) \
    "pand "MANGLE(bF8)", %%mm2  \n\t" /* B */\
    "pand "MANGLE(bF8)", %%mm4  \n\t" /* G */\
    "pand "MANGLE(bF8)", %%mm5  \n\t" /* R */\
    "psrlq           $3, %%mm2  \n\t"\
    "psrlq           $1, %%mm5  \n\t"\
\
    "movq         %%mm2, %%mm1  \n\t"\
    "movq         %%mm4, %%mm3  \n\t"\
\
    "punpcklbw    %%mm7, %%mm3  \n\t"\
    "punpcklbw    %%mm5, %%mm2  \n\t"\
    "punpckhbw    %%mm7, %%mm4  \n\t"\
    "punpckhbw    %%mm5, %%mm1  \n\t"\
\
    "psllq           $2, %%mm3  \n\t"\
    "psllq           $2, %%mm4  \n\t"\
\
    "por          %%mm3, %%mm2  \n\t"\
    "por          %%mm4, %%mm1  \n\t"\
\
    MOVNTQ(%%mm2,  (dst, index, 2))\
    MOVNTQ(%%mm1, 8(dst, index, 2))\
\
    "add             $8, "#index"   \n\t"\
    "cmp        "#dstw", "#index"   \n\t"\
    " jb             1b             \n\t"
#define WRITERGB15(dst, dstw, index)  REAL_WRITERGB15(dst, dstw, index)
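
/* Same idea for RGB555 (WRITERGB15): 5 bits per component, top bit unused.
 * Sketch only. */
#if 0
static uint16_t pack_rgb555_sketch(uint8_t r, uint8_t g, uint8_t b)
{
    return (uint16_t)(((r & 0xF8) << 7) | ((g & 0xF8) << 2) | (b >> 3));
}
#endif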

static void RENAME(yuv2rgb555_X_ar)(SwsContext *c, const int16_t *lumFilter,
                                    const int16_t **lumSrc, int lumFilterSize,
                                    const int16_t *chrFilter, const int16_t **chrUSrc,
                                    const int16_t **chrVSrc,
                                    int chrFilterSize, const int16_t **alpSrc,
                                    uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_offx2;

    YSCALEYUV2PACKEDX_ACCURATE
    YSCALEYUV2RGBX
    "pxor %%mm7, %%mm7 \n\t"
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
    "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
    "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
    "paddusb "RED_DITHER"(%0), %%mm5\n\t"
#endif
    WRITERGB15(%4, %5, %%REGa)
    YSCALEYUV2PACKEDX_END
}

static void RENAME(yuv2rgb555_X)(SwsContext *c, const int16_t *lumFilter,
                                 const int16_t **lumSrc, int lumFilterSize,
                                 const int16_t *chrFilter, const int16_t **chrUSrc,
                                 const int16_t **chrVSrc,
                                 int chrFilterSize, const int16_t **alpSrc,
                                 uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_offx2;

    YSCALEYUV2PACKEDX
    YSCALEYUV2RGBX
    "pxor %%mm7, %%mm7 \n\t"
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
    "paddusb "BLUE_DITHER"(%0), %%mm2  \n\t"
    "paddusb "GREEN_DITHER"(%0), %%mm4  \n\t"
    "paddusb "RED_DITHER"(%0), %%mm5  \n\t"
#endif
    WRITERGB15(%4, %5, %%REGa)
    YSCALEYUV2PACKEDX_END
}

#define WRITEBGR24MMX(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq      %%mm2, %%mm1     \n\t" /* B */\
    "movq      %%mm5, %%mm6     \n\t" /* R */\
    "punpcklbw %%mm4, %%mm2     \n\t" /* GBGBGBGB 0 */\
    "punpcklbw %%mm7, %%mm5     \n\t" /* 0R0R0R0R 0 */\
    "punpckhbw %%mm4, %%mm1     \n\t" /* GBGBGBGB 2 */\
    "punpckhbw %%mm7, %%mm6     \n\t" /* 0R0R0R0R 2 */\
    "movq      %%mm2, %%mm0     \n\t" /* GBGBGBGB 0 */\
    "movq      %%mm1, %%mm3     \n\t" /* GBGBGBGB 2 */\
    "punpcklwd %%mm5, %%mm0     \n\t" /* 0RGB0RGB 0 */\
    "punpckhwd %%mm5, %%mm2     \n\t" /* 0RGB0RGB 1 */\
    "punpcklwd %%mm6, %%mm1     \n\t" /* 0RGB0RGB 2 */\
    "punpckhwd %%mm6, %%mm3     \n\t" /* 0RGB0RGB 3 */\
\
    "movq      %%mm0, %%mm4     \n\t" /* 0RGB0RGB 0 */\
    "movq      %%mm2, %%mm6     \n\t" /* 0RGB0RGB 1 */\
    "movq      %%mm1, %%mm5     \n\t" /* 0RGB0RGB 2 */\
    "movq      %%mm3, %%mm7     \n\t" /* 0RGB0RGB 3 */\
\
    "psllq       $40, %%mm0     \n\t" /* RGB00000 0 */\
    "psllq       $40, %%mm2     \n\t" /* RGB00000 1 */\
    "psllq       $40, %%mm1     \n\t" /* RGB00000 2 */\
    "psllq       $40, %%mm3     \n\t" /* RGB00000 3 */\
\
    "punpckhdq %%mm4, %%mm0     \n\t" /* 0RGBRGB0 0 */\
    "punpckhdq %%mm6, %%mm2     \n\t" /* 0RGBRGB0 1 */\
    "punpckhdq %%mm5, %%mm1     \n\t" /* 0RGBRGB0 2 */\
    "punpckhdq %%mm7, %%mm3     \n\t" /* 0RGBRGB0 3 */\
\
    "psrlq        $8, %%mm0     \n\t" /* 00RGBRGB 0 */\
    "movq      %%mm2, %%mm6     \n\t" /* 0RGBRGB0 1 */\
    "psllq       $40, %%mm2     \n\t" /* GB000000 1 */\
    "por       %%mm2, %%mm0     \n\t" /* GBRGBRGB 0 */\
    MOVNTQ(%%mm0, (dst))\
\
    "psrlq       $24, %%mm6     \n\t" /* 0000RGBR 1 */\
    "movq      %%mm1, %%mm5     \n\t" /* 0RGBRGB0 2 */\
    "psllq       $24, %%mm1     \n\t" /* BRGB0000 2 */\
    "por       %%mm1, %%mm6     \n\t" /* BRGBRGBR 1 */\
    MOVNTQ(%%mm6, 8(dst))\
\
    "psrlq       $40, %%mm5     \n\t" /* 000000RG 2 */\
    "psllq        $8, %%mm3     \n\t" /* RGBRGB00 3 */\
    "por       %%mm3, %%mm5     \n\t" /* RGBRGBRG 2 */\
    MOVNTQ(%%mm5, 16(dst))\
\
    "add         $24, "#dst"    \n\t"\
\
    "add          $8, "#index"  \n\t"\
    "cmp     "#dstw", "#index"  \n\t"\
    " jb          1b            \n\t"

#define WRITEBGR24MMX2(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
    "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
    "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2  B1 B0 B1 B0 */\
    "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2  G1 G0 G1 G0 */\
    "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0  R1 R0 R1 R0 */\
\
    "pand   %%mm0, %%mm1        \n\t" /*    B2        B1       B0 */\
    "pand   %%mm0, %%mm3        \n\t" /*    G2        G1       G0 */\
    "pand   %%mm7, %%mm6        \n\t" /*       R1        R0       */\
\
    "psllq     $8, %%mm3        \n\t" /* G2        G1       G0    */\
    "por    %%mm1, %%mm6        \n\t"\
    "por    %%mm3, %%mm6        \n\t"\
    MOVNTQ(%%mm6, (dst))\
\
    "psrlq     $8, %%mm4        \n\t" /* 00 G7 G6 G5  G4 G3 G2 G1 */\
    "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4  B3 B2 B3 B2 */\
    "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3  G4 G3 G4 G3 */\
    "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4  R3 R2 R3 R2 */\
\
    "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5       B4        B3    */\
    "pand   %%mm7, %%mm3        \n\t" /*       G4        G3       */\
    "pand   %%mm0, %%mm6        \n\t" /*    R4        R3       R2 */\
\
    "por    %%mm1, %%mm3        \n\t" /* B5    G4 B4     G3 B3    */\
    "por    %%mm3, %%mm6        \n\t"\
    MOVNTQ(%%mm6, 8(dst))\
\
    "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6  B7 B6 B6 B7 */\
    "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7  G6 G5 G6 G5 */\
    "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6  R5 R4 R5 R4 */\
\
    "pand   %%mm7, %%mm1        \n\t" /*       B7        B6       */\
    "pand   %%mm0, %%mm3        \n\t" /*    G7        G6       G5 */\
    "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7       R6        R5    */\
\
    "por    %%mm1, %%mm3        \n\t"\
    "por    %%mm3, %%mm6        \n\t"\
    MOVNTQ(%%mm6, 16(dst))\
\
    "add      $24, "#dst"       \n\t"\
\
    "add       $8, "#index"     \n\t"\
    "cmp  "#dstw", "#index"     \n\t"\
    " jb       1b               \n\t"

#if COMPILE_TEMPLATE_MMX2
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX2(dst, dstw, index)
#else
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX(dst, dstw, index)
#endif
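
/* Both 24-bit writers above end up storing three bytes per pixel; a scalar
 * sketch of one pixel (the B, G, R memory order shown here is an assumption
 * about the packed layout this template targets, illustration only): */
#if 0
static void write_bgr24_pixel_sketch(uint8_t *dst, uint8_t r, uint8_t g, uint8_t b)
{
    dst[0] = b;
    dst[1] = g;
    dst[2] = r;
}
#endif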

static void RENAME(yuv2bgr24_X_ar)(SwsContext *c, const int16_t *lumFilter,
                                   const int16_t **lumSrc, int lumFilterSize,
                                   const int16_t *chrFilter, const int16_t **chrUSrc,
                                   const int16_t **chrVSrc,
                                   int chrFilterSize, const int16_t **alpSrc,
                                   uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_offx2;

    YSCALEYUV2PACKEDX_ACCURATE
    YSCALEYUV2RGBX
    "pxor %%mm7, %%mm7 \n\t"
    "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
    "add %4, %%"REG_c"                        \n\t"
    WRITEBGR24(%%REGc, %5, %%REGa)
    :: "r" (&c->redDither),
       "m" (dummy), "m" (dummy), "m" (dummy),
       "r" (dest), "m" (dstW_reg), "m"(uv_off)
    : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
    );
}

static void RENAME(yuv2bgr24_X)(SwsContext *c, const int16_t *lumFilter,
                                const int16_t **lumSrc, int lumFilterSize,
                                const int16_t *chrFilter, const int16_t **chrUSrc,
                                const int16_t **chrVSrc,
                                int chrFilterSize, const int16_t **alpSrc,
                                uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_offx2;

    YSCALEYUV2PACKEDX
    YSCALEYUV2RGBX
    "pxor                    %%mm7, %%mm7       \n\t"
    "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"   \n\t" //FIXME optimize
    "add                        %4, %%"REG_c"   \n\t"
    WRITEBGR24(%%REGc, %5, %%REGa)
    :: "r" (&c->redDither),
       "m" (dummy), "m" (dummy), "m" (dummy),
       "r" (dest),  "m" (dstW_reg), "m"(uv_off)
    : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
    );
}

#define REAL_WRITEYUY2(dst, dstw, index) \
    "packuswb  %%mm3, %%mm3     \n\t"\
    "packuswb  %%mm4, %%mm4     \n\t"\
    "packuswb  %%mm7, %%mm1     \n\t"\
    "punpcklbw %%mm4, %%mm3     \n\t"\
    "movq      %%mm1, %%mm7     \n\t"\
    "punpcklbw %%mm3, %%mm1     \n\t"\
    "punpckhbw %%mm3, %%mm7     \n\t"\
\
    MOVNTQ(%%mm1, (dst, index, 2))\
    MOVNTQ(%%mm7, 8(dst, index, 2))\
\
    "add          $8, "#index"  \n\t"\
    "cmp     "#dstw", "#index"  \n\t"\
    " jb          1b            \n\t"
#define WRITEYUY2(dst, dstw, index)  REAL_WRITEYUY2(dst, dstw, index)
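
/* WRITEYUY2 interleaves the packed 4:2:2 output as Y0 U Y1 V for each pair of
 * horizontal pixels; a scalar sketch of one such pair (illustration only): */
#if 0
static void write_yuy2_pair_sketch(uint8_t *dst, uint8_t y0, uint8_t y1,
                                   uint8_t u, uint8_t v)
{
    dst[0] = y0;
    dst[1] = u;
    dst[2] = y1;
    dst[3] = v;
}
#endif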

static void RENAME(yuv2yuyv422_X_ar)(SwsContext *c, const int16_t *lumFilter,
                                     const int16_t **lumSrc, int lumFilterSize,
                                     const int16_t *chrFilter, const int16_t **chrUSrc,
                                     const int16_t **chrVSrc,
                                     int chrFilterSize, const int16_t **alpSrc,
                                     uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_offx2;

    YSCALEYUV2PACKEDX_ACCURATE
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
    "psraw $3, %%mm3    \n\t"
    "psraw $3, %%mm4    \n\t"
    "psraw $3, %%mm1    \n\t"
    "psraw $3, %%mm7    \n\t"
    WRITEYUY2(%4, %5, %%REGa)
    YSCALEYUV2PACKEDX_END
}

static void RENAME(yuv2yuyv422_X)(SwsContext *c, const int16_t *lumFilter,
                                  const int16_t **lumSrc, int lumFilterSize,
                                  const int16_t *chrFilter, const int16_t **chrUSrc,
                                  const int16_t **chrVSrc,
                                  int chrFilterSize, const int16_t **alpSrc,
                                  uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_offx2;

    YSCALEYUV2PACKEDX
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
    "psraw $3, %%mm3    \n\t"
    "psraw $3, %%mm4    \n\t"
    "psraw $3, %%mm1    \n\t"
    "psraw $3, %%mm7    \n\t"
    WRITEYUY2(%4, %5, %%REGa)
    YSCALEYUV2PACKEDX_END
}

#define REAL_YSCALEYUV2RGB_UV(index, c) \
    "xor            "#index", "#index"  \n\t"\
    ".p2align              4            \n\t"\
    "1:                                 \n\t"\
    "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
    "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
    "add           "UV_OFFx2"("#c"), "#index"  \n\t" \
    "movq     (%2, "#index"), %%mm5     \n\t" /* uvbuf0[eax+2048]*/\
    "movq     (%3, "#index"), %%mm4     \n\t" /* uvbuf1[eax+2048]*/\
    "sub           "UV_OFFx2"("#c"), "#index"  \n\t" \
    "psubw             %%mm3, %%mm2     \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "psubw             %%mm4, %%mm5     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0    \n\t"\
    "pmulhw            %%mm0, %%mm2     \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "pmulhw            %%mm0, %%mm5     \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "psraw                $4, %%mm3     \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
    "psraw                $4, %%mm4     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
    "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
    "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
    "psubw  "U_OFFSET"("#c"), %%mm3     \n\t" /* (U-128)8*/\
    "psubw  "V_OFFSET"("#c"), %%mm4     \n\t" /* (V-128)8*/\
    "movq              %%mm3, %%mm2     \n\t" /* (U-128)8*/\
    "movq              %%mm4, %%mm5     \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3     \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4     \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\

#define REAL_YSCALEYUV2RGB_YA(index, c, b1, b2) \
    "movq  ("#b1", "#index", 2), %%mm0     \n\t" /*buf0[eax]*/\
    "movq  ("#b2", "#index", 2), %%mm1     \n\t" /*buf1[eax]*/\
    "movq 8("#b1", "#index", 2), %%mm6     \n\t" /*buf0[eax]*/\
    "movq 8("#b2", "#index", 2), %%mm7     \n\t" /*buf1[eax]*/\
    "psubw             %%mm1, %%mm0     \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw             %%mm7, %%mm6     \n\t" /* buf0[eax] - buf1[eax]*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "psraw                $4, %%mm1     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "psraw                $4, %%mm7     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "paddw             %%mm0, %%mm1     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "paddw             %%mm6, %%mm7     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\

#define REAL_YSCALEYUV2RGB_COEFF(c) \
    "pmulhw "UB_COEFF"("#c"), %%mm2     \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5     \n\t"\
    "psubw  "Y_OFFSET"("#c"), %%mm1     \n\t" /* 8(Y-16)*/\
    "psubw  "Y_OFFSET"("#c"), %%mm7     \n\t" /* 8(Y-16)*/\
    "pmulhw  "Y_COEFF"("#c"), %%mm1     \n\t"\
    "pmulhw  "Y_COEFF"("#c"), %%mm7     \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw             %%mm3, %%mm4     \n\t"\
    "movq              %%mm2, %%mm0     \n\t"\
    "movq              %%mm5, %%mm6     \n\t"\
    "movq              %%mm4, %%mm3     \n\t"\
    "punpcklwd         %%mm2, %%mm2     \n\t"\
    "punpcklwd         %%mm5, %%mm5     \n\t"\
    "punpcklwd         %%mm4, %%mm4     \n\t"\
    "paddw             %%mm1, %%mm2     \n\t"\
    "paddw             %%mm1, %%mm5     \n\t"\
    "paddw             %%mm1, %%mm4     \n\t"\
    "punpckhwd         %%mm0, %%mm0     \n\t"\
    "punpckhwd         %%mm6, %%mm6     \n\t"\
    "punpckhwd         %%mm3, %%mm3     \n\t"\
    "paddw             %%mm7, %%mm0     \n\t"\
    "paddw             %%mm7, %%mm6     \n\t"\
    "paddw             %%mm7, %%mm3     \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb          %%mm0, %%mm2     \n\t"\
    "packuswb          %%mm6, %%mm5     \n\t"\
    "packuswb          %%mm3, %%mm4     \n\t"\

#define YSCALEYUV2RGB_YA(index, c, b1, b2) REAL_YSCALEYUV2RGB_YA(index, c, b1, b2)

#define YSCALEYUV2RGB(index, c) \
    REAL_YSCALEYUV2RGB_UV(index, c) \
    REAL_YSCALEYUV2RGB_YA(index, c, %0, %1) \
    REAL_YSCALEYUV2RGB_COEFF(c)

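/* The _UV and _YA halves of YSCALEYUV2RGB above do a vertical bilinear blend
 * between two adjacent source lines before the colour conversion.  A scalar
 * sketch of that blend, with a 16.16 fixed-point weight chosen to mirror the
 * psubw/pmulhw/paddw structure (the weight scale and precision are
 * assumptions for illustration only): */
#if 0
static int vblend_sketch(int line0, int line1, int weight /* 0..65536 */)
{
    /* line1 * (1 - w) + line0 * w, computed as line1 + (line0 - line1) * w */
    return line1 + (((line0 - line1) * weight) >> 16);
}
#endif
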
/**
 * vertical bilinear scale YV12 to RGB
 */
static void RENAME(yuv2rgb32_2)(SwsContext *c, const int16_t *buf[2],
                                const int16_t *ubuf[2], const int16_t *vbuf[2],
                                const int16_t *abuf[2], uint8_t *dest,
                                int dstW, int yalpha, int uvalpha, int y)
{
    const int16_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];

    if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
        const int16_t *abuf0 = abuf[0], *abuf1 = abuf[1];
#if ARCH_X86_64
        __asm__ volatile(
            YSCALEYUV2RGB(%%r8, %5)
            YSCALEYUV2RGB_YA(%%r8, %5, %6, %7)
            "psraw                  $3, %%mm1       \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
            "psraw                  $3, %%mm7       \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
            "packuswb            %%mm7, %%mm1       \n\t"
            WRITEBGR32(%4, 8280(%5), %%r8, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "r" (dest),
               "a" (&c->redDither),
               "r" (abuf0), "r" (abuf1)
            : "%r8"
        );
#else
        *(const uint16_t **)(&c->u_temp)=abuf0;
        *(const uint16_t **)(&c->v_temp)=abuf1;
        __asm__ volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
            "mov        %4, %%"REG_b"               \n\t"
            "push %%"REG_BP"                        \n\t"
            YSCALEYUV2RGB(%%REGBP, %5)
            "push                   %0              \n\t"
            "push                   %1              \n\t"
            "mov          "U_TEMP"(%5), %0          \n\t"
            "mov          "V_TEMP"(%5), %1          \n\t"
            YSCALEYUV2RGB_YA(%%REGBP, %5, %0, %1)
            "psraw                  $3, %%mm1       \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
            "psraw                  $3, %%mm7       \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
            "packuswb            %%mm7, %%mm1       \n\t"
            "pop                    %1              \n\t"
            "pop                    %0              \n\t"
            WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
            "pop %%"REG_BP"                         \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
#endif
    } else {
        __asm__ volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
            "mov        %4, %%"REG_b"               \n\t"
            "push %%"REG_BP"                        \n\t"
            YSCALEYUV2RGB(%%REGBP, %5)
            "pcmpeqd %%mm7, %%mm7                   \n\t"
            WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
            "pop %%"REG_BP"                         \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    }
}

static void RENAME(yuv2bgr24_2)(SwsContext *c, const int16_t *buf[2],
                                const int16_t *ubuf[2], const int16_t *vbuf[2],
                                const int16_t *abuf[2], uint8_t *dest,
                                int dstW, int yalpha, int uvalpha, int y)
{
    const int16_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];

    //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
    __asm__ volatile(
        "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
        "mov        %4, %%"REG_b"               \n\t"
        "push %%"REG_BP"                        \n\t"
        YSCALEYUV2RGB(%%REGBP, %5)
        "pxor    %%mm7, %%mm7                   \n\t"
        WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
        "pop %%"REG_BP"                         \n\t"
        "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
        :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
           "a" (&c->redDither)
    );
}

static void RENAME(yuv2rgb555_2)(SwsContext *c, const int16_t *buf[2],
                                 const int16_t *ubuf[2], const int16_t *vbuf[2],
                                 const int16_t *abuf[2], uint8_t *dest,
                                 int dstW, int yalpha, int uvalpha, int y)
{
    const int16_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];

    //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
    __asm__ volatile(
        "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
        "mov        %4, %%"REG_b"               \n\t"
        "push %%"REG_BP"                        \n\t"
        YSCALEYUV2RGB(%%REGBP, %5)
        "pxor    %%mm7, %%mm7                   \n\t"
        /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
        "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
        "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
        "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
#endif
        WRITERGB15(%%REGb, 8280(%5), %%REGBP)
        "pop %%"REG_BP"                         \n\t"
        "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
        :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
           "a" (&c->redDither)
    );
}

static void RENAME(yuv2rgb565_2)(SwsContext *c, const int16_t *buf[2],
                                 const int16_t *ubuf[2], const int16_t *vbuf[2],
                                 const int16_t *abuf[2], uint8_t *dest,
                                 int dstW, int yalpha, int uvalpha, int y)
{
    const int16_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];

    //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
    __asm__ volatile(
        "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
        "mov        %4, %%"REG_b"               \n\t"
        "push %%"REG_BP"                        \n\t"
        YSCALEYUV2RGB(%%REGBP, %5)
        "pxor    %%mm7, %%mm7                   \n\t"
        /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
        "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
        "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
        "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
#endif
        WRITERGB16(%%REGb, 8280(%5), %%REGBP)
        "pop %%"REG_BP"                         \n\t"
        "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
        :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
           "a" (&c->redDither)
    );
}

1123 #define REAL_YSCALEYUV2PACKED(index, c) \
1124     "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0              \n\t"\
1125     "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1              \n\t"\
1126     "psraw                $3, %%mm0                           \n\t"\
1127     "psraw                $3, %%mm1                           \n\t"\
1128     "movq              %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
1129     "movq              %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
1130     "xor            "#index", "#index"                        \n\t"\
1131     ".p2align              4            \n\t"\
1132     "1:                                 \n\t"\
1133     "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
1134     "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
1135     "add           "UV_OFFx2"("#c"), "#index"  \n\t" \
1136     "movq     (%2, "#index"), %%mm5     \n\t" /* uvbuf0[eax+2048]*/\
1137     "movq     (%3, "#index"), %%mm4     \n\t" /* uvbuf1[eax+2048]*/\
1138     "sub           "UV_OFFx2"("#c"), "#index"  \n\t" \
1139     "psubw             %%mm3, %%mm2     \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
1140     "psubw             %%mm4, %%mm5     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
1141     "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0    \n\t"\
1142     "pmulhw            %%mm0, %%mm2     \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
1143     "pmulhw            %%mm0, %%mm5     \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
1144     "psraw                $7, %%mm3     \n\t" /* uvbuf1[eax] >>7*/\
1145     "psraw                $7, %%mm4     \n\t" /* uvbuf1[eax+2048] >>7*/\
1146     "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
1147     "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
1148     "movq  (%0, "#index", 2), %%mm0     \n\t" /*buf0[eax]*/\
1149     "movq  (%1, "#index", 2), %%mm1     \n\t" /*buf1[eax]*/\
1150     "movq 8(%0, "#index", 2), %%mm6     \n\t" /*buf0[eax]*/\
1151     "movq 8(%1, "#index", 2), %%mm7     \n\t" /*buf1[eax]*/\
1152     "psubw             %%mm1, %%mm0     \n\t" /* buf0[eax] - buf1[eax]*/\
1153     "psubw             %%mm7, %%mm6     \n\t" /* buf0[eax] - buf1[eax]*/\
1154     "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
1155     "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
1156     "psraw                $7, %%mm1     \n\t" /* buf1[eax] >>7*/\
1157     "psraw                $7, %%mm7     \n\t" /* buf1[eax] >>7*/\
1158     "paddw             %%mm0, %%mm1     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
1159     "paddw             %%mm6, %%mm7     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
1160
1161 #define YSCALEYUV2PACKED(index, c)  REAL_YSCALEYUV2PACKED(index, c)
1162
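/* What YSCALEYUV2PACKED computes, written out as scalar C (a sketch, assuming
 * LUM_MMX_FILTER_OFFSET+8 / CHR_MMX_FILTER_OFFSET+8 hold the per-call blend
 * weights yalpha1 / uvalpha1 referred to in the comments above; both weights
 * are shifted right by 3 once at the top of the macro):
 *
 *     int Y = ( buf1[i] >> 7) + ((( buf0[i] -  buf1[i]) * (yalpha1  >> 3)) >> 16);
 *     int U = (ubuf1[i] >> 7) + (((ubuf0[i] - ubuf1[i]) * (uvalpha1 >> 3)) >> 16);
 *
 * V is produced the same way from the samples UV_OFFx2(c) bytes further into
 * the chroma buffers; WRITEYUY2 (defined earlier in this file) then packs the
 * results as YUYV.
 */
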
1163 static void RENAME(yuv2yuyv422_2)(SwsContext *c, const int16_t *buf[2],
1164                                   const int16_t *ubuf[2], const int16_t *vbuf[2],
1165                                   const int16_t *abuf[2], uint8_t *dest,
1166                                   int dstW, int yalpha, int uvalpha, int y)
1167 {
1168     const int16_t *buf0  = buf[0],  *buf1  = buf[1],
1169                   *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1170
1171     //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1172     __asm__ volatile(
1173         "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1174         "mov        %4, %%"REG_b"               \n\t"
1175         "push %%"REG_BP"                        \n\t"
1176         YSCALEYUV2PACKED(%%REGBP, %5)
1177         WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1178         "pop %%"REG_BP"                         \n\t"
1179         "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1180         :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1181            "a" (&c->redDither)
1182     );
1183 }
1184
1185 #define REAL_YSCALEYUV2RGB1(index, c) \
1186     "xor            "#index", "#index"  \n\t"\
1187     ".p2align              4            \n\t"\
1188     "1:                                 \n\t"\
1189     "movq     (%2, "#index"), %%mm3     \n\t" /* uvbuf0[eax]*/\
1190     "add           "UV_OFFx2"("#c"), "#index"  \n\t" \
1191     "movq     (%2, "#index"), %%mm4     \n\t" /* uvbuf0[eax+2048]*/\
1192     "sub           "UV_OFFx2"("#c"), "#index"  \n\t" \
1193     "psraw                $4, %%mm3     \n\t" /* uvbuf0[eax] >>4*/\
1194     "psraw                $4, %%mm4     \n\t" /* uvbuf0[eax+2048] >>4*/\
1195     "psubw  "U_OFFSET"("#c"), %%mm3     \n\t" /* (U-128)8*/\
1196     "psubw  "V_OFFSET"("#c"), %%mm4     \n\t" /* (V-128)8*/\
1197     "movq              %%mm3, %%mm2     \n\t" /* (U-128)8*/\
1198     "movq              %%mm4, %%mm5     \n\t" /* (V-128)8*/\
1199     "pmulhw "UG_COEFF"("#c"), %%mm3     \n\t"\
1200     "pmulhw "VG_COEFF"("#c"), %%mm4     \n\t"\
1201     /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
1202     "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
1203     "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
1204     "psraw                $4, %%mm1     \n\t" /* buf0[eax] >>4*/\
1205     "psraw                $4, %%mm7     \n\t" /* buf0[eax] >>4*/\
1206     "pmulhw "UB_COEFF"("#c"), %%mm2     \n\t"\
1207     "pmulhw "VR_COEFF"("#c"), %%mm5     \n\t"\
1208     "psubw  "Y_OFFSET"("#c"), %%mm1     \n\t" /* 8(Y-16)*/\
1209     "psubw  "Y_OFFSET"("#c"), %%mm7     \n\t" /* 8(Y-16)*/\
1210     "pmulhw  "Y_COEFF"("#c"), %%mm1     \n\t"\
1211     "pmulhw  "Y_COEFF"("#c"), %%mm7     \n\t"\
1212     /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
1213     "paddw             %%mm3, %%mm4     \n\t"\
1214     "movq              %%mm2, %%mm0     \n\t"\
1215     "movq              %%mm5, %%mm6     \n\t"\
1216     "movq              %%mm4, %%mm3     \n\t"\
1217     "punpcklwd         %%mm2, %%mm2     \n\t"\
1218     "punpcklwd         %%mm5, %%mm5     \n\t"\
1219     "punpcklwd         %%mm4, %%mm4     \n\t"\
1220     "paddw             %%mm1, %%mm2     \n\t"\
1221     "paddw             %%mm1, %%mm5     \n\t"\
1222     "paddw             %%mm1, %%mm4     \n\t"\
1223     "punpckhwd         %%mm0, %%mm0     \n\t"\
1224     "punpckhwd         %%mm6, %%mm6     \n\t"\
1225     "punpckhwd         %%mm3, %%mm3     \n\t"\
1226     "paddw             %%mm7, %%mm0     \n\t"\
1227     "paddw             %%mm7, %%mm6     \n\t"\
1228     "paddw             %%mm7, %%mm3     \n\t"\
1229     /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
1230     "packuswb          %%mm0, %%mm2     \n\t"\
1231     "packuswb          %%mm6, %%mm5     \n\t"\
1232     "packuswb          %%mm3, %%mm4     \n\t"\
1233
1234 #define YSCALEYUV2RGB1(index, c)  REAL_YSCALEYUV2RGB1(index, c)
1235
1236 // do vertical chrominance interpolation
1237 #define REAL_YSCALEYUV2RGB1b(index, c) \
1238     "xor            "#index", "#index"  \n\t"\
1239     ".p2align              4            \n\t"\
1240     "1:                                 \n\t"\
1241     "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
1242     "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
1243     "add           "UV_OFFx2"("#c"), "#index"  \n\t" \
1244     "movq     (%2, "#index"), %%mm5     \n\t" /* uvbuf0[eax+2048]*/\
1245     "movq     (%3, "#index"), %%mm4     \n\t" /* uvbuf1[eax+2048]*/\
1246     "sub           "UV_OFFx2"("#c"), "#index"  \n\t" \
1247     "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
1248     "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
1249     "psrlw                $5, %%mm3     \n\t" /*FIXME might overflow*/\
1250     "psrlw                $5, %%mm4     \n\t" /*FIXME might overflow*/\
1251     "psubw  "U_OFFSET"("#c"), %%mm3     \n\t" /* (U-128)8*/\
1252     "psubw  "V_OFFSET"("#c"), %%mm4     \n\t" /* (V-128)8*/\
1253     "movq              %%mm3, %%mm2     \n\t" /* (U-128)8*/\
1254     "movq              %%mm4, %%mm5     \n\t" /* (V-128)8*/\
1255     "pmulhw "UG_COEFF"("#c"), %%mm3     \n\t"\
1256     "pmulhw "VG_COEFF"("#c"), %%mm4     \n\t"\
1257     /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
1258     "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
1259     "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
1260     "psraw                $4, %%mm1     \n\t" /* buf0[eax] >>4*/\
1261     "psraw                $4, %%mm7     \n\t" /* buf0[eax] >>4*/\
1262     "pmulhw "UB_COEFF"("#c"), %%mm2     \n\t"\
1263     "pmulhw "VR_COEFF"("#c"), %%mm5     \n\t"\
1264     "psubw  "Y_OFFSET"("#c"), %%mm1     \n\t" /* 8(Y-16)*/\
1265     "psubw  "Y_OFFSET"("#c"), %%mm7     \n\t" /* 8(Y-16)*/\
1266     "pmulhw  "Y_COEFF"("#c"), %%mm1     \n\t"\
1267     "pmulhw  "Y_COEFF"("#c"), %%mm7     \n\t"\
1268     /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
1269     "paddw             %%mm3, %%mm4     \n\t"\
1270     "movq              %%mm2, %%mm0     \n\t"\
1271     "movq              %%mm5, %%mm6     \n\t"\
1272     "movq              %%mm4, %%mm3     \n\t"\
1273     "punpcklwd         %%mm2, %%mm2     \n\t"\
1274     "punpcklwd         %%mm5, %%mm5     \n\t"\
1275     "punpcklwd         %%mm4, %%mm4     \n\t"\
1276     "paddw             %%mm1, %%mm2     \n\t"\
1277     "paddw             %%mm1, %%mm5     \n\t"\
1278     "paddw             %%mm1, %%mm4     \n\t"\
1279     "punpckhwd         %%mm0, %%mm0     \n\t"\
1280     "punpckhwd         %%mm6, %%mm6     \n\t"\
1281     "punpckhwd         %%mm3, %%mm3     \n\t"\
1282     "paddw             %%mm7, %%mm0     \n\t"\
1283     "paddw             %%mm7, %%mm6     \n\t"\
1284     "paddw             %%mm7, %%mm3     \n\t"\
1285     /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
1286     "packuswb          %%mm0, %%mm2     \n\t"\
1287     "packuswb          %%mm6, %%mm5     \n\t"\
1288     "packuswb          %%mm3, %%mm4     \n\t"\
1289
1290 #define YSCALEYUV2RGB1b(index, c)  REAL_YSCALEYUV2RGB1b(index, c)
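
/* Scalar sketch of what YSCALEYUV2RGB1 / YSCALEYUV2RGB1b compute (illustrative
 * only; y_coeff, y_offset, u_offset, ub_coeff, ... stand for the 16-bit
 * constants stored at the Y_COEFF/Y_OFFSET/U_OFFSET/UB_COEFF/... offsets of
 * the context, and vbuf0 for the V samples UV_OFFx2(c) bytes into the chroma
 * buffer):
 *
 *     int U = (ubuf0[i] >> 4) - u_offset;  // "1b" averages two chroma lines:
 *                                          //   ((ubuf0[i] + ubuf1[i]) >> 5) - u_offset
 *     int V = (vbuf0[i] >> 4) - v_offset;
 *     int Y = (((buf0[2 * i] >> 4) - y_offset) * y_coeff) >> 16;
 *
 *     int B = Y + ((U * ub_coeff) >> 16);
 *     int G = Y + ((U * ug_coeff) >> 16) + ((V * vg_coeff) >> 16);
 *     int R = Y + ((V * vr_coeff) >> 16);
 *
 * Each chroma sample is shared by two adjacent luma samples, which is what the
 * punpcklwd/punpckhwd shuffles implement, and the trailing packuswb clamps
 * B/G/R to 0..255 before the caller's WRITE* macro stores the packed pixels.
 */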
1291
1292 #define REAL_YSCALEYUV2RGB1_ALPHA(index) \
1293     "movq  (%1, "#index", 2), %%mm7     \n\t" /* abuf0[index  ]     */\
1294     "movq 8(%1, "#index", 2), %%mm1     \n\t" /* abuf0[index+4]     */\
1295     "psraw                $7, %%mm7     \n\t" /* abuf0[index  ] >>7 */\
1296     "psraw                $7, %%mm1     \n\t" /* abuf0[index+4] >>7 */\
1297     "packuswb          %%mm1, %%mm7     \n\t"
1298 #define YSCALEYUV2RGB1_ALPHA(index) REAL_YSCALEYUV2RGB1_ALPHA(index)
1299
1300 /**
1301  * YV12 to RGB without scaling or interpolating
1302  */
1303 static void RENAME(yuv2rgb32_1)(SwsContext *c, const int16_t *buf0,
1304                                 const int16_t *ubuf[2], const int16_t *vbuf[2],
1305                                 const int16_t *abuf0, uint8_t *dest,
1306                                 int dstW, int uvalpha, int y)
1307 {
1308     const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1309     const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1310
1311     if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1312         if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
1313             __asm__ volatile(
1314                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1315                 "mov        %4, %%"REG_b"               \n\t"
1316                 "push %%"REG_BP"                        \n\t"
1317                 YSCALEYUV2RGB1(%%REGBP, %5)
1318                 YSCALEYUV2RGB1_ALPHA(%%REGBP)
1319                 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1320                 "pop %%"REG_BP"                         \n\t"
1321                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1322                 :: "c" (buf0), "d" (abuf0), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1323                    "a" (&c->redDither)
1324             );
1325         } else {
1326             __asm__ volatile(
1327                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1328                 "mov        %4, %%"REG_b"               \n\t"
1329                 "push %%"REG_BP"                        \n\t"
1330                 YSCALEYUV2RGB1(%%REGBP, %5)
1331                 "pcmpeqd %%mm7, %%mm7                   \n\t"
1332                 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1333                 "pop %%"REG_BP"                         \n\t"
1334                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1335                 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1336                    "a" (&c->redDither)
1337             );
1338         }
1339     } else {
1340         if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
1341             __asm__ volatile(
1342                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1343                 "mov        %4, %%"REG_b"               \n\t"
1344                 "push %%"REG_BP"                        \n\t"
1345                 YSCALEYUV2RGB1b(%%REGBP, %5)
1346                 YSCALEYUV2RGB1_ALPHA(%%REGBP)
1347                 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1348                 "pop %%"REG_BP"                         \n\t"
1349                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1350                 :: "c" (buf0), "d" (abuf0), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1351                    "a" (&c->redDither)
1352             );
1353         } else {
1354             __asm__ volatile(
1355                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1356                 "mov        %4, %%"REG_b"               \n\t"
1357                 "push %%"REG_BP"                        \n\t"
1358                 YSCALEYUV2RGB1b(%%REGBP, %5)
1359                 "pcmpeqd %%mm7, %%mm7                   \n\t"
1360                 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1361                 "pop %%"REG_BP"                         \n\t"
1362                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1363                 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1364                    "a" (&c->redDither)
1365             );
1366         }
1367     }
1368 }
1369
1370 static void RENAME(yuv2bgr24_1)(SwsContext *c, const int16_t *buf0,
1371                                 const int16_t *ubuf[2], const int16_t *vbuf[2],
1372                                 const int16_t *abuf0, uint8_t *dest,
1373                                 int dstW, int uvalpha, int y)
1374 {
1375     const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1376     const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1377
1378     if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1379         __asm__ volatile(
1380             "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1381             "mov        %4, %%"REG_b"               \n\t"
1382             "push %%"REG_BP"                        \n\t"
1383             YSCALEYUV2RGB1(%%REGBP, %5)
1384             "pxor    %%mm7, %%mm7                   \n\t"
1385             WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1386             "pop %%"REG_BP"                         \n\t"
1387             "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1388             :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1389                "a" (&c->redDither)
1390         );
1391     } else {
1392         __asm__ volatile(
1393             "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1394             "mov        %4, %%"REG_b"               \n\t"
1395             "push %%"REG_BP"                        \n\t"
1396             YSCALEYUV2RGB1b(%%REGBP, %5)
1397             "pxor    %%mm7, %%mm7                   \n\t"
1398             WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1399             "pop %%"REG_BP"                         \n\t"
1400             "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1401             :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1402                "a" (&c->redDither)
1403         );
1404     }
1405 }
1406
1407 static void RENAME(yuv2rgb555_1)(SwsContext *c, const int16_t *buf0,
1408                                  const int16_t *ubuf[2], const int16_t *vbuf[2],
1409                                  const int16_t *abuf0, uint8_t *dest,
1410                                  int dstW, int uvalpha, int y)
1411 {
1412     const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1413     const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1414
1415     if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1416         __asm__ volatile(
1417             "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1418             "mov        %4, %%"REG_b"               \n\t"
1419             "push %%"REG_BP"                        \n\t"
1420             YSCALEYUV2RGB1(%%REGBP, %5)
1421             "pxor    %%mm7, %%mm7                   \n\t"
1422             /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1423 #ifdef DITHER1XBPP
1424             "paddusb  "BLUE_DITHER"(%5), %%mm2      \n\t"
1425             "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
1426             "paddusb   "RED_DITHER"(%5), %%mm5      \n\t"
1427 #endif
1428             WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1429             "pop %%"REG_BP"                         \n\t"
1430             "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1431             :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1432                "a" (&c->redDither)
1433         );
1434     } else {
1435         __asm__ volatile(
1436             "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1437             "mov        %4, %%"REG_b"               \n\t"
1438             "push %%"REG_BP"                        \n\t"
1439             YSCALEYUV2RGB1b(%%REGBP, %5)
1440             "pxor    %%mm7, %%mm7                   \n\t"
1441             /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1442 #ifdef DITHER1XBPP
1443             "paddusb  "BLUE_DITHER"(%5), %%mm2      \n\t"
1444             "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
1445             "paddusb   "RED_DITHER"(%5), %%mm5      \n\t"
1446 #endif
1447             WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1448             "pop %%"REG_BP"                         \n\t"
1449             "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1450             :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1451                "a" (&c->redDither)
1452         );
1453     }
1454 }
1455
1456 static void RENAME(yuv2rgb565_1)(SwsContext *c, const int16_t *buf0,
1457                                  const int16_t *ubuf[2], const int16_t *vbuf[2],
1458                                  const int16_t *abuf0, uint8_t *dest,
1459                                  int dstW, int uvalpha, int y)
1460 {
1461     const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1462     const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1463
1464     if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1465         __asm__ volatile(
1466             "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1467             "mov        %4, %%"REG_b"               \n\t"
1468             "push %%"REG_BP"                        \n\t"
1469             YSCALEYUV2RGB1(%%REGBP, %5)
1470             "pxor    %%mm7, %%mm7                   \n\t"
1471             /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1472 #ifdef DITHER1XBPP
1473             "paddusb  "BLUE_DITHER"(%5), %%mm2      \n\t"
1474             "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
1475             "paddusb   "RED_DITHER"(%5), %%mm5      \n\t"
1476 #endif
1477             WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1478             "pop %%"REG_BP"                         \n\t"
1479             "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1480             :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1481                "a" (&c->redDither)
1482         );
1483     } else {
1484         __asm__ volatile(
1485             "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1486             "mov        %4, %%"REG_b"               \n\t"
1487             "push %%"REG_BP"                        \n\t"
1488             YSCALEYUV2RGB1b(%%REGBP, %5)
1489             "pxor    %%mm7, %%mm7                   \n\t"
1490             /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1491 #ifdef DITHER1XBPP
1492             "paddusb  "BLUE_DITHER"(%5), %%mm2      \n\t"
1493             "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
1494             "paddusb   "RED_DITHER"(%5), %%mm5      \n\t"
1495 #endif
1496             WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1497             "pop %%"REG_BP"                         \n\t"
1498             "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1499             :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1500                "a" (&c->redDither)
1501         );
1502     }
1503 }
1504
1505 #define REAL_YSCALEYUV2PACKED1(index, c) \
1506     "xor            "#index", "#index"  \n\t"\
1507     ".p2align              4            \n\t"\
1508     "1:                                 \n\t"\
1509     "movq     (%2, "#index"), %%mm3     \n\t" /* uvbuf0[eax]*/\
1510     "add           "UV_OFFx2"("#c"), "#index"  \n\t" \
1511     "movq     (%2, "#index"), %%mm4     \n\t" /* uvbuf0[eax+2048]*/\
1512     "sub           "UV_OFFx2"("#c"), "#index"  \n\t" \
1513     "psraw                $7, %%mm3     \n\t" \
1514     "psraw                $7, %%mm4     \n\t" \
1515     "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
1516     "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
1517     "psraw                $7, %%mm1     \n\t" \
1518     "psraw                $7, %%mm7     \n\t" \
1519
1520 #define YSCALEYUV2PACKED1(index, c)  REAL_YSCALEYUV2PACKED1(index, c)
1521
1522 #define REAL_YSCALEYUV2PACKED1b(index, c) \
1523     "xor "#index", "#index"             \n\t"\
1524     ".p2align              4            \n\t"\
1525     "1:                                 \n\t"\
1526     "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
1527     "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
1528     "add           "UV_OFFx2"("#c"), "#index"  \n\t" \
1529     "movq     (%2, "#index"), %%mm5     \n\t" /* uvbuf0[eax+2048]*/\
1530     "movq     (%3, "#index"), %%mm4     \n\t" /* uvbuf1[eax+2048]*/\
1531     "sub           "UV_OFFx2"("#c"), "#index"  \n\t" \
1532     "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
1533     "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
1534     "psrlw                $8, %%mm3     \n\t" \
1535     "psrlw                $8, %%mm4     \n\t" \
1536     "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
1537     "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
1538     "psraw                $7, %%mm1     \n\t" \
1539     "psraw                $7, %%mm7     \n\t"
1540 #define YSCALEYUV2PACKED1b(index, c)  REAL_YSCALEYUV2PACKED1b(index, c)
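
/* YSCALEYUV2PACKED1 / YSCALEYUV2PACKED1b are the unscaled counterparts of
 * YSCALEYUV2PACKED: no blend with a second luma line, just a shift back to
 * 8-bit range (a sketch):
 *
 *     int Y = buf0[i]  >> 7;
 *     int U = ubuf0[i] >> 7;                      // "1" variant
 *     // "1b" variant: U = (ubuf0[i] + ubuf1[i]) >> 8;  (average of two lines)
 */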
1541
1542 static void RENAME(yuv2yuyv422_1)(SwsContext *c, const int16_t *buf0,
1543                                   const int16_t *ubuf[2], const int16_t *vbuf[2],
1544                                   const int16_t *abuf0, uint8_t *dest,
1545                                   int dstW, int uvalpha, int y)
1546 {
1547     const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1548     const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1549
1550     if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1551         __asm__ volatile(
1552             "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1553             "mov        %4, %%"REG_b"               \n\t"
1554             "push %%"REG_BP"                        \n\t"
1555             YSCALEYUV2PACKED1(%%REGBP, %5)
1556             WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1557             "pop %%"REG_BP"                         \n\t"
1558             "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1559             :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1560                "a" (&c->redDither)
1561         );
1562     } else {
1563         __asm__ volatile(
1564             "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1565             "mov        %4, %%"REG_b"               \n\t"
1566             "push %%"REG_BP"                        \n\t"
1567             YSCALEYUV2PACKED1b(%%REGBP, %5)
1568             WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1569             "pop %%"REG_BP"                         \n\t"
1570             "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1571             :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1572                "a" (&c->redDither)
1573         );
1574     }
1575 }
1576
1577 #if !COMPILE_TEMPLATE_MMX2
1578 //FIXME yuy2* can read up to 7 samples too much
1579
1580 static void RENAME(yuy2ToY)(uint8_t *dst, const uint8_t *src,
1581                             int width, uint32_t *unused)
1582 {
1583     __asm__ volatile(
1584         "movq "MANGLE(bm01010101)", %%mm2           \n\t"
1585         "mov                    %0, %%"REG_a"       \n\t"
1586         "1:                                         \n\t"
1587         "movq    (%1, %%"REG_a",2), %%mm0           \n\t"
1588         "movq   8(%1, %%"REG_a",2), %%mm1           \n\t"
1589         "pand                %%mm2, %%mm0           \n\t"
1590         "pand                %%mm2, %%mm1           \n\t"
1591         "packuswb            %%mm1, %%mm0           \n\t"
1592         "movq                %%mm0, (%2, %%"REG_a") \n\t"
1593         "add                    $8, %%"REG_a"       \n\t"
1594         " js                    1b                  \n\t"
1595         : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
1596         : "%"REG_a
1597     );
1598 }
1599
1600 static void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV,
1601                              const uint8_t *src1, const uint8_t *src2,
1602                              int width, uint32_t *unused)
1603 {
1604     __asm__ volatile(
1605         "movq "MANGLE(bm01010101)", %%mm4           \n\t"
1606         "mov                    %0, %%"REG_a"       \n\t"
1607         "1:                                         \n\t"
1608         "movq    (%1, %%"REG_a",4), %%mm0           \n\t"
1609         "movq   8(%1, %%"REG_a",4), %%mm1           \n\t"
1610         "psrlw                  $8, %%mm0           \n\t"
1611         "psrlw                  $8, %%mm1           \n\t"
1612         "packuswb            %%mm1, %%mm0           \n\t"
1613         "movq                %%mm0, %%mm1           \n\t"
1614         "psrlw                  $8, %%mm0           \n\t"
1615         "pand                %%mm4, %%mm1           \n\t"
1616         "packuswb            %%mm0, %%mm0           \n\t"
1617         "packuswb            %%mm1, %%mm1           \n\t"
1618         "movd                %%mm0, (%3, %%"REG_a") \n\t"
1619         "movd                %%mm1, (%2, %%"REG_a") \n\t"
1620         "add                    $4, %%"REG_a"       \n\t"
1621         " js                    1b                  \n\t"
1622         : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
1623         : "%"REG_a
1624     );
1625     assert(src1 == src2);
1626 }
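
/* The two MMX loops above amount to the plain C below (a sketch of the same
 * extraction; packed YUYV stores each pixel pair as Y0 U Y1 V):
 *
 *     for (i = 0; i < width; i++)
 *         dst[i] = src[2 * i];                  // yuy2ToY
 *
 *     for (i = 0; i < width; i++) {
 *         dstU[i] = src1[4 * i + 1];            // yuy2ToUV
 *         dstV[i] = src1[4 * i + 3];
 *     }
 */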
1627
1628 /* This is almost identical to the previous, and exists only because
1629  * yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses. */
1630 static void RENAME(uyvyToY)(uint8_t *dst, const uint8_t *src,
1631                             int width, uint32_t *unused)
1632 {
1633     __asm__ volatile(
1634         "mov                  %0, %%"REG_a"         \n\t"
1635         "1:                                         \n\t"
1636         "movq  (%1, %%"REG_a",2), %%mm0             \n\t"
1637         "movq 8(%1, %%"REG_a",2), %%mm1             \n\t"
1638         "psrlw                $8, %%mm0             \n\t"
1639         "psrlw                $8, %%mm1             \n\t"
1640         "packuswb          %%mm1, %%mm0             \n\t"
1641         "movq              %%mm0, (%2, %%"REG_a")   \n\t"
1642         "add                  $8, %%"REG_a"         \n\t"
1643         " js                  1b                    \n\t"
1644         : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
1645         : "%"REG_a
1646     );
1647 }
1648
1649 static void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV,
1650                              const uint8_t *src1, const uint8_t *src2,
1651                              int width, uint32_t *unused)
1652 {
1653     __asm__ volatile(
1654         "movq "MANGLE(bm01010101)", %%mm4           \n\t"
1655         "mov                    %0, %%"REG_a"       \n\t"
1656         "1:                                         \n\t"
1657         "movq    (%1, %%"REG_a",4), %%mm0           \n\t"
1658         "movq   8(%1, %%"REG_a",4), %%mm1           \n\t"
1659         "pand                %%mm4, %%mm0           \n\t"
1660         "pand                %%mm4, %%mm1           \n\t"
1661         "packuswb            %%mm1, %%mm0           \n\t"
1662         "movq                %%mm0, %%mm1           \n\t"
1663         "psrlw                  $8, %%mm0           \n\t"
1664         "pand                %%mm4, %%mm1           \n\t"
1665         "packuswb            %%mm0, %%mm0           \n\t"
1666         "packuswb            %%mm1, %%mm1           \n\t"
1667         "movd                %%mm0, (%3, %%"REG_a") \n\t"
1668         "movd                %%mm1, (%2, %%"REG_a") \n\t"
1669         "add                    $4, %%"REG_a"       \n\t"
1670         " js                    1b                  \n\t"
1671         : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
1672         : "%"REG_a
1673     );
1674     assert(src1 == src2);
1675 }
1676
1677 static av_always_inline void RENAME(nvXXtoUV)(uint8_t *dst1, uint8_t *dst2,
1678                                               const uint8_t *src, int width)
1679 {
1680     __asm__ volatile(
1681         "movq "MANGLE(bm01010101)", %%mm4           \n\t"
1682         "mov                    %0, %%"REG_a"       \n\t"
1683         "1:                                         \n\t"
1684         "movq    (%1, %%"REG_a",2), %%mm0           \n\t"
1685         "movq   8(%1, %%"REG_a",2), %%mm1           \n\t"
1686         "movq                %%mm0, %%mm2           \n\t"
1687         "movq                %%mm1, %%mm3           \n\t"
1688         "pand                %%mm4, %%mm0           \n\t"
1689         "pand                %%mm4, %%mm1           \n\t"
1690         "psrlw                  $8, %%mm2           \n\t"
1691         "psrlw                  $8, %%mm3           \n\t"
1692         "packuswb            %%mm1, %%mm0           \n\t"
1693         "packuswb            %%mm3, %%mm2           \n\t"
1694         "movq                %%mm0, (%2, %%"REG_a") \n\t"
1695         "movq                %%mm2, (%3, %%"REG_a") \n\t"
1696         "add                    $8, %%"REG_a"       \n\t"
1697         " js                    1b                  \n\t"
1698         : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst1+width), "r" (dst2+width)
1699         : "%"REG_a
1700     );
1701 }
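
/* nvXXtoUV splits the interleaved chroma plane of NV12/NV21 into two planar
 * buffers; in scalar terms (a sketch):
 *
 *     for (i = 0; i < width; i++) {
 *         dst1[i] = src[2 * i];                 // even bytes (U for NV12, V for NV21)
 *         dst2[i] = src[2 * i + 1];             // odd bytes
 *     }
 *
 * nv12ToUV / nv21ToUV below only differ in which destination receives the
 * even bytes.
 */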
1702
1703 static void RENAME(nv12ToUV)(uint8_t *dstU, uint8_t *dstV,
1704                              const uint8_t *src1, const uint8_t *src2,
1705                              int width, uint32_t *unused)
1706 {
1707     RENAME(nvXXtoUV)(dstU, dstV, src1, width);
1708 }
1709
1710 static void RENAME(nv21ToUV)(uint8_t *dstU, uint8_t *dstV,
1711                              const uint8_t *src1, const uint8_t *src2,
1712                              int width, uint32_t *unused)
1713 {
1714     RENAME(nvXXtoUV)(dstV, dstU, src1, width);
1715 }
1716 #endif /* !COMPILE_TEMPLATE_MMX2 */
1717
1718 static av_always_inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, const uint8_t *src,
1719                                                   int width, enum PixelFormat srcFormat)
1720 {
1721
1722     if(srcFormat == PIX_FMT_BGR24) {
1723         __asm__ volatile(
1724             "movq  "MANGLE(ff_bgr24toY1Coeff)", %%mm5       \n\t"
1725             "movq  "MANGLE(ff_bgr24toY2Coeff)", %%mm6       \n\t"
1726             :
1727         );
1728     } else {
1729         __asm__ volatile(
1730             "movq  "MANGLE(ff_rgb24toY1Coeff)", %%mm5       \n\t"
1731             "movq  "MANGLE(ff_rgb24toY2Coeff)", %%mm6       \n\t"
1732             :
1733         );
1734     }
1735
1736     __asm__ volatile(
1737         "movq  "MANGLE(ff_bgr24toYOffset)", %%mm4   \n\t"
1738         "mov                        %2, %%"REG_a"   \n\t"
1739         "pxor                    %%mm7, %%mm7       \n\t"
1740         "1:                                         \n\t"
1741         PREFETCH"               64(%0)              \n\t"
1742         "movd                     (%0), %%mm0       \n\t"
1743         "movd                    2(%0), %%mm1       \n\t"
1744         "movd                    6(%0), %%mm2       \n\t"
1745         "movd                    8(%0), %%mm3       \n\t"
1746         "add                       $12, %0          \n\t"
1747         "punpcklbw               %%mm7, %%mm0       \n\t"
1748         "punpcklbw               %%mm7, %%mm1       \n\t"
1749         "punpcklbw               %%mm7, %%mm2       \n\t"
1750         "punpcklbw               %%mm7, %%mm3       \n\t"
1751         "pmaddwd                 %%mm5, %%mm0       \n\t"
1752         "pmaddwd                 %%mm6, %%mm1       \n\t"
1753         "pmaddwd                 %%mm5, %%mm2       \n\t"
1754         "pmaddwd                 %%mm6, %%mm3       \n\t"
1755         "paddd                   %%mm1, %%mm0       \n\t"
1756         "paddd                   %%mm3, %%mm2       \n\t"
1757         "paddd                   %%mm4, %%mm0       \n\t"
1758         "paddd                   %%mm4, %%mm2       \n\t"
1759         "psrad                     $15, %%mm0       \n\t"
1760         "psrad                     $15, %%mm2       \n\t"
1761         "packssdw                %%mm2, %%mm0       \n\t"
1762         "packuswb                %%mm0, %%mm0       \n\t"
1763         "movd                %%mm0, (%1, %%"REG_a") \n\t"
1764         "add                        $4, %%"REG_a"   \n\t"
1765         " js                        1b              \n\t"
1766     : "+r" (src)
1767     : "r" (dst+width), "g" ((x86_reg)-width)
1768     : "%"REG_a
1769     );
1770 }
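
/* The pmaddwd/paddd/psrad $15 sequence above boils down to one fixed-point dot
 * product per output pixel (a sketch; ry/gy/by and y_bias stand for the 16-bit
 * weights and the 32-bit rounding/bias constant packed into ff_bgr24toY*Coeff
 * and ff_bgr24toYOffset):
 *
 *     dst[i] = av_clip_uint8((ry * r + gy * g + by * b + y_bias) >> 15);
 *
 * Two coefficient quadwords are needed only because the 12 input bytes are
 * fetched as overlapping movd loads, so each quadword carries the weights in
 * the order those loads present the B/G/R samples.  bgr24ToUV_mmx below does
 * the same with the ff_bgr24toUV table to produce U and V.
 */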
1771
1772 static void RENAME(bgr24ToY)(uint8_t *dst, const uint8_t *src,
1773                              int width, uint32_t *unused)
1774 {
1775     RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24);
1776 }
1777
1778 static void RENAME(rgb24ToY)(uint8_t *dst, const uint8_t *src,
1779                              int width, uint32_t *unused)
1780 {
1781     RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24);
1782 }
1783
1784 static av_always_inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV,
1785                                                    const uint8_t *src, int width,
1786                                                    enum PixelFormat srcFormat)
1787 {
1788     __asm__ volatile(
1789         "movq                    24(%4), %%mm6       \n\t"
1790         "mov                        %3, %%"REG_a"   \n\t"
1791         "pxor                    %%mm7, %%mm7       \n\t"
1792         "1:                                         \n\t"
1793         PREFETCH"               64(%0)              \n\t"
1794         "movd                     (%0), %%mm0       \n\t"
1795         "movd                    2(%0), %%mm1       \n\t"
1796         "punpcklbw               %%mm7, %%mm0       \n\t"
1797         "punpcklbw               %%mm7, %%mm1       \n\t"
1798         "movq                    %%mm0, %%mm2       \n\t"
1799         "movq                    %%mm1, %%mm3       \n\t"
1800         "pmaddwd                  (%4), %%mm0       \n\t"
1801         "pmaddwd                 8(%4), %%mm1       \n\t"
1802         "pmaddwd                16(%4), %%mm2       \n\t"
1803         "pmaddwd                 %%mm6, %%mm3       \n\t"
1804         "paddd                   %%mm1, %%mm0       \n\t"
1805         "paddd                   %%mm3, %%mm2       \n\t"
1806
1807         "movd                    6(%0), %%mm1       \n\t"
1808         "movd                    8(%0), %%mm3       \n\t"
1809         "add                       $12, %0          \n\t"
1810         "punpcklbw               %%mm7, %%mm1       \n\t"
1811         "punpcklbw               %%mm7, %%mm3       \n\t"
1812         "movq                    %%mm1, %%mm4       \n\t"
1813         "movq                    %%mm3, %%mm5       \n\t"
1814         "pmaddwd                  (%4), %%mm1       \n\t"
1815         "pmaddwd                 8(%4), %%mm3       \n\t"
1816         "pmaddwd                16(%4), %%mm4       \n\t"
1817         "pmaddwd                 %%mm6, %%mm5       \n\t"
1818         "paddd                   %%mm3, %%mm1       \n\t"
1819         "paddd                   %%mm5, %%mm4       \n\t"
1820
1821         "movq "MANGLE(ff_bgr24toUVOffset)", %%mm3       \n\t"
1822         "paddd                   %%mm3, %%mm0       \n\t"
1823         "paddd                   %%mm3, %%mm2       \n\t"
1824         "paddd                   %%mm3, %%mm1       \n\t"
1825         "paddd                   %%mm3, %%mm4       \n\t"
1826         "psrad                     $15, %%mm0       \n\t"
1827         "psrad                     $15, %%mm2       \n\t"
1828         "psrad                     $15, %%mm1       \n\t"
1829         "psrad                     $15, %%mm4       \n\t"
1830         "packssdw                %%mm1, %%mm0       \n\t"
1831         "packssdw                %%mm4, %%mm2       \n\t"
1832         "packuswb                %%mm0, %%mm0       \n\t"
1833         "packuswb                %%mm2, %%mm2       \n\t"
1834         "movd                %%mm0, (%1, %%"REG_a") \n\t"
1835         "movd                %%mm2, (%2, %%"REG_a") \n\t"
1836         "add                        $4, %%"REG_a"   \n\t"
1837         " js                        1b              \n\t"
1838     : "+r" (src)
1839     : "r" (dstU+width), "r" (dstV+width), "g" ((x86_reg)-width), "r"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24])
1840     : "%"REG_a
1841     );
1842 }
1843
1844 static void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV,
1845                               const uint8_t *src1, const uint8_t *src2,
1846                               int width, uint32_t *unused)
1847 {
1848     RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_BGR24);
1849     assert(src1 == src2);
1850 }
1851
1852 static void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV,
1853                               const uint8_t *src1, const uint8_t *src2,
1854                               int width, uint32_t *unused)
1855 {
1856     assert(src1==src2);
1857     RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_RGB24);
1858 }
1859
1860 #if !COMPILE_TEMPLATE_MMX2
1861 // bilinear / bicubic scaling
1862 static void RENAME(hScale)(SwsContext *c, int16_t *dst, int dstW,
1863                            const uint8_t *src, const int16_t *filter,
1864                            const int16_t *filterPos, int filterSize)
1865 {
1866     assert(filterSize % 4 == 0 && filterSize>0);
1867     if (filterSize==4) { // Always true for upscaling, sometimes for down, too.
1868         x86_reg counter= -2*dstW;
1869         filter-= counter*2;
1870         filterPos-= counter/2;
1871         dst-= counter/2;
1872         __asm__ volatile(
1873 #if defined(PIC)
1874             "push            %%"REG_b"              \n\t"
1875 #endif
1876             "pxor                %%mm7, %%mm7       \n\t"
1877             "push           %%"REG_BP"              \n\t" // we use 7 regs here ...
1878             "mov             %%"REG_a", %%"REG_BP"  \n\t"
1879             ".p2align                4              \n\t"
1880             "1:                                     \n\t"
1881             "movzwl   (%2, %%"REG_BP"), %%eax       \n\t"
1882             "movzwl  2(%2, %%"REG_BP"), %%ebx       \n\t"
1883             "movq  (%1, %%"REG_BP", 4), %%mm1       \n\t"
1884             "movq 8(%1, %%"REG_BP", 4), %%mm3       \n\t"
1885             "movd      (%3, %%"REG_a"), %%mm0       \n\t"
1886             "movd      (%3, %%"REG_b"), %%mm2       \n\t"
1887             "punpcklbw           %%mm7, %%mm0       \n\t"
1888             "punpcklbw           %%mm7, %%mm2       \n\t"
1889             "pmaddwd             %%mm1, %%mm0       \n\t"
1890             "pmaddwd             %%mm2, %%mm3       \n\t"
1891             "movq                %%mm0, %%mm4       \n\t"
1892             "punpckldq           %%mm3, %%mm0       \n\t"
1893             "punpckhdq           %%mm3, %%mm4       \n\t"
1894             "paddd               %%mm4, %%mm0       \n\t"
1895             "psrad                  $7, %%mm0       \n\t"
1896             "packssdw            %%mm0, %%mm0       \n\t"
1897             "movd                %%mm0, (%4, %%"REG_BP")    \n\t"
1898             "add                    $4, %%"REG_BP"  \n\t"
1899             " jnc                   1b              \n\t"
1900
1901             "pop            %%"REG_BP"              \n\t"
1902 #if defined(PIC)
1903             "pop             %%"REG_b"              \n\t"
1904 #endif
1905             : "+a" (counter)
1906             : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
1907 #if !defined(PIC)
1908             : "%"REG_b
1909 #endif
1910         );
1911     } else if (filterSize==8) {
1912         x86_reg counter= -2*dstW;
1913         filter-= counter*4;
1914         filterPos-= counter/2;
1915         dst-= counter/2;
1916         __asm__ volatile(
1917 #if defined(PIC)
1918             "push             %%"REG_b"             \n\t"
1919 #endif
1920             "pxor                 %%mm7, %%mm7      \n\t"
1921             "push            %%"REG_BP"             \n\t" // we use 7 regs here ...
1922             "mov              %%"REG_a", %%"REG_BP" \n\t"
1923             ".p2align                 4             \n\t"
1924             "1:                                     \n\t"
1925             "movzwl    (%2, %%"REG_BP"), %%eax      \n\t"
1926             "movzwl   2(%2, %%"REG_BP"), %%ebx      \n\t"
1927             "movq   (%1, %%"REG_BP", 8), %%mm1      \n\t"
1928             "movq 16(%1, %%"REG_BP", 8), %%mm3      \n\t"
1929             "movd       (%3, %%"REG_a"), %%mm0      \n\t"
1930             "movd       (%3, %%"REG_b"), %%mm2      \n\t"
1931             "punpcklbw            %%mm7, %%mm0      \n\t"
1932             "punpcklbw            %%mm7, %%mm2      \n\t"
1933             "pmaddwd              %%mm1, %%mm0      \n\t"
1934             "pmaddwd              %%mm2, %%mm3      \n\t"
1935
1936             "movq  8(%1, %%"REG_BP", 8), %%mm1      \n\t"
1937             "movq 24(%1, %%"REG_BP", 8), %%mm5      \n\t"
1938             "movd      4(%3, %%"REG_a"), %%mm4      \n\t"
1939             "movd      4(%3, %%"REG_b"), %%mm2      \n\t"
1940             "punpcklbw            %%mm7, %%mm4      \n\t"
1941             "punpcklbw            %%mm7, %%mm2      \n\t"
1942             "pmaddwd              %%mm1, %%mm4      \n\t"
1943             "pmaddwd              %%mm2, %%mm5      \n\t"
1944             "paddd                %%mm4, %%mm0      \n\t"
1945             "paddd                %%mm5, %%mm3      \n\t"
1946             "movq                 %%mm0, %%mm4      \n\t"
1947             "punpckldq            %%mm3, %%mm0      \n\t"
1948             "punpckhdq            %%mm3, %%mm4      \n\t"
1949             "paddd                %%mm4, %%mm0      \n\t"
1950             "psrad                   $7, %%mm0      \n\t"
1951             "packssdw             %%mm0, %%mm0      \n\t"
1952             "movd                 %%mm0, (%4, %%"REG_BP")   \n\t"
1953             "add                     $4, %%"REG_BP" \n\t"
1954             " jnc                    1b             \n\t"
1955
1956             "pop             %%"REG_BP"             \n\t"
1957 #if defined(PIC)
1958             "pop              %%"REG_b"             \n\t"
1959 #endif
1960             : "+a" (counter)
1961             : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
1962 #if !defined(PIC)
1963             : "%"REG_b
1964 #endif
1965         );
1966     } else {
1967         const uint8_t *offset = src+filterSize;
1968         x86_reg counter= -2*dstW;
1969         //filter-= counter*filterSize/2;
1970         filterPos-= counter/2;
1971         dst-= counter/2;
1972         __asm__ volatile(
1973             "pxor                  %%mm7, %%mm7     \n\t"
1974             ".p2align                  4            \n\t"
1975             "1:                                     \n\t"
1976             "mov                      %2, %%"REG_c" \n\t"
1977             "movzwl      (%%"REG_c", %0), %%eax     \n\t"
1978             "movzwl     2(%%"REG_c", %0), %%edx     \n\t"
1979             "mov                      %5, %%"REG_c" \n\t"
1980             "pxor                  %%mm4, %%mm4     \n\t"
1981             "pxor                  %%mm5, %%mm5     \n\t"
1982             "2:                                     \n\t"
1983             "movq                   (%1), %%mm1     \n\t"
1984             "movq               (%1, %6), %%mm3     \n\t"
1985             "movd (%%"REG_c", %%"REG_a"), %%mm0     \n\t"
1986             "movd (%%"REG_c", %%"REG_d"), %%mm2     \n\t"
1987             "punpcklbw             %%mm7, %%mm0     \n\t"
1988             "punpcklbw             %%mm7, %%mm2     \n\t"
1989             "pmaddwd               %%mm1, %%mm0     \n\t"
1990             "pmaddwd               %%mm2, %%mm3     \n\t"
1991             "paddd                 %%mm3, %%mm5     \n\t"
1992             "paddd                 %%mm0, %%mm4     \n\t"
1993             "add                      $8, %1        \n\t"
1994             "add                      $4, %%"REG_c" \n\t"
1995             "cmp                      %4, %%"REG_c" \n\t"
1996             " jb                      2b            \n\t"
1997             "add                      %6, %1        \n\t"
1998             "movq                  %%mm4, %%mm0     \n\t"
1999             "punpckldq             %%mm5, %%mm4     \n\t"
2000             "punpckhdq             %%mm5, %%mm0     \n\t"
2001             "paddd                 %%mm0, %%mm4     \n\t"
2002             "psrad                    $7, %%mm4     \n\t"
2003             "packssdw              %%mm4, %%mm4     \n\t"
2004             "mov                      %3, %%"REG_a" \n\t"
2005             "movd                  %%mm4, (%%"REG_a", %0)   \n\t"
2006             "add                      $4, %0        \n\t"
2007             " jnc                     1b            \n\t"
2008
2009             : "+r" (counter), "+r" (filter)
2010             : "m" (filterPos), "m" (dst), "m"(offset),
2011             "m" (src), "r" ((x86_reg)filterSize*2)
2012             : "%"REG_a, "%"REG_c, "%"REG_d
2013         );
2014     }
2015 }
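
/* All three paths above implement the same horizontal FIR filter; the scalar
 * equivalent is roughly
 *
 *     for (i = 0; i < dstW; i++) {
 *         int j, val = 0;
 *         for (j = 0; j < filterSize; j++)
 *             val += (int)src[filterPos[i] + j] * filter[i * filterSize + j];
 *         dst[i] = av_clip_int16(val >> 7);    // psrad $7 + packssdw above
 *     }
 */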
2016 #endif /* !COMPILE_TEMPLATE_MMX2 */
2017
2018 #if COMPILE_TEMPLATE_MMX2
2019 static void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst,
2020                                  int dstWidth, const uint8_t *src,
2021                                  int srcW, int xInc)
2022 {
2023     int16_t *filterPos = c->hLumFilterPos;
2024     int16_t *filter    = c->hLumFilter;
2025     void    *mmx2FilterCode= c->lumMmx2FilterCode;
2026     int i;
2027 #if defined(PIC)
2028     DECLARE_ALIGNED(8, uint64_t, ebxsave);
2029 #endif
2030
2031     __asm__ volatile(
2032 #if defined(PIC)
2033         "mov               %%"REG_b", %5        \n\t"
2034 #endif
2035         "pxor                  %%mm7, %%mm7     \n\t"
2036         "mov                      %0, %%"REG_c" \n\t"
2037         "mov                      %1, %%"REG_D" \n\t"
2038         "mov                      %2, %%"REG_d" \n\t"
2039         "mov                      %3, %%"REG_b" \n\t"
2040         "xor               %%"REG_a", %%"REG_a" \n\t" // i
2041         PREFETCH"        (%%"REG_c")            \n\t"
2042         PREFETCH"      32(%%"REG_c")            \n\t"
2043         PREFETCH"      64(%%"REG_c")            \n\t"
2044
2045 #if ARCH_X86_64
2046 #define CALL_MMX2_FILTER_CODE \
2047         "movl            (%%"REG_b"), %%esi     \n\t"\
2048         "call                    *%4            \n\t"\
2049         "movl (%%"REG_b", %%"REG_a"), %%esi     \n\t"\
2050         "add               %%"REG_S", %%"REG_c" \n\t"\
2051         "add               %%"REG_a", %%"REG_D" \n\t"\
2052         "xor               %%"REG_a", %%"REG_a" \n\t"\
2053
2054 #else
2055 #define CALL_MMX2_FILTER_CODE \
2056         "movl (%%"REG_b"), %%esi        \n\t"\
2057         "call         *%4                       \n\t"\
2058         "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
2059         "add               %%"REG_a", %%"REG_D" \n\t"\
2060         "xor               %%"REG_a", %%"REG_a" \n\t"\
2061
2062 #endif /* ARCH_X86_64 */
2063
2064         CALL_MMX2_FILTER_CODE
2065         CALL_MMX2_FILTER_CODE
2066         CALL_MMX2_FILTER_CODE
2067         CALL_MMX2_FILTER_CODE
2068         CALL_MMX2_FILTER_CODE
2069         CALL_MMX2_FILTER_CODE
2070         CALL_MMX2_FILTER_CODE
2071         CALL_MMX2_FILTER_CODE
2072
2073 #if defined(PIC)
2074         "mov                      %5, %%"REG_b" \n\t"
2075 #endif
2076         :: "m" (src), "m" (dst), "m" (filter), "m" (filterPos),
2077            "m" (mmx2FilterCode)
2078 #if defined(PIC)
2079           ,"m" (ebxsave)
2080 #endif
2081         : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
2082 #if !defined(PIC)
2083          ,"%"REG_b
2084 #endif
2085     );
2086
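    /* Outputs whose source position lands on or past the last input sample are
     * filled with a replicated edge pixel; the *128 (<<7) matches the scaling
     * of the values the filter code writes. */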
2087     for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
2088         dst[i] = src[srcW-1]*128;
2089 }
2090
2091 static void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst1, int16_t *dst2,
2092                                  int dstWidth, const uint8_t *src1,
2093                                  const uint8_t *src2, int srcW, int xInc)
2094 {
2095     int16_t *filterPos = c->hChrFilterPos;
2096     int16_t *filter    = c->hChrFilter;
2097     void    *mmx2FilterCode= c->chrMmx2FilterCode;
2098     int i;
2099 #if defined(PIC)
2100     DECLARE_ALIGNED(8, uint64_t, ebxsave);
2101 #endif
2102
2103     __asm__ volatile(
2104 #if defined(PIC)
2105         "mov          %%"REG_b", %7         \n\t"
2106 #endif
2107         "pxor             %%mm7, %%mm7      \n\t"
2108         "mov                 %0, %%"REG_c"  \n\t"
2109         "mov                 %1, %%"REG_D"  \n\t"
2110         "mov                 %2, %%"REG_d"  \n\t"
2111         "mov                 %3, %%"REG_b"  \n\t"
2112         "xor          %%"REG_a", %%"REG_a"  \n\t" // i
2113         PREFETCH"   (%%"REG_c")             \n\t"
2114         PREFETCH" 32(%%"REG_c")             \n\t"
2115         PREFETCH" 64(%%"REG_c")             \n\t"
2116
2117         CALL_MMX2_FILTER_CODE
2118         CALL_MMX2_FILTER_CODE
2119         CALL_MMX2_FILTER_CODE
2120         CALL_MMX2_FILTER_CODE
2121         "xor          %%"REG_a", %%"REG_a"  \n\t" // i
2122         "mov                 %5, %%"REG_c"  \n\t" // src
2123         "mov                 %6, %%"REG_D"  \n\t" // buf2
2124         PREFETCH"   (%%"REG_c")             \n\t"
2125         PREFETCH" 32(%%"REG_c")             \n\t"
2126         PREFETCH" 64(%%"REG_c")             \n\t"
2127
2128         CALL_MMX2_FILTER_CODE
2129         CALL_MMX2_FILTER_CODE
2130         CALL_MMX2_FILTER_CODE
2131         CALL_MMX2_FILTER_CODE
2132
2133 #if defined(PIC)
2134         "mov %7, %%"REG_b"    \n\t"
2135 #endif
2136         :: "m" (src1), "m" (dst1), "m" (filter), "m" (filterPos),
2137            "m" (mmx2FilterCode), "m" (src2), "m"(dst2)
2138 #if defined(PIC)
2139           ,"m" (ebxsave)
2140 #endif
2141         : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
2142 #if !defined(PIC)
2143          ,"%"REG_b
2144 #endif
2145     );
2146
2147     for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) {
2148         dst1[i] = src1[srcW-1]*128;
2149         dst2[i] = src2[srcW-1]*128;
2150     }
2151 }
2152 #endif /* COMPILE_TEMPLATE_MMX2 */
2153
2154 static av_cold void RENAME(sws_init_swScale)(SwsContext *c)
2155 {
2156     enum PixelFormat srcFormat = c->srcFormat,
2157                      dstFormat = c->dstFormat;
2158
2159     if (!is16BPS(dstFormat) && !is9_OR_10BPS(dstFormat) &&
2160         dstFormat != PIX_FMT_NV12 && dstFormat != PIX_FMT_NV21) {
2161         if (!(c->flags & SWS_BITEXACT)) {
2162             if (c->flags & SWS_ACCURATE_RND) {
2163                 c->yuv2yuv1 = RENAME(yuv2yuv1_ar    );
2164                 c->yuv2yuvX = RENAME(yuv2yuvX_ar    );
2165                 if (!(c->flags & SWS_FULL_CHR_H_INT)) {
2166                     switch (c->dstFormat) {
2167                     case PIX_FMT_RGB32:   c->yuv2packedX = RENAME(yuv2rgb32_X_ar);   break;
2168                     case PIX_FMT_BGR24:   c->yuv2packedX = RENAME(yuv2bgr24_X_ar);   break;
2169                     case PIX_FMT_RGB555:  c->yuv2packedX = RENAME(yuv2rgb555_X_ar);  break;
2170                     case PIX_FMT_RGB565:  c->yuv2packedX = RENAME(yuv2rgb565_X_ar);  break;
2171                     case PIX_FMT_YUYV422: c->yuv2packedX = RENAME(yuv2yuyv422_X_ar); break;
2172                     default: break;
2173                     }
2174                 }
2175             } else {
2176                 c->yuv2yuv1 = RENAME(yuv2yuv1    );
2177                 c->yuv2yuvX = RENAME(yuv2yuvX    );
2178                 if (!(c->flags & SWS_FULL_CHR_H_INT)) {
2179                     switch (c->dstFormat) {
2180                     case PIX_FMT_RGB32:   c->yuv2packedX = RENAME(yuv2rgb32_X);   break;
2181                     case PIX_FMT_BGR24:   c->yuv2packedX = RENAME(yuv2bgr24_X);   break;
2182                     case PIX_FMT_RGB555:  c->yuv2packedX = RENAME(yuv2rgb555_X);  break;
2183                     case PIX_FMT_RGB565:  c->yuv2packedX = RENAME(yuv2rgb565_X);  break;
2184                     case PIX_FMT_YUYV422: c->yuv2packedX = RENAME(yuv2yuyv422_X); break;
2185                     default: break;
2186                     }
2187                 }
2188             }
2189         }
2190         if (!(c->flags & SWS_FULL_CHR_H_INT)) {
2191             switch (c->dstFormat) {
2192             case PIX_FMT_RGB32:
2193                 c->yuv2packed1 = RENAME(yuv2rgb32_1);
2194                 c->yuv2packed2 = RENAME(yuv2rgb32_2);
2195                 break;
2196             case PIX_FMT_BGR24:
2197                 c->yuv2packed1 = RENAME(yuv2bgr24_1);
2198                 c->yuv2packed2 = RENAME(yuv2bgr24_2);
2199                 break;
2200             case PIX_FMT_RGB555:
2201                 c->yuv2packed1 = RENAME(yuv2rgb555_1);
2202                 c->yuv2packed2 = RENAME(yuv2rgb555_2);
2203                 break;
2204             case PIX_FMT_RGB565:
2205                 c->yuv2packed1 = RENAME(yuv2rgb565_1);
2206                 c->yuv2packed2 = RENAME(yuv2rgb565_2);
2207                 break;
2208             case PIX_FMT_YUYV422:
2209                 c->yuv2packed1 = RENAME(yuv2yuyv422_1);
2210                 c->yuv2packed2 = RENAME(yuv2yuyv422_2);
2211                 break;
2212             default:
2213                 break;
2214             }
2215         }
2216     }
2217
2218     if (c->scalingBpp == 8) {
2219 #if !COMPILE_TEMPLATE_MMX2
2220     c->hScale       = RENAME(hScale      );
2221 #endif /* !COMPILE_TEMPLATE_MMX2 */
2222
2223     // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
2224 #if COMPILE_TEMPLATE_MMX2
2225     if (c->flags & SWS_FAST_BILINEAR && c->canMMX2BeUsed)
2226     {
2227         c->hyscale_fast = RENAME(hyscale_fast);
2228         c->hcscale_fast = RENAME(hcscale_fast);
2229     } else {
2230 #endif /* COMPILE_TEMPLATE_MMX2 */
2231         c->hyscale_fast = NULL;
2232         c->hcscale_fast = NULL;
2233 #if COMPILE_TEMPLATE_MMX2
2234     }
2235 #endif /* COMPILE_TEMPLATE_MMX2 */
2236     }
2237
2238 #if !COMPILE_TEMPLATE_MMX2
2239     switch(srcFormat) {
2240         case PIX_FMT_YUYV422  : c->chrToYV12 = RENAME(yuy2ToUV); break;
2241         case PIX_FMT_UYVY422  : c->chrToYV12 = RENAME(uyvyToUV); break;
2242         case PIX_FMT_NV12     : c->chrToYV12 = RENAME(nv12ToUV); break;
2243         case PIX_FMT_NV21     : c->chrToYV12 = RENAME(nv21ToUV); break;
2244         default: break;
2245     }
2246 #endif /* !COMPILE_TEMPLATE_MMX2 */
2247     if (!c->chrSrcHSubSample) {
2248         switch(srcFormat) {
2249         case PIX_FMT_BGR24  : c->chrToYV12 = RENAME(bgr24ToUV); break;
2250         case PIX_FMT_RGB24  : c->chrToYV12 = RENAME(rgb24ToUV); break;
2251         default: break;
2252         }
2253     }
2254
2255     switch (srcFormat) {
2256 #if !COMPILE_TEMPLATE_MMX2
2257     case PIX_FMT_YUYV422  :
2258     case PIX_FMT_Y400A    : c->lumToYV12 = RENAME(yuy2ToY); break;
2259     case PIX_FMT_UYVY422  : c->lumToYV12 = RENAME(uyvyToY); break;
2260 #endif /* !COMPILE_TEMPLATE_MMX2 */
2261     case PIX_FMT_BGR24    : c->lumToYV12 = RENAME(bgr24ToY); break;
2262     case PIX_FMT_RGB24    : c->lumToYV12 = RENAME(rgb24ToY); break;
2263     default: break;
2264     }
2265 #if !COMPILE_TEMPLATE_MMX2
2266     if (c->alpPixBuf) {
2267         switch (srcFormat) {
2268         case PIX_FMT_Y400A  : c->alpToYV12 = RENAME(yuy2ToY); break;
2269         default: break;
2270         }
2271     }
2272 #endif /* !COMPILE_TEMPLATE_MMX2 */
2273 }