swscale: remove if(canMMX2BeUsed) conditional.
[ffmpeg.git] / libswscale / x86 / swscale_template.c
1 /*
2  * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of Libav.
5  *
6  * Libav is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * Libav is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with Libav; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 #include "swscale_template.h"
22
23 #undef REAL_MOVNTQ
24 #undef MOVNTQ
25 #undef PREFETCH
26
27 #if COMPILE_TEMPLATE_MMX2
28 #define PREFETCH "prefetchnta"
29 #else
30 #define PREFETCH  " # nop"
31 #endif
32
33 #if COMPILE_TEMPLATE_MMX2
34 #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
35 #else
36 #define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
37 #endif
38 #define MOVNTQ(a,b)  REAL_MOVNTQ(a,b)
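/* With MMX2 we can use non-temporal stores (movntq) and prefetchnta; on plain
   MMX these degrade to an ordinary movq and a nop. */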
39
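/*
 * YSCALEYUV2YV12X: vertical scaling with an arbitrary-length filter for one
 * planar output plane.  Each pass of the outer loop (label 1) produces 8
 * output pixels: the inner part walks the NULL-terminated {srcPtr, coeff}
 * list at "offset", accumulates pmulhw(coeff, src) into two word accumulators
 * seeded with the rounder at VROUNDER_OFFSET, shifts the sums right by 3 and
 * packs them to unsigned bytes.  A rough scalar sketch of one output pixel
 * (illustrative only -- the real fixed-point scaling is baked into the
 * precomputed MMX filter coefficients):
 *
 *     int val = rounder;
 *     for (j = 0; j < filterSize; j++)
 *         val += (src[j][i] * coeff[j]) >> 16;   // pmulhw
 *     dest[i] = av_clip_uint8(val >> 3);
 */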
40 #define YSCALEYUV2YV12X(x, offset, dest, width) \
41     __asm__ volatile(\
42         "xor                          %%"REG_a", %%"REG_a"  \n\t"\
43         "movq             "VROUNDER_OFFSET"(%0), %%mm3      \n\t"\
44         "movq                             %%mm3, %%mm4      \n\t"\
45         "lea                     " offset "(%0), %%"REG_d"  \n\t"\
46         "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
47         ".p2align                             4             \n\t" /* FIXME Unroll? */\
48         "1:                                                 \n\t"\
49         "movq                      8(%%"REG_d"), %%mm0      \n\t" /* filterCoeff */\
50         "movq   "  x "(%%"REG_S", %%"REG_a", 2), %%mm2      \n\t" /* srcData */\
51         "movq 8+"  x "(%%"REG_S", %%"REG_a", 2), %%mm5      \n\t" /* srcData */\
52         "add                                $16, %%"REG_d"  \n\t"\
53         "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
54         "test                         %%"REG_S", %%"REG_S"  \n\t"\
55         "pmulhw                           %%mm0, %%mm2      \n\t"\
56         "pmulhw                           %%mm0, %%mm5      \n\t"\
57         "paddw                            %%mm2, %%mm3      \n\t"\
58         "paddw                            %%mm5, %%mm4      \n\t"\
59         " jnz                                1b             \n\t"\
60         "psraw                               $3, %%mm3      \n\t"\
61         "psraw                               $3, %%mm4      \n\t"\
62         "packuswb                         %%mm4, %%mm3      \n\t"\
63         MOVNTQ(%%mm3, (%1, %%REGa))\
64         "add                                 $8, %%"REG_a"  \n\t"\
65         "cmp                                 %2, %%"REG_a"  \n\t"\
66         "movq             "VROUNDER_OFFSET"(%0), %%mm3      \n\t"\
67         "movq                             %%mm3, %%mm4      \n\t"\
68         "lea                     " offset "(%0), %%"REG_d"  \n\t"\
69         "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
70         "jb                                  1b             \n\t"\
71         :: "r" (&c->redDither),\
72         "r" (dest), "g" ((x86_reg)width)\
73         : "%"REG_a, "%"REG_d, "%"REG_S\
74     );
75
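/*
 * The _ACCURATE variant (used for SWS_ACCURATE_RND) keeps full 32-bit partial
 * sums with pmaddwd instead of truncating every product to its high 16 bits
 * with pmulhw, and only adds the rounder after the dword sums have been packed
 * back to words, so rounding error does not accumulate over the filter taps.
 */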
76 #define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
77     __asm__ volatile(\
78         "lea                     " offset "(%0), %%"REG_d"  \n\t"\
79         "xor                          %%"REG_a", %%"REG_a"  \n\t"\
80         "pxor                             %%mm4, %%mm4      \n\t"\
81         "pxor                             %%mm5, %%mm5      \n\t"\
82         "pxor                             %%mm6, %%mm6      \n\t"\
83         "pxor                             %%mm7, %%mm7      \n\t"\
84         "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
85         ".p2align                             4             \n\t"\
86         "1:                                                 \n\t"\
87         "movq   "  x "(%%"REG_S", %%"REG_a", 2), %%mm0      \n\t" /* srcData */\
88         "movq 8+"  x "(%%"REG_S", %%"REG_a", 2), %%mm2      \n\t" /* srcData */\
89         "mov        "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S"  \n\t"\
90         "movq   "  x "(%%"REG_S", %%"REG_a", 2), %%mm1      \n\t" /* srcData */\
91         "movq                             %%mm0, %%mm3      \n\t"\
92         "punpcklwd                        %%mm1, %%mm0      \n\t"\
93         "punpckhwd                        %%mm1, %%mm3      \n\t"\
94         "movq       "STR(APCK_COEF)"(%%"REG_d"), %%mm1      \n\t" /* filterCoeff */\
95         "pmaddwd                          %%mm1, %%mm0      \n\t"\
96         "pmaddwd                          %%mm1, %%mm3      \n\t"\
97         "paddd                            %%mm0, %%mm4      \n\t"\
98         "paddd                            %%mm3, %%mm5      \n\t"\
99         "movq 8+"  x "(%%"REG_S", %%"REG_a", 2), %%mm3      \n\t" /* srcData */\
100         "mov        "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S"  \n\t"\
101         "add                  $"STR(APCK_SIZE)", %%"REG_d"  \n\t"\
102         "test                         %%"REG_S", %%"REG_S"  \n\t"\
103         "movq                             %%mm2, %%mm0      \n\t"\
104         "punpcklwd                        %%mm3, %%mm2      \n\t"\
105         "punpckhwd                        %%mm3, %%mm0      \n\t"\
106         "pmaddwd                          %%mm1, %%mm2      \n\t"\
107         "pmaddwd                          %%mm1, %%mm0      \n\t"\
108         "paddd                            %%mm2, %%mm6      \n\t"\
109         "paddd                            %%mm0, %%mm7      \n\t"\
110         " jnz                                1b             \n\t"\
111         "psrad                              $16, %%mm4      \n\t"\
112         "psrad                              $16, %%mm5      \n\t"\
113         "psrad                              $16, %%mm6      \n\t"\
114         "psrad                              $16, %%mm7      \n\t"\
115         "movq             "VROUNDER_OFFSET"(%0), %%mm0      \n\t"\
116         "packssdw                         %%mm5, %%mm4      \n\t"\
117         "packssdw                         %%mm7, %%mm6      \n\t"\
118         "paddw                            %%mm0, %%mm4      \n\t"\
119         "paddw                            %%mm0, %%mm6      \n\t"\
120         "psraw                               $3, %%mm4      \n\t"\
121         "psraw                               $3, %%mm6      \n\t"\
122         "packuswb                         %%mm6, %%mm4      \n\t"\
123         MOVNTQ(%%mm4, (%1, %%REGa))\
124         "add                                 $8, %%"REG_a"  \n\t"\
125         "cmp                                 %2, %%"REG_a"  \n\t"\
126         "lea                     " offset "(%0), %%"REG_d"  \n\t"\
127         "pxor                             %%mm4, %%mm4      \n\t"\
128         "pxor                             %%mm5, %%mm5      \n\t"\
129         "pxor                             %%mm6, %%mm6      \n\t"\
130         "pxor                             %%mm7, %%mm7      \n\t"\
131         "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
132         "jb                                  1b             \n\t"\
133         :: "r" (&c->redDither),\
134         "r" (dest), "g" ((x86_reg)width)\
135         : "%"REG_a, "%"REG_d, "%"REG_S\
136     );
137
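/*
 * YSCALEYUV2YV121: the unscaled vertical case (one source line, unit filter).
 * The 16-bit intermediates are shifted right by 7 and packed to bytes.  The
 * _ACCURATE variant first builds the constant 64 in %%mm7
 * (pcmpeqw / psrlw $15 / psllw $6) and adds it so that the >>7 rounds to
 * nearest instead of truncating.
 */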
138 #define YSCALEYUV2YV121 \
139     "mov %2, %%"REG_a"                    \n\t"\
140     ".p2align               4             \n\t" /* FIXME Unroll? */\
141     "1:                                   \n\t"\
142     "movq  (%0, %%"REG_a", 2), %%mm0      \n\t"\
143     "movq 8(%0, %%"REG_a", 2), %%mm1      \n\t"\
144     "psraw                 $7, %%mm0      \n\t"\
145     "psraw                 $7, %%mm1      \n\t"\
146     "packuswb           %%mm1, %%mm0      \n\t"\
147     MOVNTQ(%%mm0, (%1, %%REGa))\
148     "add                   $8, %%"REG_a"  \n\t"\
149     "jnc                   1b             \n\t"
150
151 #define YSCALEYUV2YV121_ACCURATE \
152     "mov %2, %%"REG_a"                    \n\t"\
153     "pcmpeqw %%mm7, %%mm7                 \n\t"\
154     "psrlw                 $15, %%mm7     \n\t"\
155     "psllw                  $6, %%mm7     \n\t"\
156     ".p2align                4            \n\t" /* FIXME Unroll? */\
157     "1:                                   \n\t"\
158     "movq  (%0, %%"REG_a", 2), %%mm0      \n\t"\
159     "movq 8(%0, %%"REG_a", 2), %%mm1      \n\t"\
160     "paddsw             %%mm7, %%mm0      \n\t"\
161     "paddsw             %%mm7, %%mm1      \n\t"\
162     "psraw                 $7, %%mm0      \n\t"\
163     "psraw                 $7, %%mm1      \n\t"\
164     "packuswb           %%mm1, %%mm0      \n\t"\
165     MOVNTQ(%%mm0, (%1, %%REGa))\
166     "add                   $8, %%"REG_a"  \n\t"\
167     "jnc                   1b             \n\t"
168
169 /*
170     :: "m" (-lumFilterSize), "m" (-chrFilterSize),
171        "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
172        "r" (dest), "m" (dstW_reg),
173        "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
174     : "%eax", "%ebx", "%ecx", "%edx", "%esi"
175 */
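/*
 * The YSCALEYUV2PACKEDX* macros open one large asm statement for packed
 * output: the outer loop (label 1) advances 8 output pixels at a time, the
 * inner loops (label 2) run over the NULL-terminated {srcPtr, coeff} filter
 * lists for chroma and then luma.  _UV leaves the filtered U/V words in
 * %%mm3/%%mm4, _YA leaves the two filtered luma halves in dst1/dst2, and the
 * statement is closed by YSCALEYUV2PACKEDX_END, which supplies the operands
 * (%0 = &c->redDither, %4 = dest, %5 = dstW).
 */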
176 #define YSCALEYUV2PACKEDX_UV \
177     __asm__ volatile(\
178         "xor                   %%"REG_a", %%"REG_a"     \n\t"\
179         ".p2align                      4                \n\t"\
180         "nop                                            \n\t"\
181         "1:                                             \n\t"\
182         "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"     \n\t"\
183         "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
184         "movq      "VROUNDER_OFFSET"(%0), %%mm3         \n\t"\
185         "movq                      %%mm3, %%mm4         \n\t"\
186         ".p2align                      4                \n\t"\
187         "2:                                             \n\t"\
188         "movq               8(%%"REG_d"), %%mm0         \n\t" /* filterCoeff */\
189         "movq     (%%"REG_S", %%"REG_a"), %%mm2         \n\t" /* UsrcData */\
190         "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm5         \n\t" /* VsrcData */\
191         "add                         $16, %%"REG_d"     \n\t"\
192         "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
193         "pmulhw                    %%mm0, %%mm2         \n\t"\
194         "pmulhw                    %%mm0, %%mm5         \n\t"\
195         "paddw                     %%mm2, %%mm3         \n\t"\
196         "paddw                     %%mm5, %%mm4         \n\t"\
197         "test                  %%"REG_S", %%"REG_S"     \n\t"\
198         " jnz                         2b                \n\t"\
199
200 #define YSCALEYUV2PACKEDX_YA(offset,coeff,src1,src2,dst1,dst2) \
201     "lea                "offset"(%0), %%"REG_d"     \n\t"\
202     "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
203     "movq      "VROUNDER_OFFSET"(%0), "#dst1"       \n\t"\
204     "movq                    "#dst1", "#dst2"       \n\t"\
205     ".p2align                      4                \n\t"\
206     "2:                                             \n\t"\
207     "movq               8(%%"REG_d"), "#coeff"      \n\t" /* filterCoeff */\
208     "movq  (%%"REG_S", %%"REG_a", 2), "#src1"       \n\t" /* Y1srcData */\
209     "movq 8(%%"REG_S", %%"REG_a", 2), "#src2"       \n\t" /* Y2srcData */\
210     "add                         $16, %%"REG_d"            \n\t"\
211     "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
212     "pmulhw                 "#coeff", "#src1"       \n\t"\
213     "pmulhw                 "#coeff", "#src2"       \n\t"\
214     "paddw                   "#src1", "#dst1"       \n\t"\
215     "paddw                   "#src2", "#dst2"       \n\t"\
216     "test                  %%"REG_S", %%"REG_S"     \n\t"\
217     " jnz                         2b                \n\t"\
218
219 #define YSCALEYUV2PACKEDX \
220     YSCALEYUV2PACKEDX_UV \
221     YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET,%%mm0,%%mm2,%%mm5,%%mm1,%%mm7) \
222
223 #define YSCALEYUV2PACKEDX_END                     \
224         :: "r" (&c->redDither),                   \
225             "m" (dummy), "m" (dummy), "m" (dummy),\
226             "r" (dest), "m" (dstW_reg)            \
227         : "%"REG_a, "%"REG_d, "%"REG_S            \
228     );
229
230 #define YSCALEYUV2PACKEDX_ACCURATE_UV \
231     __asm__ volatile(\
232         "xor %%"REG_a", %%"REG_a"                       \n\t"\
233         ".p2align                      4                \n\t"\
234         "nop                                            \n\t"\
235         "1:                                             \n\t"\
236         "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"     \n\t"\
237         "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
238         "pxor                      %%mm4, %%mm4         \n\t"\
239         "pxor                      %%mm5, %%mm5         \n\t"\
240         "pxor                      %%mm6, %%mm6         \n\t"\
241         "pxor                      %%mm7, %%mm7         \n\t"\
242         ".p2align                      4                \n\t"\
243         "2:                                             \n\t"\
244         "movq     (%%"REG_S", %%"REG_a"), %%mm0         \n\t" /* UsrcData */\
245         "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm2         \n\t" /* VsrcData */\
246         "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S"     \n\t"\
247         "movq     (%%"REG_S", %%"REG_a"), %%mm1         \n\t" /* UsrcData */\
248         "movq                      %%mm0, %%mm3         \n\t"\
249         "punpcklwd                 %%mm1, %%mm0         \n\t"\
250         "punpckhwd                 %%mm1, %%mm3         \n\t"\
251         "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1         \n\t" /* filterCoeff */\
252         "pmaddwd                   %%mm1, %%mm0         \n\t"\
253         "pmaddwd                   %%mm1, %%mm3         \n\t"\
254         "paddd                     %%mm0, %%mm4         \n\t"\
255         "paddd                     %%mm3, %%mm5         \n\t"\
256         "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm3         \n\t" /* VsrcData */\
257         "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S"     \n\t"\
258         "add           $"STR(APCK_SIZE)", %%"REG_d"     \n\t"\
259         "test                  %%"REG_S", %%"REG_S"     \n\t"\
260         "movq                      %%mm2, %%mm0         \n\t"\
261         "punpcklwd                 %%mm3, %%mm2         \n\t"\
262         "punpckhwd                 %%mm3, %%mm0         \n\t"\
263         "pmaddwd                   %%mm1, %%mm2         \n\t"\
264         "pmaddwd                   %%mm1, %%mm0         \n\t"\
265         "paddd                     %%mm2, %%mm6         \n\t"\
266         "paddd                     %%mm0, %%mm7         \n\t"\
267         " jnz                         2b                \n\t"\
268         "psrad                       $16, %%mm4         \n\t"\
269         "psrad                       $16, %%mm5         \n\t"\
270         "psrad                       $16, %%mm6         \n\t"\
271         "psrad                       $16, %%mm7         \n\t"\
272         "movq      "VROUNDER_OFFSET"(%0), %%mm0         \n\t"\
273         "packssdw                  %%mm5, %%mm4         \n\t"\
274         "packssdw                  %%mm7, %%mm6         \n\t"\
275         "paddw                     %%mm0, %%mm4         \n\t"\
276         "paddw                     %%mm0, %%mm6         \n\t"\
277         "movq                      %%mm4, "U_TEMP"(%0)  \n\t"\
278         "movq                      %%mm6, "V_TEMP"(%0)  \n\t"\
279
280 #define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \
281     "lea                "offset"(%0), %%"REG_d"     \n\t"\
282     "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
283     "pxor                      %%mm1, %%mm1         \n\t"\
284     "pxor                      %%mm5, %%mm5         \n\t"\
285     "pxor                      %%mm7, %%mm7         \n\t"\
286     "pxor                      %%mm6, %%mm6         \n\t"\
287     ".p2align                      4                \n\t"\
288     "2:                                             \n\t"\
289     "movq  (%%"REG_S", %%"REG_a", 2), %%mm0         \n\t" /* Y1srcData */\
290     "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2         \n\t" /* Y2srcData */\
291     "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S"     \n\t"\
292     "movq  (%%"REG_S", %%"REG_a", 2), %%mm4         \n\t" /* Y1srcData */\
293     "movq                      %%mm0, %%mm3         \n\t"\
294     "punpcklwd                 %%mm4, %%mm0         \n\t"\
295     "punpckhwd                 %%mm4, %%mm3         \n\t"\
296     "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4         \n\t" /* filterCoeff */\
297     "pmaddwd                   %%mm4, %%mm0         \n\t"\
298     "pmaddwd                   %%mm4, %%mm3         \n\t"\
299     "paddd                     %%mm0, %%mm1         \n\t"\
300     "paddd                     %%mm3, %%mm5         \n\t"\
301     "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3         \n\t" /* Y2srcData */\
302     "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S"     \n\t"\
303     "add           $"STR(APCK_SIZE)", %%"REG_d"     \n\t"\
304     "test                  %%"REG_S", %%"REG_S"     \n\t"\
305     "movq                      %%mm2, %%mm0         \n\t"\
306     "punpcklwd                 %%mm3, %%mm2         \n\t"\
307     "punpckhwd                 %%mm3, %%mm0         \n\t"\
308     "pmaddwd                   %%mm4, %%mm2         \n\t"\
309     "pmaddwd                   %%mm4, %%mm0         \n\t"\
310     "paddd                     %%mm2, %%mm7         \n\t"\
311     "paddd                     %%mm0, %%mm6         \n\t"\
312     " jnz                         2b                \n\t"\
313     "psrad                       $16, %%mm1         \n\t"\
314     "psrad                       $16, %%mm5         \n\t"\
315     "psrad                       $16, %%mm7         \n\t"\
316     "psrad                       $16, %%mm6         \n\t"\
317     "movq      "VROUNDER_OFFSET"(%0), %%mm0         \n\t"\
318     "packssdw                  %%mm5, %%mm1         \n\t"\
319     "packssdw                  %%mm6, %%mm7         \n\t"\
320     "paddw                     %%mm0, %%mm1         \n\t"\
321     "paddw                     %%mm0, %%mm7         \n\t"\
322     "movq               "U_TEMP"(%0), %%mm3         \n\t"\
323     "movq               "V_TEMP"(%0), %%mm4         \n\t"\
324
325 #define YSCALEYUV2PACKEDX_ACCURATE \
326     YSCALEYUV2PACKEDX_ACCURATE_UV \
327     YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET)
328
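/*
 * YSCALEYUV2RGBX: turn the filtered word-sized Y (%%mm1/%%mm7) and U/V
 * (%%mm3/%%mm4) into packed bytes using the per-context coefficients:
 *     B = y_coeff*(Y - y_off) + ub*(U - u_off)
 *     G = y_coeff*(Y - y_off) + ug*(U - u_off) + vg*(V - v_off)
 *     R = y_coeff*(Y - y_off) + vr*(V - v_off)
 * Offsets and coefficients (Y_OFFSET, UB_COEFF, ...) come pre-scaled from the
 * SwsContext, so a single pmulhw per term gives the right fixed point.  The
 * results end up as bytes: B in %%mm2, G in %%mm4, R in %%mm5.
 */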
329 #define YSCALEYUV2RGBX \
330     "psubw  "U_OFFSET"(%0), %%mm3       \n\t" /* (U-128)8*/\
331     "psubw  "V_OFFSET"(%0), %%mm4       \n\t" /* (V-128)8*/\
332     "movq            %%mm3, %%mm2       \n\t" /* (U-128)8*/\
333     "movq            %%mm4, %%mm5       \n\t" /* (V-128)8*/\
334     "pmulhw "UG_COEFF"(%0), %%mm3       \n\t"\
335     "pmulhw "VG_COEFF"(%0), %%mm4       \n\t"\
336     /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
337     "pmulhw "UB_COEFF"(%0), %%mm2       \n\t"\
338     "pmulhw "VR_COEFF"(%0), %%mm5       \n\t"\
339     "psubw  "Y_OFFSET"(%0), %%mm1       \n\t" /* 8(Y-16)*/\
340     "psubw  "Y_OFFSET"(%0), %%mm7       \n\t" /* 8(Y-16)*/\
341     "pmulhw  "Y_COEFF"(%0), %%mm1       \n\t"\
342     "pmulhw  "Y_COEFF"(%0), %%mm7       \n\t"\
343     /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
344     "paddw           %%mm3, %%mm4       \n\t"\
345     "movq            %%mm2, %%mm0       \n\t"\
346     "movq            %%mm5, %%mm6       \n\t"\
347     "movq            %%mm4, %%mm3       \n\t"\
348     "punpcklwd       %%mm2, %%mm2       \n\t"\
349     "punpcklwd       %%mm5, %%mm5       \n\t"\
350     "punpcklwd       %%mm4, %%mm4       \n\t"\
351     "paddw           %%mm1, %%mm2       \n\t"\
352     "paddw           %%mm1, %%mm5       \n\t"\
353     "paddw           %%mm1, %%mm4       \n\t"\
354     "punpckhwd       %%mm0, %%mm0       \n\t"\
355     "punpckhwd       %%mm6, %%mm6       \n\t"\
356     "punpckhwd       %%mm3, %%mm3       \n\t"\
357     "paddw           %%mm7, %%mm0       \n\t"\
358     "paddw           %%mm7, %%mm6       \n\t"\
359     "paddw           %%mm7, %%mm3       \n\t"\
360     /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
361     "packuswb        %%mm0, %%mm2       \n\t"\
362     "packuswb        %%mm6, %%mm5       \n\t"\
363     "packuswb        %%mm3, %%mm4       \n\t"\
364
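/*
 * REAL_YSCALEYUV2PACKED: two-line vertical blend for the packed (YUYV) output
 * path.  The chroma and luma alphas are pre-shifted right by 3, then each
 * sample is interpolated between the two input lines as roughly
 *     out = buf1 + ((buf0 - buf1) * alpha >> 16)
 * with buf1 itself shifted down by 7 to the output scale.
 */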
365 #define REAL_YSCALEYUV2PACKED(index, c) \
366     "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0              \n\t"\
367     "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1              \n\t"\
368     "psraw                $3, %%mm0                           \n\t"\
369     "psraw                $3, %%mm1                           \n\t"\
370     "movq              %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
371     "movq              %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
372     "xor            "#index", "#index"                        \n\t"\
373     ".p2align              4            \n\t"\
374     "1:                                 \n\t"\
375     "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
376     "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
377     "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5     \n\t" /* uvbuf0[eax+2048]*/\
378     "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4     \n\t" /* uvbuf1[eax+2048]*/\
379     "psubw             %%mm3, %%mm2     \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
380     "psubw             %%mm4, %%mm5     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
381     "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0    \n\t"\
382     "pmulhw            %%mm0, %%mm2     \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
383     "pmulhw            %%mm0, %%mm5     \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
384     "psraw                $7, %%mm3     \n\t" /* uvbuf1[eax] >>7*/\
385     "psraw                $7, %%mm4     \n\t" /* uvbuf1[eax+2048] >>7*/\
386     "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax]*uvalpha1 + uvbuf1[eax]*(1-uvalpha1)*/\
387     "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048]*uvalpha1 + uvbuf1[eax+2048]*(1-uvalpha1)*/\
388     "movq  (%0, "#index", 2), %%mm0     \n\t" /*buf0[eax]*/\
389     "movq  (%1, "#index", 2), %%mm1     \n\t" /*buf1[eax]*/\
390     "movq 8(%0, "#index", 2), %%mm6     \n\t" /*buf0[eax]*/\
391     "movq 8(%1, "#index", 2), %%mm7     \n\t" /*buf1[eax]*/\
392     "psubw             %%mm1, %%mm0     \n\t" /* buf0[eax] - buf1[eax]*/\
393     "psubw             %%mm7, %%mm6     \n\t" /* buf0[eax] - buf1[eax]*/\
394     "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
395     "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
396     "psraw                $7, %%mm1     \n\t" /* buf1[eax] >>7*/\
397     "psraw                $7, %%mm7     \n\t" /* buf1[eax+4] >>7*/\
398     "paddw             %%mm0, %%mm1     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
399     "paddw             %%mm6, %%mm7     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
400
401 #define YSCALEYUV2PACKED(index, c)  REAL_YSCALEYUV2PACKED(index, c)
402
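/*
 * The two-line YSCALEYUV2RGB* pieces: _UV blends and bias-corrects the chroma
 * of two lines (same buf1 + (buf0 - buf1)*alpha scheme as above, but keeping
 * 4 fractional bits), _YA does the same for two groups of 4 luma samples, and
 * _COEFF applies the same YUV->RGB matrix as YSCALEYUV2RGBX.  YSCALEYUV2RGB()
 * simply chains the three.
 */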
403 #define REAL_YSCALEYUV2RGB_UV(index, c) \
404     "xor            "#index", "#index"  \n\t"\
405     ".p2align              4            \n\t"\
406     "1:                                 \n\t"\
407     "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
408     "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
409     "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5     \n\t" /* uvbuf0[eax+2048]*/\
410     "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4     \n\t" /* uvbuf1[eax+2048]*/\
411     "psubw             %%mm3, %%mm2     \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
412     "psubw             %%mm4, %%mm5     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
413     "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0    \n\t"\
414     "pmulhw            %%mm0, %%mm2     \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
415     "pmulhw            %%mm0, %%mm5     \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
416     "psraw                $4, %%mm3     \n\t" /* uvbuf1[eax] >>4*/\
417     "psraw                $4, %%mm4     \n\t" /* uvbuf1[eax+2048] >>4*/\
418     "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax]*uvalpha1 + uvbuf1[eax]*(1-uvalpha1)*/\
419     "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048]*uvalpha1 + uvbuf1[eax+2048]*(1-uvalpha1)*/\
420     "psubw  "U_OFFSET"("#c"), %%mm3     \n\t" /* (U-128)8*/\
421     "psubw  "V_OFFSET"("#c"), %%mm4     \n\t" /* (V-128)8*/\
422     "movq              %%mm3, %%mm2     \n\t" /* (U-128)8*/\
423     "movq              %%mm4, %%mm5     \n\t" /* (V-128)8*/\
424     "pmulhw "UG_COEFF"("#c"), %%mm3     \n\t"\
425     "pmulhw "VG_COEFF"("#c"), %%mm4     \n\t"\
426     /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
427
428 #define REAL_YSCALEYUV2RGB_YA(index, c, b1, b2) \
429     "movq  ("#b1", "#index", 2), %%mm0     \n\t" /*buf0[eax]*/\
430     "movq  ("#b2", "#index", 2), %%mm1     \n\t" /*buf1[eax]*/\
431     "movq 8("#b1", "#index", 2), %%mm6     \n\t" /*buf0[eax]*/\
432     "movq 8("#b2", "#index", 2), %%mm7     \n\t" /*buf1[eax]*/\
433     "psubw             %%mm1, %%mm0     \n\t" /* buf0[eax] - buf1[eax]*/\
434     "psubw             %%mm7, %%mm6     \n\t" /* buf0[eax] - buf1[eax]*/\
435     "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
436     "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
437     "psraw                $4, %%mm1     \n\t" /* buf1[eax] >>4*/\
438     "psraw                $4, %%mm7     \n\t" /* buf1[eax+4] >>4*/\
439     "paddw             %%mm0, %%mm1     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
440     "paddw             %%mm6, %%mm7     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
441
442 #define REAL_YSCALEYUV2RGB_COEFF(c) \
443     "pmulhw "UB_COEFF"("#c"), %%mm2     \n\t"\
444     "pmulhw "VR_COEFF"("#c"), %%mm5     \n\t"\
445     "psubw  "Y_OFFSET"("#c"), %%mm1     \n\t" /* 8(Y-16)*/\
446     "psubw  "Y_OFFSET"("#c"), %%mm7     \n\t" /* 8(Y-16)*/\
447     "pmulhw  "Y_COEFF"("#c"), %%mm1     \n\t"\
448     "pmulhw  "Y_COEFF"("#c"), %%mm7     \n\t"\
449     /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
450     "paddw             %%mm3, %%mm4     \n\t"\
451     "movq              %%mm2, %%mm0     \n\t"\
452     "movq              %%mm5, %%mm6     \n\t"\
453     "movq              %%mm4, %%mm3     \n\t"\
454     "punpcklwd         %%mm2, %%mm2     \n\t"\
455     "punpcklwd         %%mm5, %%mm5     \n\t"\
456     "punpcklwd         %%mm4, %%mm4     \n\t"\
457     "paddw             %%mm1, %%mm2     \n\t"\
458     "paddw             %%mm1, %%mm5     \n\t"\
459     "paddw             %%mm1, %%mm4     \n\t"\
460     "punpckhwd         %%mm0, %%mm0     \n\t"\
461     "punpckhwd         %%mm6, %%mm6     \n\t"\
462     "punpckhwd         %%mm3, %%mm3     \n\t"\
463     "paddw             %%mm7, %%mm0     \n\t"\
464     "paddw             %%mm7, %%mm6     \n\t"\
465     "paddw             %%mm7, %%mm3     \n\t"\
466     /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
467     "packuswb          %%mm0, %%mm2     \n\t"\
468     "packuswb          %%mm6, %%mm5     \n\t"\
469     "packuswb          %%mm3, %%mm4     \n\t"\
470
471 #define YSCALEYUV2RGB_YA(index, c, b1, b2) REAL_YSCALEYUV2RGB_YA(index, c, b1, b2)
472
473 #define YSCALEYUV2RGB(index, c) \
474     REAL_YSCALEYUV2RGB_UV(index, c) \
475     REAL_YSCALEYUV2RGB_YA(index, c, %0, %1) \
476     REAL_YSCALEYUV2RGB_COEFF(c)
477
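/*
 * The "1" variants below are for the case where only one input line
 * contributes (the second-line weight is negligible): no blend, just shift
 * and bias.  The "1b" variants additionally average the chroma of two lines,
 * see "do vertical chrominance interpolation" further down.
 */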
478 #define REAL_YSCALEYUV2PACKED1(index, c) \
479     "xor            "#index", "#index"  \n\t"\
480     ".p2align              4            \n\t"\
481     "1:                                 \n\t"\
482     "movq     (%2, "#index"), %%mm3     \n\t" /* uvbuf0[eax]*/\
483     "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4     \n\t" /* uvbuf0[eax+2048]*/\
484     "psraw                $7, %%mm3     \n\t" \
485     "psraw                $7, %%mm4     \n\t" \
486     "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
487     "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
488     "psraw                $7, %%mm1     \n\t" \
489     "psraw                $7, %%mm7     \n\t" \
490
491 #define YSCALEYUV2PACKED1(index, c)  REAL_YSCALEYUV2PACKED1(index, c)
492
493 #define REAL_YSCALEYUV2RGB1(index, c) \
494     "xor            "#index", "#index"  \n\t"\
495     ".p2align              4            \n\t"\
496     "1:                                 \n\t"\
497     "movq     (%2, "#index"), %%mm3     \n\t" /* uvbuf0[eax]*/\
498     "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4     \n\t" /* uvbuf0[eax+2048]*/\
499     "psraw                $4, %%mm3     \n\t" /* uvbuf0[eax] >>4*/\
500     "psraw                $4, %%mm4     \n\t" /* uvbuf0[eax+2048] >>4*/\
501     "psubw  "U_OFFSET"("#c"), %%mm3     \n\t" /* (U-128)8*/\
502     "psubw  "V_OFFSET"("#c"), %%mm4     \n\t" /* (V-128)8*/\
503     "movq              %%mm3, %%mm2     \n\t" /* (U-128)8*/\
504     "movq              %%mm4, %%mm5     \n\t" /* (V-128)8*/\
505     "pmulhw "UG_COEFF"("#c"), %%mm3     \n\t"\
506     "pmulhw "VG_COEFF"("#c"), %%mm4     \n\t"\
507     /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
508     "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
509     "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
510     "psraw                $4, %%mm1     \n\t" /* buf0[eax] >>4*/\
511     "psraw                $4, %%mm7     \n\t" /* buf0[eax+4] >>4*/\
512     "pmulhw "UB_COEFF"("#c"), %%mm2     \n\t"\
513     "pmulhw "VR_COEFF"("#c"), %%mm5     \n\t"\
514     "psubw  "Y_OFFSET"("#c"), %%mm1     \n\t" /* 8(Y-16)*/\
515     "psubw  "Y_OFFSET"("#c"), %%mm7     \n\t" /* 8(Y-16)*/\
516     "pmulhw  "Y_COEFF"("#c"), %%mm1     \n\t"\
517     "pmulhw  "Y_COEFF"("#c"), %%mm7     \n\t"\
518     /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
519     "paddw             %%mm3, %%mm4     \n\t"\
520     "movq              %%mm2, %%mm0     \n\t"\
521     "movq              %%mm5, %%mm6     \n\t"\
522     "movq              %%mm4, %%mm3     \n\t"\
523     "punpcklwd         %%mm2, %%mm2     \n\t"\
524     "punpcklwd         %%mm5, %%mm5     \n\t"\
525     "punpcklwd         %%mm4, %%mm4     \n\t"\
526     "paddw             %%mm1, %%mm2     \n\t"\
527     "paddw             %%mm1, %%mm5     \n\t"\
528     "paddw             %%mm1, %%mm4     \n\t"\
529     "punpckhwd         %%mm0, %%mm0     \n\t"\
530     "punpckhwd         %%mm6, %%mm6     \n\t"\
531     "punpckhwd         %%mm3, %%mm3     \n\t"\
532     "paddw             %%mm7, %%mm0     \n\t"\
533     "paddw             %%mm7, %%mm6     \n\t"\
534     "paddw             %%mm7, %%mm3     \n\t"\
535     /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
536     "packuswb          %%mm0, %%mm2     \n\t"\
537     "packuswb          %%mm6, %%mm5     \n\t"\
538     "packuswb          %%mm3, %%mm4     \n\t"\
539
540 #define YSCALEYUV2RGB1(index, c)  REAL_YSCALEYUV2RGB1(index, c)
541
542 #define REAL_YSCALEYUV2PACKED1b(index, c) \
543     "xor "#index", "#index"             \n\t"\
544     ".p2align              4            \n\t"\
545     "1:                                 \n\t"\
546     "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
547     "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
548     "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5     \n\t" /* uvbuf0[eax+2048]*/\
549     "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4     \n\t" /* uvbuf1[eax+2048]*/\
550     "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
551     "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
552     "psrlw                $8, %%mm3     \n\t" \
553     "psrlw                $8, %%mm4     \n\t" \
554     "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
555     "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
556     "psraw                $7, %%mm1     \n\t" \
557     "psraw                $7, %%mm7     \n\t"
558 #define YSCALEYUV2PACKED1b(index, c)  REAL_YSCALEYUV2PACKED1b(index, c)
559
560 // do vertical chrominance interpolation
561 #define REAL_YSCALEYUV2RGB1b(index, c) \
562     "xor            "#index", "#index"  \n\t"\
563     ".p2align              4            \n\t"\
564     "1:                                 \n\t"\
565     "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
566     "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
567     "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5     \n\t" /* uvbuf0[eax+2048]*/\
568     "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4     \n\t" /* uvbuf1[eax+2048]*/\
569     "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
570     "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
571     "psrlw                $5, %%mm3     \n\t" /*FIXME might overflow*/\
572     "psrlw                $5, %%mm4     \n\t" /*FIXME might overflow*/\
573     "psubw  "U_OFFSET"("#c"), %%mm3     \n\t" /* (U-128)8*/\
574     "psubw  "V_OFFSET"("#c"), %%mm4     \n\t" /* (V-128)8*/\
575     "movq              %%mm3, %%mm2     \n\t" /* (U-128)8*/\
576     "movq              %%mm4, %%mm5     \n\t" /* (V-128)8*/\
577     "pmulhw "UG_COEFF"("#c"), %%mm3     \n\t"\
578     "pmulhw "VG_COEFF"("#c"), %%mm4     \n\t"\
579     /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
580     "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
581     "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
582     "psraw                $4, %%mm1     \n\t" /* buf0[eax] >>4*/\
583     "psraw                $4, %%mm7     \n\t" /* buf0[eax+4] >>4*/\
584     "pmulhw "UB_COEFF"("#c"), %%mm2     \n\t"\
585     "pmulhw "VR_COEFF"("#c"), %%mm5     \n\t"\
586     "psubw  "Y_OFFSET"("#c"), %%mm1     \n\t" /* 8(Y-16)*/\
587     "psubw  "Y_OFFSET"("#c"), %%mm7     \n\t" /* 8(Y-16)*/\
588     "pmulhw  "Y_COEFF"("#c"), %%mm1     \n\t"\
589     "pmulhw  "Y_COEFF"("#c"), %%mm7     \n\t"\
590     /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
591     "paddw             %%mm3, %%mm4     \n\t"\
592     "movq              %%mm2, %%mm0     \n\t"\
593     "movq              %%mm5, %%mm6     \n\t"\
594     "movq              %%mm4, %%mm3     \n\t"\
595     "punpcklwd         %%mm2, %%mm2     \n\t"\
596     "punpcklwd         %%mm5, %%mm5     \n\t"\
597     "punpcklwd         %%mm4, %%mm4     \n\t"\
598     "paddw             %%mm1, %%mm2     \n\t"\
599     "paddw             %%mm1, %%mm5     \n\t"\
600     "paddw             %%mm1, %%mm4     \n\t"\
601     "punpckhwd         %%mm0, %%mm0     \n\t"\
602     "punpckhwd         %%mm6, %%mm6     \n\t"\
603     "punpckhwd         %%mm3, %%mm3     \n\t"\
604     "paddw             %%mm7, %%mm0     \n\t"\
605     "paddw             %%mm7, %%mm6     \n\t"\
606     "paddw             %%mm7, %%mm3     \n\t"\
607     /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
608     "packuswb          %%mm0, %%mm2     \n\t"\
609     "packuswb          %%mm6, %%mm5     \n\t"\
610     "packuswb          %%mm3, %%mm4     \n\t"\
611
612 #define YSCALEYUV2RGB1b(index, c)  REAL_YSCALEYUV2RGB1b(index, c)
613
614 #define REAL_YSCALEYUV2RGB1_ALPHA(index) \
615     "movq  (%1, "#index", 2), %%mm7     \n\t" /* abuf0[index  ]     */\
616     "movq 8(%1, "#index", 2), %%mm1     \n\t" /* abuf0[index+4]     */\
617     "psraw                $7, %%mm7     \n\t" /* abuf0[index  ] >>7 */\
618     "psraw                $7, %%mm1     \n\t" /* abuf0[index+4] >>7 */\
619     "packuswb          %%mm1, %%mm7     \n\t"
620 #define YSCALEYUV2RGB1_ALPHA(index) REAL_YSCALEYUV2RGB1_ALPHA(index)
621
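/*
 * REAL_WRITEBGR32: interleave the byte-packed b/g/r/a registers into four
 * quadwords of packed 32-bit pixels (punpcklbw/punpckhbw pairs B with G and
 * R with A, punpcklwd/punpckhwd merges the pairs) and store 8 pixels
 * (32 bytes) per loop iteration.
 */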
622 #define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \
623     "movq       "#b", "#q2"     \n\t" /* B */\
624     "movq       "#r", "#t"      \n\t" /* R */\
625     "punpcklbw  "#g", "#b"      \n\t" /* GBGBGBGB 0 */\
626     "punpcklbw  "#a", "#r"      \n\t" /* ARARARAR 0 */\
627     "punpckhbw  "#g", "#q2"     \n\t" /* GBGBGBGB 2 */\
628     "punpckhbw  "#a", "#t"      \n\t" /* ARARARAR 2 */\
629     "movq       "#b", "#q0"     \n\t" /* GBGBGBGB 0 */\
630     "movq      "#q2", "#q3"     \n\t" /* GBGBGBGB 2 */\
631     "punpcklwd  "#r", "#q0"     \n\t" /* ARGBARGB 0 */\
632     "punpckhwd  "#r", "#b"      \n\t" /* ARGBARGB 1 */\
633     "punpcklwd  "#t", "#q2"     \n\t" /* ARGBARGB 2 */\
634     "punpckhwd  "#t", "#q3"     \n\t" /* ARGBARGB 3 */\
635 \
636     MOVNTQ(   q0,   (dst, index, 4))\
637     MOVNTQ(    b,  8(dst, index, 4))\
638     MOVNTQ(   q2, 16(dst, index, 4))\
639     MOVNTQ(   q3, 24(dst, index, 4))\
640 \
641     "add      $8, "#index"      \n\t"\
642     "cmp "#dstw", "#index"      \n\t"\
643     " jb      1b                \n\t"
644 #define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)  REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)
645
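/*
 * REAL_WRITERGB16 / REAL_WRITERGB15: reduce the byte-packed B/G/R components
 * to 5-6-5 (or 5-5-5) bits with the bF8/bFC masks, shift them into place and
 * OR them together, writing 8 pixels (16 bytes) per iteration.  %%mm7 must be
 * zero on entry; the callers add the optional ordered dither (DITHER1XBPP)
 * before invoking these.
 */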
646 #define REAL_WRITERGB16(dst, dstw, index) \
647     "pand "MANGLE(bF8)", %%mm2  \n\t" /* B */\
648     "pand "MANGLE(bFC)", %%mm4  \n\t" /* G */\
649     "pand "MANGLE(bF8)", %%mm5  \n\t" /* R */\
650     "psrlq           $3, %%mm2  \n\t"\
651 \
652     "movq         %%mm2, %%mm1  \n\t"\
653     "movq         %%mm4, %%mm3  \n\t"\
654 \
655     "punpcklbw    %%mm7, %%mm3  \n\t"\
656     "punpcklbw    %%mm5, %%mm2  \n\t"\
657     "punpckhbw    %%mm7, %%mm4  \n\t"\
658     "punpckhbw    %%mm5, %%mm1  \n\t"\
659 \
660     "psllq           $3, %%mm3  \n\t"\
661     "psllq           $3, %%mm4  \n\t"\
662 \
663     "por          %%mm3, %%mm2  \n\t"\
664     "por          %%mm4, %%mm1  \n\t"\
665 \
666     MOVNTQ(%%mm2,  (dst, index, 2))\
667     MOVNTQ(%%mm1, 8(dst, index, 2))\
668 \
669     "add             $8, "#index"   \n\t"\
670     "cmp        "#dstw", "#index"   \n\t"\
671     " jb             1b             \n\t"
672 #define WRITERGB16(dst, dstw, index)  REAL_WRITERGB16(dst, dstw, index)
673
674 #define REAL_WRITERGB15(dst, dstw, index) \
675     "pand "MANGLE(bF8)", %%mm2  \n\t" /* B */\
676     "pand "MANGLE(bF8)", %%mm4  \n\t" /* G */\
677     "pand "MANGLE(bF8)", %%mm5  \n\t" /* R */\
678     "psrlq           $3, %%mm2  \n\t"\
679     "psrlq           $1, %%mm5  \n\t"\
680 \
681     "movq         %%mm2, %%mm1  \n\t"\
682     "movq         %%mm4, %%mm3  \n\t"\
683 \
684     "punpcklbw    %%mm7, %%mm3  \n\t"\
685     "punpcklbw    %%mm5, %%mm2  \n\t"\
686     "punpckhbw    %%mm7, %%mm4  \n\t"\
687     "punpckhbw    %%mm5, %%mm1  \n\t"\
688 \
689     "psllq           $2, %%mm3  \n\t"\
690     "psllq           $2, %%mm4  \n\t"\
691 \
692     "por          %%mm3, %%mm2  \n\t"\
693     "por          %%mm4, %%mm1  \n\t"\
694 \
695     MOVNTQ(%%mm2,  (dst, index, 2))\
696     MOVNTQ(%%mm1, 8(dst, index, 2))\
697 \
698     "add             $8, "#index"   \n\t"\
699     "cmp        "#dstw", "#index"   \n\t"\
700     " jb             1b             \n\t"
701 #define WRITERGB15(dst, dstw, index)  REAL_WRITERGB15(dst, dstw, index)
702
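/*
 * WRITEBGR24MMX / WRITEBGR24MMX2: pack 8 pixels into 24 bytes.  The plain MMX
 * version builds 0RGB dwords and shifts/ORs them into three quadwords; the
 * MMX2 version uses pshufw together with the ff_M24A/B/C masks to place the
 * bytes directly.  WRITEBGR24 below selects whichever matches the template.
 */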
703 #define WRITEBGR24MMX(dst, dstw, index) \
704     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
705     "movq      %%mm2, %%mm1     \n\t" /* B */\
706     "movq      %%mm5, %%mm6     \n\t" /* R */\
707     "punpcklbw %%mm4, %%mm2     \n\t" /* GBGBGBGB 0 */\
708     "punpcklbw %%mm7, %%mm5     \n\t" /* 0R0R0R0R 0 */\
709     "punpckhbw %%mm4, %%mm1     \n\t" /* GBGBGBGB 2 */\
710     "punpckhbw %%mm7, %%mm6     \n\t" /* 0R0R0R0R 2 */\
711     "movq      %%mm2, %%mm0     \n\t" /* GBGBGBGB 0 */\
712     "movq      %%mm1, %%mm3     \n\t" /* GBGBGBGB 2 */\
713     "punpcklwd %%mm5, %%mm0     \n\t" /* 0RGB0RGB 0 */\
714     "punpckhwd %%mm5, %%mm2     \n\t" /* 0RGB0RGB 1 */\
715     "punpcklwd %%mm6, %%mm1     \n\t" /* 0RGB0RGB 2 */\
716     "punpckhwd %%mm6, %%mm3     \n\t" /* 0RGB0RGB 3 */\
717 \
718     "movq      %%mm0, %%mm4     \n\t" /* 0RGB0RGB 0 */\
719     "movq      %%mm2, %%mm6     \n\t" /* 0RGB0RGB 1 */\
720     "movq      %%mm1, %%mm5     \n\t" /* 0RGB0RGB 2 */\
721     "movq      %%mm3, %%mm7     \n\t" /* 0RGB0RGB 3 */\
722 \
723     "psllq       $40, %%mm0     \n\t" /* RGB00000 0 */\
724     "psllq       $40, %%mm2     \n\t" /* RGB00000 1 */\
725     "psllq       $40, %%mm1     \n\t" /* RGB00000 2 */\
726     "psllq       $40, %%mm3     \n\t" /* RGB00000 3 */\
727 \
728     "punpckhdq %%mm4, %%mm0     \n\t" /* 0RGBRGB0 0 */\
729     "punpckhdq %%mm6, %%mm2     \n\t" /* 0RGBRGB0 1 */\
730     "punpckhdq %%mm5, %%mm1     \n\t" /* 0RGBRGB0 2 */\
731     "punpckhdq %%mm7, %%mm3     \n\t" /* 0RGBRGB0 3 */\
732 \
733     "psrlq        $8, %%mm0     \n\t" /* 00RGBRGB 0 */\
734     "movq      %%mm2, %%mm6     \n\t" /* 0RGBRGB0 1 */\
735     "psllq       $40, %%mm2     \n\t" /* GB000000 1 */\
736     "por       %%mm2, %%mm0     \n\t" /* GBRGBRGB 0 */\
737     MOVNTQ(%%mm0, (dst))\
738 \
739     "psrlq       $24, %%mm6     \n\t" /* 0000RGBR 1 */\
740     "movq      %%mm1, %%mm5     \n\t" /* 0RGBRGB0 2 */\
741     "psllq       $24, %%mm1     \n\t" /* BRGB0000 2 */\
742     "por       %%mm1, %%mm6     \n\t" /* BRGBRGBR 1 */\
743     MOVNTQ(%%mm6, 8(dst))\
744 \
745     "psrlq       $40, %%mm5     \n\t" /* 000000RG 2 */\
746     "psllq        $8, %%mm3     \n\t" /* RGBRGB00 3 */\
747     "por       %%mm3, %%mm5     \n\t" /* RGBRGBRG 2 */\
748     MOVNTQ(%%mm5, 16(dst))\
749 \
750     "add         $24, "#dst"    \n\t"\
751 \
752     "add          $8, "#index"  \n\t"\
753     "cmp     "#dstw", "#index"  \n\t"\
754     " jb          1b            \n\t"
755
756 #define WRITEBGR24MMX2(dst, dstw, index) \
757     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
758     "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
759     "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
760     "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2  B1 B0 B1 B0 */\
761     "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2  G1 G0 G1 G0 */\
762     "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0  R1 R0 R1 R0 */\
763 \
764     "pand   %%mm0, %%mm1        \n\t" /*    B2        B1       B0 */\
765     "pand   %%mm0, %%mm3        \n\t" /*    G2        G1       G0 */\
766     "pand   %%mm7, %%mm6        \n\t" /*       R1        R0       */\
767 \
768     "psllq     $8, %%mm3        \n\t" /* G2        G1       G0    */\
769     "por    %%mm1, %%mm6        \n\t"\
770     "por    %%mm3, %%mm6        \n\t"\
771     MOVNTQ(%%mm6, (dst))\
772 \
773     "psrlq     $8, %%mm4        \n\t" /* 00 G7 G6 G5  G4 G3 G2 G1 */\
774     "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4  B3 B2 B3 B2 */\
775     "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3  G4 G3 G4 G3 */\
776     "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4  R3 R2 R3 R2 */\
777 \
778     "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5       B4        B3    */\
779     "pand   %%mm7, %%mm3        \n\t" /*       G4        G3       */\
780     "pand   %%mm0, %%mm6        \n\t" /*    R4        R3       R2 */\
781 \
782     "por    %%mm1, %%mm3        \n\t" /* B5    G4 B4     G3 B3    */\
783     "por    %%mm3, %%mm6        \n\t"\
784     MOVNTQ(%%mm6, 8(dst))\
785 \
786     "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6  B7 B6 B6 B7 */\
787     "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7  G6 G5 G6 G5 */\
788     "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6  R5 R4 R5 R4 */\
789 \
790     "pand   %%mm7, %%mm1        \n\t" /*       B7        B6       */\
791     "pand   %%mm0, %%mm3        \n\t" /*    G7        G6       G5 */\
792     "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7       R6        R5    */\
793 \
794     "por    %%mm1, %%mm3        \n\t"\
795     "por    %%mm3, %%mm6        \n\t"\
796     MOVNTQ(%%mm6, 16(dst))\
797 \
798     "add      $24, "#dst"       \n\t"\
799 \
800     "add       $8, "#index"     \n\t"\
801     "cmp  "#dstw", "#index"     \n\t"\
802     " jb       1b               \n\t"
803
804 #if COMPILE_TEMPLATE_MMX2
805 #undef WRITEBGR24
806 #define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX2(dst, dstw, index)
807 #else
808 #undef WRITEBGR24
809 #define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX(dst, dstw, index)
810 #endif
811
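/*
 * REAL_WRITEYUY2: pack Y (%%mm1/%%mm7) and U/V (%%mm3/%%mm4) down to bytes
 * and interleave them as Y U Y V ..., writing 8 pixels (16 bytes) per
 * iteration.
 */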
812 #define REAL_WRITEYUY2(dst, dstw, index) \
813     "packuswb  %%mm3, %%mm3     \n\t"\
814     "packuswb  %%mm4, %%mm4     \n\t"\
815     "packuswb  %%mm7, %%mm1     \n\t"\
816     "punpcklbw %%mm4, %%mm3     \n\t"\
817     "movq      %%mm1, %%mm7     \n\t"\
818     "punpcklbw %%mm3, %%mm1     \n\t"\
819     "punpckhbw %%mm3, %%mm7     \n\t"\
820 \
821     MOVNTQ(%%mm1, (dst, index, 2))\
822     MOVNTQ(%%mm7, 8(dst, index, 2))\
823 \
824     "add          $8, "#index"  \n\t"\
825     "cmp     "#dstw", "#index"  \n\t"\
826     " jb          1b            \n\t"
827 #define WRITEYUY2(dst, dstw, index)  REAL_WRITEYUY2(dst, dstw, index)
828
829
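/**
 * vertical scale to planar YV12 output with an arbitrary filter; uses the
 * plain or ACCURATE MMX loops above unless SWS_BITEXACT forces the C
 * fallback (yuv2yuvXinC)
 */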
830 static inline void RENAME(yuv2yuvX)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
831                                     const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize, const int16_t **alpSrc,
832                                     uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
833 {
834     if(!(c->flags & SWS_BITEXACT)) {
835         if (c->flags & SWS_ACCURATE_RND) {
836             if (uDest) {
837                 YSCALEYUV2YV12X_ACCURATE(   "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
838                 YSCALEYUV2YV12X_ACCURATE(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
839             }
840             if (CONFIG_SWSCALE_ALPHA && aDest) {
841                 YSCALEYUV2YV12X_ACCURATE(   "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
842             }
843
844             YSCALEYUV2YV12X_ACCURATE("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
845         } else {
846             if (uDest) {
847                 YSCALEYUV2YV12X(   "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
848                 YSCALEYUV2YV12X(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
849             }
850             if (CONFIG_SWSCALE_ALPHA && aDest) {
851                 YSCALEYUV2YV12X(   "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
852             }
853
854             YSCALEYUV2YV12X("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
855         }
856         return;
857     }
858     yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
859                 chrFilter, chrSrc, chrFilterSize,
860                 alpSrc, dest, uDest, vDest, aDest, dstW, chrDstW);
861 }
862
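/**
 * unscaled (1:1) vertical copy to planar output, with proper rounding in the
 * SWS_ACCURATE_RND case; SWS_BITEXACT falls back to yuv2yuv1_c
 */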
863 static inline void RENAME(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc, const int16_t *chrSrc, const int16_t *alpSrc,
864                                     uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
865 {
866     if(!(c->flags & SWS_BITEXACT)) {
867         long p= 4;
868         const uint8_t *src[4]= {alpSrc + dstW, lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
869         uint8_t *dst[4]= {aDest, dest, uDest, vDest};
870         x86_reg counter[4]= {dstW, dstW, chrDstW, chrDstW};
871
872         if (c->flags & SWS_ACCURATE_RND) {
873             while(p--) {
874                 if (dst[p]) {
875                     __asm__ volatile(
876                         YSCALEYUV2YV121_ACCURATE
877                         :: "r" (src[p]), "r" (dst[p] + counter[p]),
878                         "g" (-counter[p])
879                         : "%"REG_a
880                     );
881                 }
882             }
883         } else {
884             while(p--) {
885                 if (dst[p]) {
886                     __asm__ volatile(
887                         YSCALEYUV2YV121
888                         :: "r" (src[p]), "r" (dst[p] + counter[p]),
889                         "g" (-counter[p])
890                         : "%"REG_a
891                     );
892                 }
893             }
894         }
895         return;
896     }
897     yuv2yuv1_c(c, lumSrc, chrSrc, alpSrc, dest, uDest, vDest, aDest,
898                dstW, chrDstW);
899 }
900
901
902 /**
903  * vertical scale YV12 to RGB
904  */
905 static inline void RENAME(yuv2packedX)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
906                                        const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
907                                        const int16_t **alpSrc, uint8_t *dest, long dstW, long dstY)
908 {
909     x86_reg dummy=0;
910     x86_reg dstW_reg = dstW;
911     if(!(c->flags & SWS_BITEXACT)) {
912         if (c->flags & SWS_ACCURATE_RND) {
913             switch(c->dstFormat) {
914             case PIX_FMT_RGB32:
915                 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
916                     YSCALEYUV2PACKEDX_ACCURATE
917                     YSCALEYUV2RGBX
918                     "movq                      %%mm2, "U_TEMP"(%0)  \n\t"
919                     "movq                      %%mm4, "V_TEMP"(%0)  \n\t"
920                     "movq                      %%mm5, "Y_TEMP"(%0)  \n\t"
921                     YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET)
922                     "movq               "Y_TEMP"(%0), %%mm5         \n\t"
923                     "psraw                        $3, %%mm1         \n\t"
924                     "psraw                        $3, %%mm7         \n\t"
925                     "packuswb                  %%mm7, %%mm1         \n\t"
926                     WRITEBGR32(%4, %5, %%REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6)
927
928                     YSCALEYUV2PACKEDX_END
929                 } else {
930                     YSCALEYUV2PACKEDX_ACCURATE
931                     YSCALEYUV2RGBX
932                     "pcmpeqd %%mm7, %%mm7 \n\t"
933                     WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
934
935                     YSCALEYUV2PACKEDX_END
936                 }
937                 return;
938             case PIX_FMT_BGR24:
939                 YSCALEYUV2PACKEDX_ACCURATE
940                 YSCALEYUV2RGBX
941                 "pxor %%mm7, %%mm7 \n\t"
942                 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
943                 "add %4, %%"REG_c"                        \n\t"
944                 WRITEBGR24(%%REGc, %5, %%REGa)
945
946
947                 :: "r" (&c->redDither),
948                 "m" (dummy), "m" (dummy), "m" (dummy),
949                 "r" (dest), "m" (dstW_reg)
950                 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
951                 );
952                 return;
953             case PIX_FMT_RGB555:
954                 YSCALEYUV2PACKEDX_ACCURATE
955                 YSCALEYUV2RGBX
956                 "pxor %%mm7, %%mm7 \n\t"
957                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
958 #ifdef DITHER1XBPP
959                 "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
960                 "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
961                 "paddusb "RED_DITHER"(%0), %%mm5\n\t"
962 #endif
963
964                 WRITERGB15(%4, %5, %%REGa)
965                 YSCALEYUV2PACKEDX_END
966                 return;
967             case PIX_FMT_RGB565:
968                 YSCALEYUV2PACKEDX_ACCURATE
969                 YSCALEYUV2RGBX
970                 "pxor %%mm7, %%mm7 \n\t"
971                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
972 #ifdef DITHER1XBPP
973                 "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
974                 "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
975                 "paddusb "RED_DITHER"(%0), %%mm5\n\t"
976 #endif
977
978                 WRITERGB16(%4, %5, %%REGa)
979                 YSCALEYUV2PACKEDX_END
980                 return;
981             case PIX_FMT_YUYV422:
982                 YSCALEYUV2PACKEDX_ACCURATE
983                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
984
985                 "psraw $3, %%mm3    \n\t"
986                 "psraw $3, %%mm4    \n\t"
987                 "psraw $3, %%mm1    \n\t"
988                 "psraw $3, %%mm7    \n\t"
989                 WRITEYUY2(%4, %5, %%REGa)
990                 YSCALEYUV2PACKEDX_END
991                 return;
992             }
993         } else {
994             switch(c->dstFormat) {
995             case PIX_FMT_RGB32:
996                 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
997                     YSCALEYUV2PACKEDX
998                     YSCALEYUV2RGBX
999                     YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
1000                     "psraw                        $3, %%mm1         \n\t"
1001                     "psraw                        $3, %%mm7         \n\t"
1002                     "packuswb                  %%mm7, %%mm1         \n\t"
1003                     WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
1004                     YSCALEYUV2PACKEDX_END
1005                 } else {
1006                     YSCALEYUV2PACKEDX
1007                     YSCALEYUV2RGBX
1008                     "pcmpeqd %%mm7, %%mm7 \n\t"
1009                     WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1010                     YSCALEYUV2PACKEDX_END
1011                 }
1012                 return;
1013             case PIX_FMT_BGR24:
1014                 YSCALEYUV2PACKEDX
1015                 YSCALEYUV2RGBX
1016                 "pxor                    %%mm7, %%mm7       \n\t"
1017                 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"   \n\t" //FIXME optimize
1018                 "add                        %4, %%"REG_c"   \n\t"
1019                 WRITEBGR24(%%REGc, %5, %%REGa)
1020
1021                 :: "r" (&c->redDither),
1022                 "m" (dummy), "m" (dummy), "m" (dummy),
1023                 "r" (dest),  "m" (dstW_reg)
1024                 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
1025                 );
1026                 return;
1027             case PIX_FMT_RGB555:
1028                 YSCALEYUV2PACKEDX
1029                 YSCALEYUV2RGBX
1030                 "pxor %%mm7, %%mm7 \n\t"
1031                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1032 #ifdef DITHER1XBPP
1033                 "paddusb "BLUE_DITHER"(%0), %%mm2  \n\t"
1034                 "paddusb "GREEN_DITHER"(%0), %%mm4  \n\t"
1035                 "paddusb "RED_DITHER"(%0), %%mm5  \n\t"
1036 #endif
1037
1038                 WRITERGB15(%4, %5, %%REGa)
1039                 YSCALEYUV2PACKEDX_END
1040                 return;
1041             case PIX_FMT_RGB565:
1042                 YSCALEYUV2PACKEDX
1043                 YSCALEYUV2RGBX
1044                 "pxor %%mm7, %%mm7 \n\t"
1045                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1046 #ifdef DITHER1XBPP
1047                 "paddusb "BLUE_DITHER"(%0), %%mm2  \n\t"
1048                 "paddusb "GREEN_DITHER"(%0), %%mm4  \n\t"
1049                 "paddusb "RED_DITHER"(%0), %%mm5  \n\t"
1050 #endif
1051
1052                 WRITERGB16(%4, %5, %%REGa)
1053                 YSCALEYUV2PACKEDX_END
1054                 return;
1055             case PIX_FMT_YUYV422:
1056                 YSCALEYUV2PACKEDX
1057                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1058
1059                 "psraw $3, %%mm3    \n\t"
1060                 "psraw $3, %%mm4    \n\t"
1061                 "psraw $3, %%mm1    \n\t"
1062                 "psraw $3, %%mm7    \n\t"
1063                 WRITEYUY2(%4, %5, %%REGa)
1064                 YSCALEYUV2PACKEDX_END
1065                 return;
1066             }
1067         }
1068     }
1069     yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
1070                    chrFilter, chrSrc, chrFilterSize,
1071                    alpSrc, dest, dstW, dstY);
1072 }
1073
1074 /**
1075  * vertical bilinear scale YV12 to RGB
1076  */
1077 static inline void RENAME(yuv2packed2)(SwsContext *c, const uint16_t *buf0, const uint16_t *buf1, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1078                           const uint16_t *abuf0, const uint16_t *abuf1, uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
1079 {
1080     if(!(c->flags & SWS_BITEXACT)) {
1081         switch(c->dstFormat) {
1082         //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1083         case PIX_FMT_RGB32:
1084             if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
1085 #if ARCH_X86_64
1086                 __asm__ volatile(
1087                     YSCALEYUV2RGB(%%r8, %5)
1088                     YSCALEYUV2RGB_YA(%%r8, %5, %6, %7)
1089                     "psraw                  $3, %%mm1       \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1090                     "psraw                  $3, %%mm7       \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1091                     "packuswb            %%mm7, %%mm1       \n\t"
1092                     WRITEBGR32(%4, 8280(%5), %%r8, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
1093
1094                     :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "r" (dest),
1095                     "a" (&c->redDither)
1096                     ,"r" (abuf0), "r" (abuf1)
1097                     : "%r8"
1098                 );
1099 #else
1100                 *(const uint16_t **)(&c->u_temp)=abuf0;
1101                 *(const uint16_t **)(&c->v_temp)=abuf1;
1102                 __asm__ volatile(
1103                     "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1104                     "mov        %4, %%"REG_b"               \n\t"
1105                     "push %%"REG_BP"                        \n\t"
1106                     YSCALEYUV2RGB(%%REGBP, %5)
1107                     "push                   %0              \n\t"
1108                     "push                   %1              \n\t"
1109                     "mov          "U_TEMP"(%5), %0          \n\t"
1110                     "mov          "V_TEMP"(%5), %1          \n\t"
1111                     YSCALEYUV2RGB_YA(%%REGBP, %5, %0, %1)
1112                     "psraw                  $3, %%mm1       \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1113                     "psraw                  $3, %%mm7       \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1114                     "packuswb            %%mm7, %%mm1       \n\t"
1115                     "pop                    %1              \n\t"
1116                     "pop                    %0              \n\t"
1117                     WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
1118                     "pop %%"REG_BP"                         \n\t"
1119                     "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1120
1121                     :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1122                     "a" (&c->redDither)
1123                 );
1124 #endif
1125             } else {
1126                 __asm__ volatile(
1127                     "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1128                     "mov        %4, %%"REG_b"               \n\t"
1129                     "push %%"REG_BP"                        \n\t"
1130                     YSCALEYUV2RGB(%%REGBP, %5)
1131                     "pcmpeqd %%mm7, %%mm7                   \n\t"
1132                     WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1133                     "pop %%"REG_BP"                         \n\t"
1134                     "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1135
1136                     :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1137                     "a" (&c->redDither)
1138                 );
1139             }
1140             return;
1141         case PIX_FMT_BGR24:
1142             __asm__ volatile(
1143                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1144                 "mov        %4, %%"REG_b"               \n\t"
1145                 "push %%"REG_BP"                        \n\t"
1146                 YSCALEYUV2RGB(%%REGBP, %5)
1147                 "pxor    %%mm7, %%mm7                   \n\t"
1148                 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1149                 "pop %%"REG_BP"                         \n\t"
1150                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1151                 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1152                 "a" (&c->redDither)
1153             );
1154             return;
1155         case PIX_FMT_RGB555:
1156             __asm__ volatile(
1157                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1158                 "mov        %4, %%"REG_b"               \n\t"
1159                 "push %%"REG_BP"                        \n\t"
1160                 YSCALEYUV2RGB(%%REGBP, %5)
1161                 "pxor    %%mm7, %%mm7                   \n\t"
1162                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1163 #ifdef DITHER1XBPP
1164                 "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
1165                 "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
1166                 "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
1167 #endif
1168
1169                 WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1170                 "pop %%"REG_BP"                         \n\t"
1171                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1172
1173                 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1174                 "a" (&c->redDither)
1175             );
1176             return;
1177         case PIX_FMT_RGB565:
1178             __asm__ volatile(
1179                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1180                 "mov        %4, %%"REG_b"               \n\t"
1181                 "push %%"REG_BP"                        \n\t"
1182                 YSCALEYUV2RGB(%%REGBP, %5)
1183                 "pxor    %%mm7, %%mm7                   \n\t"
1184                 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1185 #ifdef DITHER1XBPP
1186                 "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
1187                 "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
1188                 "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
1189 #endif
1190
1191                 WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1192                 "pop %%"REG_BP"                         \n\t"
1193                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1194                 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1195                 "a" (&c->redDither)
1196             );
1197             return;
1198         case PIX_FMT_YUYV422:
1199             __asm__ volatile(
1200                 "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1201                 "mov %4, %%"REG_b"                        \n\t"
1202                 "push %%"REG_BP"                        \n\t"
1203                 YSCALEYUV2PACKED(%%REGBP, %5)
1204                 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1205                 "pop %%"REG_BP"                         \n\t"
1206                 "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1207                 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1208                 "a" (&c->redDither)
1209             );
1210             return;
1211         default: break;
1212         }
1213     }
1214     yuv2packed2_c(c, buf0, buf1, uvbuf0, uvbuf1, abuf0, abuf1,
1215                   dest, dstW, yalpha, uvalpha, y);
1216 }
1217
1218 /**
1219  * YV12 to RGB without scaling or interpolating
1220  */
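/* Rough scalar sketch (illustrative only; see yuv2packed1_c() for the real
 * reference): luma comes straight from buf0, chroma either from a single
 * chroma buffer or from the average of the two:
 *
 *     int Y = buf0[i] >> 7;
 *     int U = (uvalpha < 2048) ? chrbuf[i] >> 7
 *                              : (uvbuf0[i] + uvbuf1[i]) >> 8;
 *     // which buffer supplies chrbuf[] here depends on the caller
 */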
1221 static inline void RENAME(yuv2packed1)(SwsContext *c, const uint16_t *buf0, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1222                           const uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, enum PixelFormat dstFormat, int flags, int y)
1223 {
1224     if(!(flags & SWS_BITEXACT)) {
1225         const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1226
1227         if (flags&SWS_FULL_CHR_H_INT) {
1228             c->yuv2packed2(c, buf0, buf0, uvbuf0, uvbuf1, abuf0, abuf0, dest, dstW, 0, uvalpha, y);
1229             return;
1230         }
1231
1232         if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1233             switch(dstFormat) {
1234             case PIX_FMT_RGB32:
1235                 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
1236                     __asm__ volatile(
1237                         "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1238                         "mov        %4, %%"REG_b"               \n\t"
1239                         "push %%"REG_BP"                        \n\t"
1240                         YSCALEYUV2RGB1(%%REGBP, %5)
1241                         YSCALEYUV2RGB1_ALPHA(%%REGBP)
1242                         WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1243                         "pop %%"REG_BP"                         \n\t"
1244                         "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1245
1246                         :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1247                         "a" (&c->redDither)
1248                     );
1249                 } else {
1250                     __asm__ volatile(
1251                         "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1252                         "mov        %4, %%"REG_b"               \n\t"
1253                         "push %%"REG_BP"                        \n\t"
1254                         YSCALEYUV2RGB1(%%REGBP, %5)
1255                         "pcmpeqd %%mm7, %%mm7                   \n\t"
1256                         WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1257                         "pop %%"REG_BP"                         \n\t"
1258                         "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1259
1260                         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1261                         "a" (&c->redDither)
1262                     );
1263                 }
1264                 return;
1265             case PIX_FMT_BGR24:
1266                 __asm__ volatile(
1267                     "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1268                     "mov        %4, %%"REG_b"               \n\t"
1269                     "push %%"REG_BP"                        \n\t"
1270                     YSCALEYUV2RGB1(%%REGBP, %5)
1271                     "pxor    %%mm7, %%mm7                   \n\t"
1272                     WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1273                     "pop %%"REG_BP"                         \n\t"
1274                     "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1275
1276                     :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1277                     "a" (&c->redDither)
1278                 );
1279                 return;
1280             case PIX_FMT_RGB555:
1281                 __asm__ volatile(
1282                     "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1283                     "mov        %4, %%"REG_b"               \n\t"
1284                     "push %%"REG_BP"                        \n\t"
1285                     YSCALEYUV2RGB1(%%REGBP, %5)
1286                     "pxor    %%mm7, %%mm7                   \n\t"
1287                     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1288 #ifdef DITHER1XBPP
1289                     "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
1290                     "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
1291                     "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
1292 #endif
1293                     WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1294                     "pop %%"REG_BP"                         \n\t"
1295                     "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1296
1297                     :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1298                     "a" (&c->redDither)
1299                 );
1300                 return;
1301             case PIX_FMT_RGB565:
1302                 __asm__ volatile(
1303                     "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1304                     "mov        %4, %%"REG_b"               \n\t"
1305                     "push %%"REG_BP"                        \n\t"
1306                     YSCALEYUV2RGB1(%%REGBP, %5)
1307                     "pxor    %%mm7, %%mm7                   \n\t"
1308                     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1309 #ifdef DITHER1XBPP
1310                     "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
1311                     "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
1312                     "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
1313 #endif
1314
1315                     WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1316                     "pop %%"REG_BP"                         \n\t"
1317                     "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1318
1319                     :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1320                     "a" (&c->redDither)
1321                 );
1322                 return;
1323             case PIX_FMT_YUYV422:
1324                 __asm__ volatile(
1325                     "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1326                     "mov        %4, %%"REG_b"               \n\t"
1327                     "push %%"REG_BP"                        \n\t"
1328                     YSCALEYUV2PACKED1(%%REGBP, %5)
1329                     WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1330                     "pop %%"REG_BP"                         \n\t"
1331                     "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1332
1333                     :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1334                     "a" (&c->redDither)
1335                 );
1336                 return;
1337             }
1338         } else {
1339             switch(dstFormat) {
1340             case PIX_FMT_RGB32:
1341                 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
1342                     __asm__ volatile(
1343                         "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1344                         "mov        %4, %%"REG_b"               \n\t"
1345                         "push %%"REG_BP"                        \n\t"
1346                         YSCALEYUV2RGB1b(%%REGBP, %5)
1347                         YSCALEYUV2RGB1_ALPHA(%%REGBP)
1348                         WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1349                         "pop %%"REG_BP"                         \n\t"
1350                         "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1351
1352                         :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1353                         "a" (&c->redDither)
1354                     );
1355                 } else {
1356                     __asm__ volatile(
1357                         "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1358                         "mov        %4, %%"REG_b"               \n\t"
1359                         "push %%"REG_BP"                        \n\t"
1360                         YSCALEYUV2RGB1b(%%REGBP, %5)
1361                         "pcmpeqd %%mm7, %%mm7                   \n\t"
1362                         WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1363                         "pop %%"REG_BP"                         \n\t"
1364                         "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1365
1366                         :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1367                         "a" (&c->redDither)
1368                     );
1369                 }
1370                 return;
1371             case PIX_FMT_BGR24:
1372                 __asm__ volatile(
1373                     "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1374                     "mov        %4, %%"REG_b"               \n\t"
1375                     "push %%"REG_BP"                        \n\t"
1376                     YSCALEYUV2RGB1b(%%REGBP, %5)
1377                     "pxor    %%mm7, %%mm7                   \n\t"
1378                     WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1379                     "pop %%"REG_BP"                         \n\t"
1380                     "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1381
1382                     :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1383                     "a" (&c->redDither)
1384                 );
1385                 return;
1386             case PIX_FMT_RGB555:
1387                 __asm__ volatile(
1388                     "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1389                     "mov        %4, %%"REG_b"               \n\t"
1390                     "push %%"REG_BP"                        \n\t"
1391                     YSCALEYUV2RGB1b(%%REGBP, %5)
1392                     "pxor    %%mm7, %%mm7                   \n\t"
1393                     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1394 #ifdef DITHER1XBPP
1395                     "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
1396                     "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
1397                     "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
1398 #endif
1399                     WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1400                     "pop %%"REG_BP"                         \n\t"
1401                     "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1402
1403                     :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1404                     "a" (&c->redDither)
1405                 );
1406                 return;
1407             case PIX_FMT_RGB565:
1408                 __asm__ volatile(
1409                     "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1410                     "mov        %4, %%"REG_b"               \n\t"
1411                     "push %%"REG_BP"                        \n\t"
1412                     YSCALEYUV2RGB1b(%%REGBP, %5)
1413                     "pxor    %%mm7, %%mm7                   \n\t"
1414                     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1415 #ifdef DITHER1XBPP
1416                     "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
1417                     "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
1418                     "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
1419 #endif
1420
1421                     WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1422                     "pop %%"REG_BP"                         \n\t"
1423                     "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1424
1425                     :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1426                     "a" (&c->redDither)
1427                 );
1428                 return;
1429             case PIX_FMT_YUYV422:
1430                 __asm__ volatile(
1431                     "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
1432                     "mov        %4, %%"REG_b"               \n\t"
1433                     "push %%"REG_BP"                        \n\t"
1434                     YSCALEYUV2PACKED1b(%%REGBP, %5)
1435                     WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1436                     "pop %%"REG_BP"                         \n\t"
1437                     "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
1438
1439                     :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1440                     "a" (&c->redDither)
1441                 );
1442                 return;
1443             }
1444         }
1445     }
1446     yuv2packed1_c(c, buf0, uvbuf0, uvbuf1, abuf0, dest,
1447                   dstW, uvalpha, dstFormat, flags, y);
1448 }
1449
1450 //FIXME yuy2* can read up to 7 samples too many
1451
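/* The readers below unpack packed 4:2:2 input. For YUYV the byte order is
 * Y0 U Y1 V, for UYVY it is U Y0 V Y1; roughly (illustrative scalar code):
 *
 *     dst[i]  = src[2*i];        // yuy2ToY: keep the even bytes (luma)
 *     dstU[i] = src1[4*i + 1];   // yuy2ToUV
 *     dstV[i] = src1[4*i + 3];
 *
 * The MMX versions do the same with the bm01010101 mask and 8-bit shifts. */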
1452 static inline void RENAME(yuy2ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
1453 {
1454     __asm__ volatile(
1455         "movq "MANGLE(bm01010101)", %%mm2           \n\t"
1456         "mov                    %0, %%"REG_a"       \n\t"
1457         "1:                                         \n\t"
1458         "movq    (%1, %%"REG_a",2), %%mm0           \n\t"
1459         "movq   8(%1, %%"REG_a",2), %%mm1           \n\t"
1460         "pand                %%mm2, %%mm0           \n\t"
1461         "pand                %%mm2, %%mm1           \n\t"
1462         "packuswb            %%mm1, %%mm0           \n\t"
1463         "movq                %%mm0, (%2, %%"REG_a") \n\t"
1464         "add                    $8, %%"REG_a"       \n\t"
1465         " js                    1b                  \n\t"
1466         : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
1467         : "%"REG_a
1468     );
1469 }
1470
1471 static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1472 {
1473     __asm__ volatile(
1474         "movq "MANGLE(bm01010101)", %%mm4           \n\t"
1475         "mov                    %0, %%"REG_a"       \n\t"
1476         "1:                                         \n\t"
1477         "movq    (%1, %%"REG_a",4), %%mm0           \n\t"
1478         "movq   8(%1, %%"REG_a",4), %%mm1           \n\t"
1479         "psrlw                  $8, %%mm0           \n\t"
1480         "psrlw                  $8, %%mm1           \n\t"
1481         "packuswb            %%mm1, %%mm0           \n\t"
1482         "movq                %%mm0, %%mm1           \n\t"
1483         "psrlw                  $8, %%mm0           \n\t"
1484         "pand                %%mm4, %%mm1           \n\t"
1485         "packuswb            %%mm0, %%mm0           \n\t"
1486         "packuswb            %%mm1, %%mm1           \n\t"
1487         "movd                %%mm0, (%3, %%"REG_a") \n\t"
1488         "movd                %%mm1, (%2, %%"REG_a") \n\t"
1489         "add                    $4, %%"REG_a"       \n\t"
1490         " js                    1b                  \n\t"
1491         : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
1492         : "%"REG_a
1493     );
1494     assert(src1 == src2);
1495 }
1496
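/* LEToUV/BEToUV feed 16-bit planar chroma (YUV42xP16LE/BE) into the 8-bit
 * path by keeping only the most significant byte of each sample; for
 * little-endian input this is roughly (illustrative scalar code):
 *
 *     dstU[i] = src1[2*i + 1];
 *     dstV[i] = src2[2*i + 1];
 */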
1497 static inline void RENAME(LEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1498 {
1499     __asm__ volatile(
1500         "mov                    %0, %%"REG_a"       \n\t"
1501         "1:                                         \n\t"
1502         "movq    (%1, %%"REG_a",2), %%mm0           \n\t"
1503         "movq   8(%1, %%"REG_a",2), %%mm1           \n\t"
1504         "movq    (%2, %%"REG_a",2), %%mm2           \n\t"
1505         "movq   8(%2, %%"REG_a",2), %%mm3           \n\t"
1506         "psrlw                  $8, %%mm0           \n\t"
1507         "psrlw                  $8, %%mm1           \n\t"
1508         "psrlw                  $8, %%mm2           \n\t"
1509         "psrlw                  $8, %%mm3           \n\t"
1510         "packuswb            %%mm1, %%mm0           \n\t"
1511         "packuswb            %%mm3, %%mm2           \n\t"
1512         "movq                %%mm0, (%3, %%"REG_a") \n\t"
1513         "movq                %%mm2, (%4, %%"REG_a") \n\t"
1514         "add                    $8, %%"REG_a"       \n\t"
1515         " js                    1b                  \n\t"
1516         : : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width)
1517         : "%"REG_a
1518     );
1519 }
1520
1521 /* This is almost identical to the previous, and exists only because
1522  * yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses. */
1523 static inline void RENAME(uyvyToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
1524 {
1525     __asm__ volatile(
1526         "mov                  %0, %%"REG_a"         \n\t"
1527         "1:                                         \n\t"
1528         "movq  (%1, %%"REG_a",2), %%mm0             \n\t"
1529         "movq 8(%1, %%"REG_a",2), %%mm1             \n\t"
1530         "psrlw                $8, %%mm0             \n\t"
1531         "psrlw                $8, %%mm1             \n\t"
1532         "packuswb          %%mm1, %%mm0             \n\t"
1533         "movq              %%mm0, (%2, %%"REG_a")   \n\t"
1534         "add                  $8, %%"REG_a"         \n\t"
1535         " js                  1b                    \n\t"
1536         : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
1537         : "%"REG_a
1538     );
1539 }
1540
1541 static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1542 {
1543     __asm__ volatile(
1544         "movq "MANGLE(bm01010101)", %%mm4           \n\t"
1545         "mov                    %0, %%"REG_a"       \n\t"
1546         "1:                                         \n\t"
1547         "movq    (%1, %%"REG_a",4), %%mm0           \n\t"
1548         "movq   8(%1, %%"REG_a",4), %%mm1           \n\t"
1549         "pand                %%mm4, %%mm0           \n\t"
1550         "pand                %%mm4, %%mm1           \n\t"
1551         "packuswb            %%mm1, %%mm0           \n\t"
1552         "movq                %%mm0, %%mm1           \n\t"
1553         "psrlw                  $8, %%mm0           \n\t"
1554         "pand                %%mm4, %%mm1           \n\t"
1555         "packuswb            %%mm0, %%mm0           \n\t"
1556         "packuswb            %%mm1, %%mm1           \n\t"
1557         "movd                %%mm0, (%3, %%"REG_a") \n\t"
1558         "movd                %%mm1, (%2, %%"REG_a") \n\t"
1559         "add                    $4, %%"REG_a"       \n\t"
1560         " js                    1b                  \n\t"
1561         : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
1562         : "%"REG_a
1563     );
1564     assert(src1 == src2);
1565 }
1566
1567 static inline void RENAME(BEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1568 {
1569     __asm__ volatile(
1570         "movq "MANGLE(bm01010101)", %%mm4           \n\t"
1571         "mov                    %0, %%"REG_a"       \n\t"
1572         "1:                                         \n\t"
1573         "movq    (%1, %%"REG_a",2), %%mm0           \n\t"
1574         "movq   8(%1, %%"REG_a",2), %%mm1           \n\t"
1575         "movq    (%2, %%"REG_a",2), %%mm2           \n\t"
1576         "movq   8(%2, %%"REG_a",2), %%mm3           \n\t"
1577         "pand                %%mm4, %%mm0           \n\t"
1578         "pand                %%mm4, %%mm1           \n\t"
1579         "pand                %%mm4, %%mm2           \n\t"
1580         "pand                %%mm4, %%mm3           \n\t"
1581         "packuswb            %%mm1, %%mm0           \n\t"
1582         "packuswb            %%mm3, %%mm2           \n\t"
1583         "movq                %%mm0, (%3, %%"REG_a") \n\t"
1584         "movq                %%mm2, (%4, %%"REG_a") \n\t"
1585         "add                    $8, %%"REG_a"       \n\t"
1586         " js                    1b                  \n\t"
1587         : : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width)
1588         : "%"REG_a
1589     );
1590 }
1591
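/* nvXXtoUV deinterleaves the packed chroma plane of NV12/NV21, roughly
 * (illustrative scalar code):
 *
 *     dst1[i] = src[2*i];        // U for NV12, V for NV21
 *     dst2[i] = src[2*i + 1];
 *
 * nv12ToUV/nv21ToUV below just swap the destination planes accordingly. */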
1592 static inline void RENAME(nvXXtoUV)(uint8_t *dst1, uint8_t *dst2,
1593                                     const uint8_t *src, long width)
1594 {
1595     __asm__ volatile(
1596         "movq "MANGLE(bm01010101)", %%mm4           \n\t"
1597         "mov                    %0, %%"REG_a"       \n\t"
1598         "1:                                         \n\t"
1599         "movq    (%1, %%"REG_a",2), %%mm0           \n\t"
1600         "movq   8(%1, %%"REG_a",2), %%mm1           \n\t"
1601         "movq                %%mm0, %%mm2           \n\t"
1602         "movq                %%mm1, %%mm3           \n\t"
1603         "pand                %%mm4, %%mm0           \n\t"
1604         "pand                %%mm4, %%mm1           \n\t"
1605         "psrlw                  $8, %%mm2           \n\t"
1606         "psrlw                  $8, %%mm3           \n\t"
1607         "packuswb            %%mm1, %%mm0           \n\t"
1608         "packuswb            %%mm3, %%mm2           \n\t"
1609         "movq                %%mm0, (%2, %%"REG_a") \n\t"
1610         "movq                %%mm2, (%3, %%"REG_a") \n\t"
1611         "add                    $8, %%"REG_a"       \n\t"
1612         " js                    1b                  \n\t"
1613         : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst1+width), "r" (dst2+width)
1614         : "%"REG_a
1615     );
1616 }
1617
1618 static inline void RENAME(nv12ToUV)(uint8_t *dstU, uint8_t *dstV,
1619                                     const uint8_t *src1, const uint8_t *src2,
1620                                     long width, uint32_t *unused)
1621 {
1622     RENAME(nvXXtoUV)(dstU, dstV, src1, width);
1623 }
1624
1625 static inline void RENAME(nv21ToUV)(uint8_t *dstU, uint8_t *dstV,
1626                                     const uint8_t *src1, const uint8_t *src2,
1627                                     long width, uint32_t *unused)
1628 {
1629     RENAME(nvXXtoUV)(dstV, dstU, src1, width);
1630 }
1631
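/* bgr24ToY_mmx/bgr24ToUV_mmx convert packed 24-bit RGB/BGR to planar Y/U/V
 * with pmaddwd against fixed-point coefficient tables. Per pixel this is
 * roughly (illustrative only, coefficient names ry/gy/by etc. are not the
 * real identifiers):
 *
 *     Y = (ry*r + gy*g + by*b + y_round ) >> 15;
 *     U = (ru*r + gu*g + bu*b + uv_round) >> 15;
 *     V = (rv*r + gv*g + bv*b + uv_round) >> 15;
 *
 * with the actual constants taken from ff_bgr24toY*Coeff, ff_bgr24toUV[]
 * and the corresponding offset tables. */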
1632 static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, const uint8_t *src, long width, enum PixelFormat srcFormat)
1633 {
1634
1635     if(srcFormat == PIX_FMT_BGR24) {
1636         __asm__ volatile(
1637             "movq  "MANGLE(ff_bgr24toY1Coeff)", %%mm5       \n\t"
1638             "movq  "MANGLE(ff_bgr24toY2Coeff)", %%mm6       \n\t"
1639             :
1640         );
1641     } else {
1642         __asm__ volatile(
1643             "movq  "MANGLE(ff_rgb24toY1Coeff)", %%mm5       \n\t"
1644             "movq  "MANGLE(ff_rgb24toY2Coeff)", %%mm6       \n\t"
1645             :
1646         );
1647     }
1648
1649     __asm__ volatile(
1650         "movq  "MANGLE(ff_bgr24toYOffset)", %%mm4   \n\t"
1651         "mov                        %2, %%"REG_a"   \n\t"
1652         "pxor                    %%mm7, %%mm7       \n\t"
1653         "1:                                         \n\t"
1654         PREFETCH"               64(%0)              \n\t"
1655         "movd                     (%0), %%mm0       \n\t"
1656         "movd                    2(%0), %%mm1       \n\t"
1657         "movd                    6(%0), %%mm2       \n\t"
1658         "movd                    8(%0), %%mm3       \n\t"
1659         "add                       $12, %0          \n\t"
1660         "punpcklbw               %%mm7, %%mm0       \n\t"
1661         "punpcklbw               %%mm7, %%mm1       \n\t"
1662         "punpcklbw               %%mm7, %%mm2       \n\t"
1663         "punpcklbw               %%mm7, %%mm3       \n\t"
1664         "pmaddwd                 %%mm5, %%mm0       \n\t"
1665         "pmaddwd                 %%mm6, %%mm1       \n\t"
1666         "pmaddwd                 %%mm5, %%mm2       \n\t"
1667         "pmaddwd                 %%mm6, %%mm3       \n\t"
1668         "paddd                   %%mm1, %%mm0       \n\t"
1669         "paddd                   %%mm3, %%mm2       \n\t"
1670         "paddd                   %%mm4, %%mm0       \n\t"
1671         "paddd                   %%mm4, %%mm2       \n\t"
1672         "psrad                     $15, %%mm0       \n\t"
1673         "psrad                     $15, %%mm2       \n\t"
1674         "packssdw                %%mm2, %%mm0       \n\t"
1675         "packuswb                %%mm0, %%mm0       \n\t"
1676         "movd                %%mm0, (%1, %%"REG_a") \n\t"
1677         "add                        $4, %%"REG_a"   \n\t"
1678         " js                        1b              \n\t"
1679     : "+r" (src)
1680     : "r" (dst+width), "g" ((x86_reg)-width)
1681     : "%"REG_a
1682     );
1683 }
1684
1685 static inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src, long width, enum PixelFormat srcFormat)
1686 {
1687     __asm__ volatile(
1688         "movq                    24(%4), %%mm6       \n\t"
1689         "mov                        %3, %%"REG_a"   \n\t"
1690         "pxor                    %%mm7, %%mm7       \n\t"
1691         "1:                                         \n\t"
1692         PREFETCH"               64(%0)              \n\t"
1693         "movd                     (%0), %%mm0       \n\t"
1694         "movd                    2(%0), %%mm1       \n\t"
1695         "punpcklbw               %%mm7, %%mm0       \n\t"
1696         "punpcklbw               %%mm7, %%mm1       \n\t"
1697         "movq                    %%mm0, %%mm2       \n\t"
1698         "movq                    %%mm1, %%mm3       \n\t"
1699         "pmaddwd                  (%4), %%mm0       \n\t"
1700         "pmaddwd                 8(%4), %%mm1       \n\t"
1701         "pmaddwd                16(%4), %%mm2       \n\t"
1702         "pmaddwd                 %%mm6, %%mm3       \n\t"
1703         "paddd                   %%mm1, %%mm0       \n\t"
1704         "paddd                   %%mm3, %%mm2       \n\t"
1705
1706         "movd                    6(%0), %%mm1       \n\t"
1707         "movd                    8(%0), %%mm3       \n\t"
1708         "add                       $12, %0          \n\t"
1709         "punpcklbw               %%mm7, %%mm1       \n\t"
1710         "punpcklbw               %%mm7, %%mm3       \n\t"
1711         "movq                    %%mm1, %%mm4       \n\t"
1712         "movq                    %%mm3, %%mm5       \n\t"
1713         "pmaddwd                  (%4), %%mm1       \n\t"
1714         "pmaddwd                 8(%4), %%mm3       \n\t"
1715         "pmaddwd                16(%4), %%mm4       \n\t"
1716         "pmaddwd                 %%mm6, %%mm5       \n\t"
1717         "paddd                   %%mm3, %%mm1       \n\t"
1718         "paddd                   %%mm5, %%mm4       \n\t"
1719
1720         "movq "MANGLE(ff_bgr24toUVOffset)", %%mm3       \n\t"
1721         "paddd                   %%mm3, %%mm0       \n\t"
1722         "paddd                   %%mm3, %%mm2       \n\t"
1723         "paddd                   %%mm3, %%mm1       \n\t"
1724         "paddd                   %%mm3, %%mm4       \n\t"
1725         "psrad                     $15, %%mm0       \n\t"
1726         "psrad                     $15, %%mm2       \n\t"
1727         "psrad                     $15, %%mm1       \n\t"
1728         "psrad                     $15, %%mm4       \n\t"
1729         "packssdw                %%mm1, %%mm0       \n\t"
1730         "packssdw                %%mm4, %%mm2       \n\t"
1731         "packuswb                %%mm0, %%mm0       \n\t"
1732         "packuswb                %%mm2, %%mm2       \n\t"
1733         "movd                %%mm0, (%1, %%"REG_a") \n\t"
1734         "movd                %%mm2, (%2, %%"REG_a") \n\t"
1735         "add                        $4, %%"REG_a"   \n\t"
1736         " js                        1b              \n\t"
1737     : "+r" (src)
1738     : "r" (dstU+width), "r" (dstV+width), "g" ((x86_reg)-width), "r"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24])
1739     : "%"REG_a
1740     );
1741 }
1742
1743 static inline void RENAME(bgr24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
1744 {
1745     RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24);
1746 }
1747
1748 static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1749 {
1750     RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_BGR24);
1751     assert(src1 == src2);
1752 }
1753
1754 static inline void RENAME(rgb24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
1755 {
1756     RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24);
1757 }
1758
1759 static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1760 {
1761     assert(src1==src2);
1762     RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_RGB24);
1763 }
1764
1765
1766 // bilinear / bicubic scaling
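/* hScale() applies a generic horizontal FIR filter: for every output pixel i
 * it multiplies filterSize consecutive source pixels, starting at
 * filterPos[i], with the matching 16-bit coefficients. Roughly what the
 * generic C implementation does (illustrative scalar code):
 *
 *     int val = 0;
 *     for (j = 0; j < filterSize; j++)
 *         val += src[filterPos[i] + j] * filter[filterSize * i + j];
 *     dst[i] = FFMIN(val >> 7, (1 << 15) - 1);
 *
 * The MMX variants below specialize filterSize == 4 and == 8 and keep a
 * generic inner loop for larger filters. */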
1767 static inline void RENAME(hScale)(int16_t *dst, int dstW, const uint8_t *src, int srcW, int xInc,
1768                                   const int16_t *filter, const int16_t *filterPos, long filterSize)
1769 {
1770     assert(filterSize % 4 == 0 && filterSize>0);
1771     if (filterSize==4) { // Always true for upscaling, sometimes for down, too.
1772         x86_reg counter= -2*dstW;
1773         filter-= counter*2;
1774         filterPos-= counter/2;
1775         dst-= counter/2;
1776         __asm__ volatile(
1777 #if defined(PIC)
1778             "push            %%"REG_b"              \n\t"
1779 #endif
1780             "pxor                %%mm7, %%mm7       \n\t"
1781             "push           %%"REG_BP"              \n\t" // we use 7 regs here ...
1782             "mov             %%"REG_a", %%"REG_BP"  \n\t"
1783             ".p2align                4              \n\t"
1784             "1:                                     \n\t"
1785             "movzwl   (%2, %%"REG_BP"), %%eax       \n\t"
1786             "movzwl  2(%2, %%"REG_BP"), %%ebx       \n\t"
1787             "movq  (%1, %%"REG_BP", 4), %%mm1       \n\t"
1788             "movq 8(%1, %%"REG_BP", 4), %%mm3       \n\t"
1789             "movd      (%3, %%"REG_a"), %%mm0       \n\t"
1790             "movd      (%3, %%"REG_b"), %%mm2       \n\t"
1791             "punpcklbw           %%mm7, %%mm0       \n\t"
1792             "punpcklbw           %%mm7, %%mm2       \n\t"
1793             "pmaddwd             %%mm1, %%mm0       \n\t"
1794             "pmaddwd             %%mm2, %%mm3       \n\t"
1795             "movq                %%mm0, %%mm4       \n\t"
1796             "punpckldq           %%mm3, %%mm0       \n\t"
1797             "punpckhdq           %%mm3, %%mm4       \n\t"
1798             "paddd               %%mm4, %%mm0       \n\t"
1799             "psrad                  $7, %%mm0       \n\t"
1800             "packssdw            %%mm0, %%mm0       \n\t"
1801             "movd                %%mm0, (%4, %%"REG_BP")    \n\t"
1802             "add                    $4, %%"REG_BP"  \n\t"
1803             " jnc                   1b              \n\t"
1804
1805             "pop            %%"REG_BP"              \n\t"
1806 #if defined(PIC)
1807             "pop             %%"REG_b"              \n\t"
1808 #endif
1809             : "+a" (counter)
1810             : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
1811 #if !defined(PIC)
1812             : "%"REG_b
1813 #endif
1814         );
1815     } else if (filterSize==8) {
1816         x86_reg counter= -2*dstW;
1817         filter-= counter*4;
1818         filterPos-= counter/2;
1819         dst-= counter/2;
1820         __asm__ volatile(
1821 #if defined(PIC)
1822             "push             %%"REG_b"             \n\t"
1823 #endif
1824             "pxor                 %%mm7, %%mm7      \n\t"
1825             "push            %%"REG_BP"             \n\t" // we use 7 regs here ...
1826             "mov              %%"REG_a", %%"REG_BP" \n\t"
1827             ".p2align                 4             \n\t"
1828             "1:                                     \n\t"
1829             "movzwl    (%2, %%"REG_BP"), %%eax      \n\t"
1830             "movzwl   2(%2, %%"REG_BP"), %%ebx      \n\t"
1831             "movq   (%1, %%"REG_BP", 8), %%mm1      \n\t"
1832             "movq 16(%1, %%"REG_BP", 8), %%mm3      \n\t"
1833             "movd       (%3, %%"REG_a"), %%mm0      \n\t"
1834             "movd       (%3, %%"REG_b"), %%mm2      \n\t"
1835             "punpcklbw            %%mm7, %%mm0      \n\t"
1836             "punpcklbw            %%mm7, %%mm2      \n\t"
1837             "pmaddwd              %%mm1, %%mm0      \n\t"
1838             "pmaddwd              %%mm2, %%mm3      \n\t"
1839
1840             "movq  8(%1, %%"REG_BP", 8), %%mm1      \n\t"
1841             "movq 24(%1, %%"REG_BP", 8), %%mm5      \n\t"
1842             "movd      4(%3, %%"REG_a"), %%mm4      \n\t"
1843             "movd      4(%3, %%"REG_b"), %%mm2      \n\t"
1844             "punpcklbw            %%mm7, %%mm4      \n\t"
1845             "punpcklbw            %%mm7, %%mm2      \n\t"
1846             "pmaddwd              %%mm1, %%mm4      \n\t"
1847             "pmaddwd              %%mm2, %%mm5      \n\t"
1848             "paddd                %%mm4, %%mm0      \n\t"
1849             "paddd                %%mm5, %%mm3      \n\t"
1850             "movq                 %%mm0, %%mm4      \n\t"
1851             "punpckldq            %%mm3, %%mm0      \n\t"
1852             "punpckhdq            %%mm3, %%mm4      \n\t"
1853             "paddd                %%mm4, %%mm0      \n\t"
1854             "psrad                   $7, %%mm0      \n\t"
1855             "packssdw             %%mm0, %%mm0      \n\t"
1856             "movd                 %%mm0, (%4, %%"REG_BP")   \n\t"
1857             "add                     $4, %%"REG_BP" \n\t"
1858             " jnc                    1b             \n\t"
1859
1860             "pop             %%"REG_BP"             \n\t"
1861 #if defined(PIC)
1862             "pop              %%"REG_b"             \n\t"
1863 #endif
1864             : "+a" (counter)
1865             : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
1866 #if !defined(PIC)
1867             : "%"REG_b
1868 #endif
1869         );
1870     } else {
1871         const uint8_t *offset = src+filterSize;
1872         x86_reg counter= -2*dstW;
1873         //filter-= counter*filterSize/2;
1874         filterPos-= counter/2;
1875         dst-= counter/2;
1876         __asm__ volatile(
1877             "pxor                  %%mm7, %%mm7     \n\t"
1878             ".p2align                  4            \n\t"
1879             "1:                                     \n\t"
1880             "mov                      %2, %%"REG_c" \n\t"
1881             "movzwl      (%%"REG_c", %0), %%eax     \n\t"
1882             "movzwl     2(%%"REG_c", %0), %%edx     \n\t"
1883             "mov                      %5, %%"REG_c" \n\t"
1884             "pxor                  %%mm4, %%mm4     \n\t"
1885             "pxor                  %%mm5, %%mm5     \n\t"
1886             "2:                                     \n\t"
1887             "movq                   (%1), %%mm1     \n\t"
1888             "movq               (%1, %6), %%mm3     \n\t"
1889             "movd (%%"REG_c", %%"REG_a"), %%mm0     \n\t"
1890             "movd (%%"REG_c", %%"REG_d"), %%mm2     \n\t"
1891             "punpcklbw             %%mm7, %%mm0     \n\t"
1892             "punpcklbw             %%mm7, %%mm2     \n\t"
1893             "pmaddwd               %%mm1, %%mm0     \n\t"
1894             "pmaddwd               %%mm2, %%mm3     \n\t"
1895             "paddd                 %%mm3, %%mm5     \n\t"
1896             "paddd                 %%mm0, %%mm4     \n\t"
1897             "add                      $8, %1        \n\t"
1898             "add                      $4, %%"REG_c" \n\t"
1899             "cmp                      %4, %%"REG_c" \n\t"
1900             " jb                      2b            \n\t"
1901             "add                      %6, %1        \n\t"
1902             "movq                  %%mm4, %%mm0     \n\t"
1903             "punpckldq             %%mm5, %%mm4     \n\t"
1904             "punpckhdq             %%mm5, %%mm0     \n\t"
1905             "paddd                 %%mm0, %%mm4     \n\t"
1906             "psrad                    $7, %%mm4     \n\t"
1907             "packssdw              %%mm4, %%mm4     \n\t"
1908             "mov                      %3, %%"REG_a" \n\t"
1909             "movd                  %%mm4, (%%"REG_a", %0)   \n\t"
1910             "add                      $4, %0        \n\t"
1911             " jnc                     1b            \n\t"
1912
1913             : "+r" (counter), "+r" (filter)
1914             : "m" (filterPos), "m" (dst), "m"(offset),
1915             "m" (src), "r" ((x86_reg)filterSize*2)
1916             : "%"REG_a, "%"REG_c, "%"REG_d
1917         );
1918     }
1919 }
1920
1921 #if COMPILE_TEMPLATE_MMX2
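/* hyscale_fast/hcscale_fast drive the run-time generated MMX2 horizontal
 * scaler (c->lumMmx2FilterCode / c->chrMmx2FilterCode); CALL_MMX2_FILTER_CODE
 * below jumps into successive chunks of that generated code. The overall
 * effect is a fast bilinear scale, roughly (illustrative scalar code):
 *
 *     int xpos = 0;
 *     for (i = 0; i < dstWidth; i++) {
 *         int xx     = xpos >> 16;
 *         int xalpha = (xpos & 0xFFFF) >> 9;
 *         dst[i] = (src[xx] << 7) + (src[xx + 1] - src[xx]) * xalpha;
 *         xpos  += xInc;
 *     }
 *
 * followed by the edge fix-up loop after the asm block. */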
1922 static inline void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst,
1923                                         long dstWidth, const uint8_t *src, int srcW,
1924                                         int xInc)
1925 {
1926     int32_t *filterPos = c->hLumFilterPos;
1927     int16_t *filter    = c->hLumFilter;
1929     void    *mmx2FilterCode= c->lumMmx2FilterCode;
1930     int i;
1931 #if defined(PIC)
1932     DECLARE_ALIGNED(8, uint64_t, ebxsave);
1933 #endif
1934
1935         __asm__ volatile(
1936 #if defined(PIC)
1937             "mov               %%"REG_b", %5        \n\t"
1938 #endif
1939             "pxor                  %%mm7, %%mm7     \n\t"
1940             "mov                      %0, %%"REG_c" \n\t"
1941             "mov                      %1, %%"REG_D" \n\t"
1942             "mov                      %2, %%"REG_d" \n\t"
1943             "mov                      %3, %%"REG_b" \n\t"
1944             "xor               %%"REG_a", %%"REG_a" \n\t" // i
1945             PREFETCH"        (%%"REG_c")            \n\t"
1946             PREFETCH"      32(%%"REG_c")            \n\t"
1947             PREFETCH"      64(%%"REG_c")            \n\t"
1948
1949 #if ARCH_X86_64
1950
1951 #define CALL_MMX2_FILTER_CODE \
1952             "movl            (%%"REG_b"), %%esi     \n\t"\
1953             "call                    *%4            \n\t"\
1954             "movl (%%"REG_b", %%"REG_a"), %%esi     \n\t"\
1955             "add               %%"REG_S", %%"REG_c" \n\t"\
1956             "add               %%"REG_a", %%"REG_D" \n\t"\
1957             "xor               %%"REG_a", %%"REG_a" \n\t"\
1958
1959 #else
1960
1961 #define CALL_MMX2_FILTER_CODE \
1962             "movl (%%"REG_b"), %%esi        \n\t"\
1963             "call         *%4                       \n\t"\
1964             "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
1965             "add               %%"REG_a", %%"REG_D" \n\t"\
1966             "xor               %%"REG_a", %%"REG_a" \n\t"\
1967
1968 #endif /* ARCH_X86_64 */
1969
1970             CALL_MMX2_FILTER_CODE
1971             CALL_MMX2_FILTER_CODE
1972             CALL_MMX2_FILTER_CODE
1973             CALL_MMX2_FILTER_CODE
1974             CALL_MMX2_FILTER_CODE
1975             CALL_MMX2_FILTER_CODE
1976             CALL_MMX2_FILTER_CODE
1977             CALL_MMX2_FILTER_CODE
1978
1979 #if defined(PIC)
1980             "mov                      %5, %%"REG_b" \n\t"
1981 #endif
1982             :: "m" (src), "m" (dst), "m" (filter), "m" (filterPos),
1983             "m" (mmx2FilterCode)
1984 #if defined(PIC)
1985             ,"m" (ebxsave)
1986 #endif
1987             : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
1988 #if !defined(PIC)
1989             ,"%"REG_b
1990 #endif
1991         );
1992         for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
1993 }
1994
1995 static inline void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst,
1996                                         long dstWidth, const uint8_t *src1,
1997                                         const uint8_t *src2, int srcW, int xInc)
1998 {
1999     int32_t *filterPos = c->hChrFilterPos;
2000     int16_t *filter    = c->hChrFilter;
2002     void    *mmx2FilterCode= c->chrMmx2FilterCode;
2003     int i;
2004 #if defined(PIC)
2005     DECLARE_ALIGNED(8, uint64_t, ebxsave);
2006 #endif
2007
2008         __asm__ volatile(
2009 #if defined(PIC)
2010             "mov          %%"REG_b", %6         \n\t"
2011 #endif
2012             "pxor             %%mm7, %%mm7      \n\t"
2013             "mov                 %0, %%"REG_c"  \n\t"
2014             "mov                 %1, %%"REG_D"  \n\t"
2015             "mov                 %2, %%"REG_d"  \n\t"
2016             "mov                 %3, %%"REG_b"  \n\t"
2017             "xor          %%"REG_a", %%"REG_a"  \n\t" // i
2018             PREFETCH"   (%%"REG_c")             \n\t"
2019             PREFETCH" 32(%%"REG_c")             \n\t"
2020             PREFETCH" 64(%%"REG_c")             \n\t"
2021
2022             CALL_MMX2_FILTER_CODE
2023             CALL_MMX2_FILTER_CODE
2024             CALL_MMX2_FILTER_CODE
2025             CALL_MMX2_FILTER_CODE
2026             "xor          %%"REG_a", %%"REG_a"  \n\t" // i
2027             "mov                 %5, %%"REG_c"  \n\t" // src
2028             "mov                 %1, %%"REG_D"  \n\t" // buf1
2029             "add              $"AV_STRINGIFY(VOF)", %%"REG_D"  \n\t"
2030             PREFETCH"   (%%"REG_c")             \n\t"
2031             PREFETCH" 32(%%"REG_c")             \n\t"
2032             PREFETCH" 64(%%"REG_c")             \n\t"
2033
2034             CALL_MMX2_FILTER_CODE
2035             CALL_MMX2_FILTER_CODE
2036             CALL_MMX2_FILTER_CODE
2037             CALL_MMX2_FILTER_CODE
2038
2039 #if defined(PIC)
2040             "mov %6, %%"REG_b"    \n\t"
2041 #endif
2042             :: "m" (src1), "m" (dst), "m" (filter), "m" (filterPos),
2043             "m" (mmx2FilterCode), "m" (src2)
2044 #if defined(PIC)
2045             ,"m" (ebxsave)
2046 #endif
2047             : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
2048 #if !defined(PIC)
2049             ,"%"REG_b
2050 #endif
2051         );
2052         for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) {
2053             //printf("%d %d %d\n", dstWidth, i, srcW);
2054             dst[i] = src1[srcW-1]*128;
2055             dst[i+VOFW] = src2[srcW-1]*128;
2056         }
2057 }
2058 #endif /* COMPILE_TEMPLATE_MMX2 */
2059
2060 #if !COMPILE_TEMPLATE_MMX2
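/* updateMMXDitherTables() prepares the per-line state that the MMX/MMX2
 * vertical scalers read through c->redDither: the dither pointers and the
 * lumMmxFilter/chrMmxFilter arrays. In the non-SWS_ACCURATE_RND layout each
 * filter tap occupies four int32 slots, roughly:
 *
 *     { src_ptr_low32, src_ptr_high32, coeff * 0x10001, coeff * 0x10001 }
 *
 * i.e. a source-line pointer followed by the 16-bit coefficient replicated
 * into both halves of a 32-bit word (see the loops below). */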
2061 static void updateMMXDitherTables(SwsContext *c, int dstY, int lumBufIndex, int chrBufIndex,
2062                                   int lastInLumBuf, int lastInChrBuf)
2063 {
2064     const int dstH= c->dstH;
2065     const int flags= c->flags;
2066     int16_t **lumPixBuf= c->lumPixBuf;
2067     int16_t **chrPixBuf= c->chrPixBuf;
2068     int16_t **alpPixBuf= c->alpPixBuf;
2069     const int vLumBufSize= c->vLumBufSize;
2070     const int vChrBufSize= c->vChrBufSize;
2071     int16_t *vLumFilterPos= c->vLumFilterPos;
2072     int16_t *vChrFilterPos= c->vChrFilterPos;
2073     int16_t *vLumFilter= c->vLumFilter;
2074     int16_t *vChrFilter= c->vChrFilter;
2075     int32_t *lumMmxFilter= c->lumMmxFilter;
2076     int32_t *chrMmxFilter= c->chrMmxFilter;
2077     int32_t av_unused *alpMmxFilter= c->alpMmxFilter;
2078     const int vLumFilterSize= c->vLumFilterSize;
2079     const int vChrFilterSize= c->vChrFilterSize;
2080     const int chrDstY= dstY>>c->chrDstVSubSample;
2081     const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
2082     const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
2083
2084         c->blueDither= ff_dither8[dstY&1];
2085         if (c->dstFormat == PIX_FMT_RGB555 || c->dstFormat == PIX_FMT_BGR555)
2086             c->greenDither= ff_dither8[dstY&1];
2087         else
2088             c->greenDither= ff_dither4[dstY&1];
2089         c->redDither= ff_dither8[(dstY+1)&1];
2090         if (dstY < dstH - 2) {
2091             const int16_t **lumSrcPtr= (const int16_t **) lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
2092             const int16_t **chrSrcPtr= (const int16_t **) chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
2093             const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **) alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
2094             int i;
2095             if (flags & SWS_ACCURATE_RND) {
2096                 int s= APCK_SIZE / 8;
2097                 for (i=0; i<vLumFilterSize; i+=2) {
2098                     *(const void**)&lumMmxFilter[s*i              ]= lumSrcPtr[i  ];
2099                     *(const void**)&lumMmxFilter[s*i+APCK_PTR2/4  ]= lumSrcPtr[i+(vLumFilterSize>1)];
2100                               lumMmxFilter[s*i+APCK_COEF/4  ]=
2101                               lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i    ]
2102                         + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
2103                     if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
2104                         *(const void**)&alpMmxFilter[s*i              ]= alpSrcPtr[i  ];
2105                         *(const void**)&alpMmxFilter[s*i+APCK_PTR2/4  ]= alpSrcPtr[i+(vLumFilterSize>1)];
2106                                   alpMmxFilter[s*i+APCK_COEF/4  ]=
2107                                   alpMmxFilter[s*i+APCK_COEF/4+1]= lumMmxFilter[s*i+APCK_COEF/4  ];
2108                     }
2109                 }
2110                 for (i=0; i<vChrFilterSize; i+=2) {
2111                     *(const void**)&chrMmxFilter[s*i              ]= chrSrcPtr[i  ];
2112                     *(const void**)&chrMmxFilter[s*i+APCK_PTR2/4  ]= chrSrcPtr[i+(vChrFilterSize>1)];
2113                               chrMmxFilter[s*i+APCK_COEF/4  ]=
2114                               chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i    ]
2115                         + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
2116                 }
2117             } else {
2118                 for (i=0; i<vLumFilterSize; i++) {
2119                     lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
2120                     lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32;
2121                     lumMmxFilter[4*i+2]=
2122                     lumMmxFilter[4*i+3]=
2123                         ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
2124                     if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
2125                         alpMmxFilter[4*i+0]= (int32_t)alpSrcPtr[i];
2126                         alpMmxFilter[4*i+1]= (uint64_t)alpSrcPtr[i] >> 32;
2127                         alpMmxFilter[4*i+2]=
2128                         alpMmxFilter[4*i+3]= lumMmxFilter[4*i+2];
2129                     }
2130                 }
2131                 for (i=0; i<vChrFilterSize; i++) {
2132                     chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
2133                     chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32;
2134                     chrMmxFilter[4*i+2]=
2135                     chrMmxFilter[4*i+3]=
2136                         ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
2137                 }
2138             }
2139         }
2140 }
2141 #endif /* !COMPILE_TEMPLATE_MMX2 */
2142
2143 static void RENAME(sws_init_swScale)(SwsContext *c)
2144 {
2145     enum PixelFormat srcFormat = c->srcFormat;
2146
2147     c->yuv2yuv1     = RENAME(yuv2yuv1    );
2148     c->yuv2yuvX     = RENAME(yuv2yuvX    );
2149     c->yuv2packed1  = RENAME(yuv2packed1 );
2150     c->yuv2packed2  = RENAME(yuv2packed2 );
2151     c->yuv2packedX  = RENAME(yuv2packedX );
2152
2153     c->hScale       = RENAME(hScale      );
2154
2155     // Fall back to the MMX hScale() above when the MMX2 fast-bilinear scaler cannot be used (it is still faster than the plain x86 ASM scaler).
2156 #if COMPILE_TEMPLATE_MMX2
2157     if (c->flags & SWS_FAST_BILINEAR && c->canMMX2BeUsed)
2158     {
2159         c->hyscale_fast = RENAME(hyscale_fast);
2160         c->hcscale_fast = RENAME(hcscale_fast);
2161     } else {
2162 #endif /* COMPILE_TEMPLATE_MMX2 */
2163         c->hyscale_fast = NULL;
2164         c->hcscale_fast = NULL;
2165 #if COMPILE_TEMPLATE_MMX2
2166     }
2167 #endif /* COMPILE_TEMPLATE_MMX2 */
2168
2169     switch(srcFormat) {
2170         case PIX_FMT_YUYV422  : c->chrToYV12 = RENAME(yuy2ToUV); break;
2171         case PIX_FMT_UYVY422  : c->chrToYV12 = RENAME(uyvyToUV); break;
2172         case PIX_FMT_NV12     : c->chrToYV12 = RENAME(nv12ToUV); break;
2173         case PIX_FMT_NV21     : c->chrToYV12 = RENAME(nv21ToUV); break;
2174         case PIX_FMT_YUV420P16BE:
2175         case PIX_FMT_YUV422P16BE:
2176         case PIX_FMT_YUV444P16BE: c->chrToYV12 = RENAME(BEToUV); break;
2177         case PIX_FMT_YUV420P16LE:
2178         case PIX_FMT_YUV422P16LE:
2179         case PIX_FMT_YUV444P16LE: c->chrToYV12 = RENAME(LEToUV); break;
2180         default: break;
2181     }
2182     if (!c->chrSrcHSubSample) {
2183         switch(srcFormat) {
2184         case PIX_FMT_BGR24  : c->chrToYV12 = RENAME(bgr24ToUV); break;
2185         case PIX_FMT_RGB24  : c->chrToYV12 = RENAME(rgb24ToUV); break;
2186         default: break;
2187         }
2188     }
2189
2190     switch (srcFormat) {
2191     case PIX_FMT_YUYV422  :
2192     case PIX_FMT_YUV420P16BE:
2193     case PIX_FMT_YUV422P16BE:
2194     case PIX_FMT_YUV444P16BE:
2195     case PIX_FMT_Y400A    :
2196     case PIX_FMT_GRAY16BE : c->lumToYV12 = RENAME(yuy2ToY); break;
2197     case PIX_FMT_UYVY422  :
2198     case PIX_FMT_YUV420P16LE:
2199     case PIX_FMT_YUV422P16LE:
2200     case PIX_FMT_YUV444P16LE:
2201     case PIX_FMT_GRAY16LE : c->lumToYV12 = RENAME(uyvyToY); break;
2202     case PIX_FMT_BGR24    : c->lumToYV12 = RENAME(bgr24ToY); break;
2203     case PIX_FMT_RGB24    : c->lumToYV12 = RENAME(rgb24ToY); break;
2204     default: break;
2205     }
2206     if (c->alpPixBuf) {
2207         switch (srcFormat) {
2208         case PIX_FMT_Y400A  : c->alpToYV12 = RENAME(yuy2ToY); break;
2209         default: break;
2210         }
2211     }
2212 }