1 /*
2  *
3  *  rgb2rgb.c, Software RGB to RGB converter,
4  *               Software PAL8 to RGB converter,
5  *               Software YUV to YUV converter,
6  *               Software YUV to RGB converter
7  *  Written by Nick Kurshev.
8  *  palette & YUV & runtime CPU stuff by Michael (michaelni@gmx.at)
9  *  lots of big-endian byte-order fixes by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27
28 #include <stddef.h>
29 #include <inttypes.h> /* for __WORDSIZE */
30
31 #ifndef __WORDSIZE
32 // #warning You have a misconfigured system and will probably lose performance!
33 #define __WORDSIZE MP_WORDSIZE
34 #endif
35
36 #undef PREFETCH
37 #undef MOVNTQ
38 #undef EMMS
39 #undef SFENCE
40 #undef MMREG_SIZE
41 #undef PREFETCHW
42 #undef PAVGB
43
44 #ifdef HAVE_SSE2
45 #define MMREG_SIZE 16
46 #else
47 #define MMREG_SIZE 8
48 #endif
49
50 #ifdef HAVE_3DNOW
51 #define PREFETCH  "prefetch"
52 #define PREFETCHW "prefetchw"
53 #define PAVGB     "pavgusb"
54 #elif defined ( HAVE_MMX2 )
55 #define PREFETCH "prefetchnta"
56 #define PREFETCHW "prefetcht0"
57 #define PAVGB     "pavgb"
58 #else
59 #ifdef __APPLE__
60 #define PREFETCH "#"
61 #define PREFETCHW "#"
62 #else
63 #define PREFETCH "/nop"
64 #define PREFETCHW "/nop"
65 #endif
66 #endif
67
68 #ifdef HAVE_3DNOW
69 /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
70 #define EMMS     "femms"
71 #else
72 #define EMMS     "emms"
73 #endif
74
75 #ifdef HAVE_MMX2
76 #define MOVNTQ "movntq"
77 #define SFENCE "sfence"
78 #else
79 #define MOVNTQ "movq"
80 #ifdef __APPLE__
81 #define SFENCE "#"
82 #else
83 #define SFENCE "/nop"
84 #endif
85 #endif
86
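/*
 * A rough sketch of how this template is meant to be used (the include
 * pattern lives in the including file, rgb2rgb.c; the names below are
 * illustrative, not verbatim):
 *
 *   #undef  RENAME
 *   #define RENAME(a) a ## _MMX2      // or _C, _MMX, _3DNOW ...
 *   #include "rgb2rgb_template.c"     // emits rgb24to32_MMX2(), etc.
 *
 * The PREFETCH/MOVNTQ/SFENCE/EMMS macros above then resolve to the matching
 * instructions for that build (e.g. MOVNTQ is the non-temporal movntq only
 * when HAVE_MMX2 is defined, and a plain movq otherwise).
 */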
87 static inline void RENAME(rgb24to32)(const uint8_t *src,uint8_t *dst,long src_size)
88 {
89   uint8_t *dest = dst;
90   const uint8_t *s = src;
91   const uint8_t *end;
92 #ifdef HAVE_MMX
93   const uint8_t *mm_end;
94 #endif
95   end = s + src_size;
96 #ifdef HAVE_MMX
97   __asm __volatile(PREFETCH"    %0"::"m"(*s):"memory");
98   mm_end = end - 23;
99   __asm __volatile("movq        %0, %%mm7"::"m"(mask32):"memory");
100   while(s < mm_end)
101   {
102     __asm __volatile(
103         PREFETCH"       32%1\n\t"
104         "movd   %1, %%mm0\n\t"
105         "punpckldq 3%1, %%mm0\n\t"
106         "movd   6%1, %%mm1\n\t"
107         "punpckldq 9%1, %%mm1\n\t"
108         "movd   12%1, %%mm2\n\t"
109         "punpckldq 15%1, %%mm2\n\t"
110         "movd   18%1, %%mm3\n\t"
111         "punpckldq 21%1, %%mm3\n\t"
112         "pand   %%mm7, %%mm0\n\t"
113         "pand   %%mm7, %%mm1\n\t"
114         "pand   %%mm7, %%mm2\n\t"
115         "pand   %%mm7, %%mm3\n\t"
116         MOVNTQ" %%mm0, %0\n\t"
117         MOVNTQ" %%mm1, 8%0\n\t"
118         MOVNTQ" %%mm2, 16%0\n\t"
119         MOVNTQ" %%mm3, 24%0"
120         :"=m"(*dest)
121         :"m"(*s)
122         :"memory");
123     dest += 32;
124     s += 24;
125   }
126   __asm __volatile(SFENCE:::"memory");
127   __asm __volatile(EMMS:::"memory");
128 #endif
129   while(s < end)
130   {
131 #ifdef WORDS_BIGENDIAN
132     /* RGB24 (= R,G,B) -> RGB32 (= A,B,G,R) */
133     *dest++ = 0;
134     *dest++ = s[2];
135     *dest++ = s[1];
136     *dest++ = s[0];
137     s+=3;
138 #else
139     *dest++ = *s++;
140     *dest++ = *s++;
141     *dest++ = *s++;
142     *dest++ = 0;
143 #endif
144   }
145 }
146
147 static inline void RENAME(rgb32to24)(const uint8_t *src,uint8_t *dst,long src_size)
148 {
149   uint8_t *dest = dst;
150   const uint8_t *s = src;
151   const uint8_t *end;
152 #ifdef HAVE_MMX
153   const uint8_t *mm_end;
154 #endif
155   end = s + src_size;
156 #ifdef HAVE_MMX
157   __asm __volatile(PREFETCH"    %0"::"m"(*s):"memory");
158   mm_end = end - 31;
159   while(s < mm_end)
160   {
161     __asm __volatile(
162         PREFETCH"       32%1\n\t"
163         "movq   %1, %%mm0\n\t"
164         "movq   8%1, %%mm1\n\t"
165         "movq   16%1, %%mm4\n\t"
166         "movq   24%1, %%mm5\n\t"
167         "movq   %%mm0, %%mm2\n\t"
168         "movq   %%mm1, %%mm3\n\t"
169         "movq   %%mm4, %%mm6\n\t"
170         "movq   %%mm5, %%mm7\n\t"
171         "psrlq  $8, %%mm2\n\t"
172         "psrlq  $8, %%mm3\n\t"
173         "psrlq  $8, %%mm6\n\t"
174         "psrlq  $8, %%mm7\n\t"
175         "pand   %2, %%mm0\n\t"
176         "pand   %2, %%mm1\n\t"
177         "pand   %2, %%mm4\n\t"
178         "pand   %2, %%mm5\n\t"
179         "pand   %3, %%mm2\n\t"
180         "pand   %3, %%mm3\n\t"
181         "pand   %3, %%mm6\n\t"
182         "pand   %3, %%mm7\n\t"
183         "por    %%mm2, %%mm0\n\t"
184         "por    %%mm3, %%mm1\n\t"
185         "por    %%mm6, %%mm4\n\t"
186         "por    %%mm7, %%mm5\n\t"
187
188         "movq   %%mm1, %%mm2\n\t"
189         "movq   %%mm4, %%mm3\n\t"
190         "psllq  $48, %%mm2\n\t"
191         "psllq  $32, %%mm3\n\t"
192         "pand   %4, %%mm2\n\t"
193         "pand   %5, %%mm3\n\t"
194         "por    %%mm2, %%mm0\n\t"
195         "psrlq  $16, %%mm1\n\t"
196         "psrlq  $32, %%mm4\n\t"
197         "psllq  $16, %%mm5\n\t"
198         "por    %%mm3, %%mm1\n\t"
199         "pand   %6, %%mm5\n\t"
200         "por    %%mm5, %%mm4\n\t"
201
202         MOVNTQ" %%mm0, %0\n\t"
203         MOVNTQ" %%mm1, 8%0\n\t"
204         MOVNTQ" %%mm4, 16%0"
205         :"=m"(*dest)
206         :"m"(*s),"m"(mask24l),
207          "m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
208         :"memory");
209     dest += 24;
210     s += 32;
211   }
212   __asm __volatile(SFENCE:::"memory");
213   __asm __volatile(EMMS:::"memory");
214 #endif
215   while(s < end)
216   {
217 #ifdef WORDS_BIGENDIAN
218     /* RGB32 (= A,B,G,R) -> RGB24 (= R,G,B) */
219     s++;
220     dest[2] = *s++;
221     dest[1] = *s++;
222     dest[0] = *s++;
223     dest += 3;
224 #else
225     *dest++ = *s++;
226     *dest++ = *s++;
227     *dest++ = *s++;
228     s++;
229 #endif
230   }
231 }
232
233 /*
234  Original by Strepto/Astral
235  ported to gcc & bugfixed: A'rpi
236  MMX2, 3DNow! optimization by Nick Kurshev
237  32-bit C version, and the and&add trick by Michael Niedermayer
238 */
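/*
 * The and&add trick, shown on a single RGB555 pixel (a sketch of the
 * arithmetic only; the code below does two pixels per 32 bit add):
 *
 *   x          = 0RRRRRGGGGGBBBBB
 *   x & 0x7FE0 = 0RRRRRGGGGG00000   (red and green fields only)
 *   (x & 0x7FFF) + (x & 0x7FE0)
 *              = RRRRRGGGGG0BBBBB   (adding a field to itself doubles it,
 *                                    i.e. shifts red and green up by one bit)
 *
 * Blue stays put and the result is already RGB565 (with the new green LSB
 * zero); no carry can cross the 16 bit lane, so two pixels share one add.
 */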
239 static inline void RENAME(rgb15to16)(const uint8_t *src,uint8_t *dst,long src_size)
240 {
241   register const uint8_t* s=src;
242   register uint8_t* d=dst;
243   register const uint8_t *end;
244   const uint8_t *mm_end;
245   end = s + src_size;
246 #ifdef HAVE_MMX
247   __asm __volatile(PREFETCH"    %0"::"m"(*s));
248   __asm __volatile("movq        %0, %%mm4"::"m"(mask15s));
249   mm_end = end - 15;
250   while(s<mm_end)
251   {
252         __asm __volatile(
253                 PREFETCH"       32%1\n\t"
254                 "movq   %1, %%mm0\n\t"
255                 "movq   8%1, %%mm2\n\t"
256                 "movq   %%mm0, %%mm1\n\t"
257                 "movq   %%mm2, %%mm3\n\t"
258                 "pand   %%mm4, %%mm0\n\t"
259                 "pand   %%mm4, %%mm2\n\t"
260                 "paddw  %%mm1, %%mm0\n\t"
261                 "paddw  %%mm3, %%mm2\n\t"
262                 MOVNTQ" %%mm0, %0\n\t"
263                 MOVNTQ" %%mm2, 8%0"
264                 :"=m"(*d)
265                 :"m"(*s)
266                 );
267         d+=16;
268         s+=16;
269   }
270   __asm __volatile(SFENCE:::"memory");
271   __asm __volatile(EMMS:::"memory");
272 #endif
273     mm_end = end - 3;
274     while(s < mm_end)
275     {
276         register unsigned x= *((uint32_t *)s);
277         *((uint32_t *)d) = (x&0x7FFF7FFF) + (x&0x7FE07FE0);
278         d+=4;
279         s+=4;
280     }
281     if(s < end)
282     {
283         register unsigned short x= *((uint16_t *)s);
284         *((uint16_t *)d) = (x&0x7FFF) + (x&0x7FE0);
285     }
286 }
287
288 static inline void RENAME(rgb16to15)(const uint8_t *src,uint8_t *dst,long src_size)
289 {
290   register const uint8_t* s=src;
291   register uint8_t* d=dst;
292   register const uint8_t *end;
293   const uint8_t *mm_end;
294   end = s + src_size;
295 #ifdef HAVE_MMX
296   __asm __volatile(PREFETCH"    %0"::"m"(*s));
297   __asm __volatile("movq        %0, %%mm7"::"m"(mask15rg));
298   __asm __volatile("movq        %0, %%mm6"::"m"(mask15b));
299   mm_end = end - 15;
300   while(s<mm_end)
301   {
302         __asm __volatile(
303                 PREFETCH"       32%1\n\t"
304                 "movq   %1, %%mm0\n\t"
305                 "movq   8%1, %%mm2\n\t"
306                 "movq   %%mm0, %%mm1\n\t"
307                 "movq   %%mm2, %%mm3\n\t"
308                 "psrlq  $1, %%mm0\n\t"
309                 "psrlq  $1, %%mm2\n\t"
310                 "pand   %%mm7, %%mm0\n\t"
311                 "pand   %%mm7, %%mm2\n\t"
312                 "pand   %%mm6, %%mm1\n\t"
313                 "pand   %%mm6, %%mm3\n\t"
314                 "por    %%mm1, %%mm0\n\t"
315                 "por    %%mm3, %%mm2\n\t"
316                 MOVNTQ" %%mm0, %0\n\t"
317                 MOVNTQ" %%mm2, 8%0"
318                 :"=m"(*d)
319                 :"m"(*s)
320                 );
321         d+=16;
322         s+=16;
323   }
324   __asm __volatile(SFENCE:::"memory");
325   __asm __volatile(EMMS:::"memory");
326 #endif
327     mm_end = end - 3;
328     while(s < mm_end)
329     {
330         register uint32_t x= *((uint32_t *)s);
331         *((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F);
332         s+=4;
333         d+=4;
334     }
335     if(s < end)
336     {
337         register uint16_t x= *((uint16_t *)s);
338         *((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F);
339         s+=2;
340         d+=2;
341     }
342 }
343
344 static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, long src_size)
345 {
346         const uint8_t *s = src;
347         const uint8_t *end;
348 #ifdef HAVE_MMX
349         const uint8_t *mm_end;
350 #endif
351         uint16_t *d = (uint16_t *)dst;
352         end = s + src_size;
353 #ifdef HAVE_MMX
354         mm_end = end - 15;
355 #if 1 //is faster only if multiplies are reasonably fast (FIXME: figure out on which CPUs this is faster; on Athlon it is slightly faster)
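        /*
         * Rough idea of the multiply variant (mask3216g, mask3216br and
         * mul3216 come from the including file): the B and R bytes of each
         * pixel are isolated with one mask, and a single pmaddwd multiplies
         * them by two different constants and sums the products, dropping
         * both 5 bit fields into their RGB565 positions in one step; G is
         * masked and or'ed in separately, and the final psrld/pslld pack two
         * pixels into each doubleword.
         */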
356         asm volatile(
357                 "movq %3, %%mm5                 \n\t"
358                 "movq %4, %%mm6                 \n\t"
359                 "movq %5, %%mm7                 \n\t"
360                 ASMALIGN(4)
361                 "1:                             \n\t"
362                 PREFETCH" 32(%1)                \n\t"
363                 "movd   (%1), %%mm0             \n\t"
364                 "movd   4(%1), %%mm3            \n\t"
365                 "punpckldq 8(%1), %%mm0         \n\t"
366                 "punpckldq 12(%1), %%mm3        \n\t"
367                 "movq %%mm0, %%mm1              \n\t"
368                 "movq %%mm3, %%mm4              \n\t"
369                 "pand %%mm6, %%mm0              \n\t"
370                 "pand %%mm6, %%mm3              \n\t"
371                 "pmaddwd %%mm7, %%mm0           \n\t"
372                 "pmaddwd %%mm7, %%mm3           \n\t"
373                 "pand %%mm5, %%mm1              \n\t"
374                 "pand %%mm5, %%mm4              \n\t"
375                 "por %%mm1, %%mm0               \n\t"   
376                 "por %%mm4, %%mm3               \n\t"
377                 "psrld $5, %%mm0                \n\t"
378                 "pslld $11, %%mm3               \n\t"
379                 "por %%mm3, %%mm0               \n\t"
380                 MOVNTQ" %%mm0, (%0)             \n\t"
381                 "add $16, %1                    \n\t"
382                 "add $8, %0                     \n\t"
383                 "cmp %2, %1                     \n\t"
384                 " jb 1b                         \n\t"
385                 : "+r" (d), "+r"(s)
386                 : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216)
387         );
388 #else
389         __asm __volatile(PREFETCH"      %0"::"m"(*src):"memory");
390         __asm __volatile(
391             "movq       %0, %%mm7\n\t"
392             "movq       %1, %%mm6\n\t"
393             ::"m"(red_16mask),"m"(green_16mask));
394         while(s < mm_end)
395         {
396             __asm __volatile(
397                 PREFETCH" 32%1\n\t"
398                 "movd   %1, %%mm0\n\t"
399                 "movd   4%1, %%mm3\n\t"
400                 "punpckldq 8%1, %%mm0\n\t"
401                 "punpckldq 12%1, %%mm3\n\t"
402                 "movq   %%mm0, %%mm1\n\t"
403                 "movq   %%mm0, %%mm2\n\t"
404                 "movq   %%mm3, %%mm4\n\t"
405                 "movq   %%mm3, %%mm5\n\t"
406                 "psrlq  $3, %%mm0\n\t"
407                 "psrlq  $3, %%mm3\n\t"
408                 "pand   %2, %%mm0\n\t"
409                 "pand   %2, %%mm3\n\t"
410                 "psrlq  $5, %%mm1\n\t"
411                 "psrlq  $5, %%mm4\n\t"
412                 "pand   %%mm6, %%mm1\n\t"
413                 "pand   %%mm6, %%mm4\n\t"
414                 "psrlq  $8, %%mm2\n\t"
415                 "psrlq  $8, %%mm5\n\t"
416                 "pand   %%mm7, %%mm2\n\t"
417                 "pand   %%mm7, %%mm5\n\t"
418                 "por    %%mm1, %%mm0\n\t"
419                 "por    %%mm4, %%mm3\n\t"
420                 "por    %%mm2, %%mm0\n\t"
421                 "por    %%mm5, %%mm3\n\t"
422                 "psllq  $16, %%mm3\n\t"
423                 "por    %%mm3, %%mm0\n\t"
424                 MOVNTQ" %%mm0, %0\n\t"
425                 :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
426                 d += 4;
427                 s += 16;
428         }
429 #endif
430         __asm __volatile(SFENCE:::"memory");
431         __asm __volatile(EMMS:::"memory");
432 #endif
433         while(s < end)
434         {
435                 register int rgb = *(uint32_t*)s; s += 4;
436                 *d++ = ((rgb&0xFF)>>3) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>8);
437         }
438 }
439
440 static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long src_size)
441 {
442         const uint8_t *s = src;
443         const uint8_t *end;
444 #ifdef HAVE_MMX
445         const uint8_t *mm_end;
446 #endif
447         uint16_t *d = (uint16_t *)dst;
448         end = s + src_size;
449 #ifdef HAVE_MMX
450         __asm __volatile(PREFETCH"      %0"::"m"(*src):"memory");
451         __asm __volatile(
452             "movq       %0, %%mm7\n\t"
453             "movq       %1, %%mm6\n\t"
454             ::"m"(red_16mask),"m"(green_16mask));
455         mm_end = end - 15;
456         while(s < mm_end)
457         {
458             __asm __volatile(
459                 PREFETCH" 32%1\n\t"
460                 "movd   %1, %%mm0\n\t"
461                 "movd   4%1, %%mm3\n\t"
462                 "punpckldq 8%1, %%mm0\n\t"
463                 "punpckldq 12%1, %%mm3\n\t"
464                 "movq   %%mm0, %%mm1\n\t"
465                 "movq   %%mm0, %%mm2\n\t"
466                 "movq   %%mm3, %%mm4\n\t"
467                 "movq   %%mm3, %%mm5\n\t"
468                 "psllq  $8, %%mm0\n\t"
469                 "psllq  $8, %%mm3\n\t"
470                 "pand   %%mm7, %%mm0\n\t"
471                 "pand   %%mm7, %%mm3\n\t"
472                 "psrlq  $5, %%mm1\n\t"
473                 "psrlq  $5, %%mm4\n\t"
474                 "pand   %%mm6, %%mm1\n\t"
475                 "pand   %%mm6, %%mm4\n\t"
476                 "psrlq  $19, %%mm2\n\t"
477                 "psrlq  $19, %%mm5\n\t"
478                 "pand   %2, %%mm2\n\t"
479                 "pand   %2, %%mm5\n\t"
480                 "por    %%mm1, %%mm0\n\t"
481                 "por    %%mm4, %%mm3\n\t"
482                 "por    %%mm2, %%mm0\n\t"
483                 "por    %%mm5, %%mm3\n\t"
484                 "psllq  $16, %%mm3\n\t"
485                 "por    %%mm3, %%mm0\n\t"
486                 MOVNTQ" %%mm0, %0\n\t"
487                 :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
488                 d += 4;
489                 s += 16;
490         }
491         __asm __volatile(SFENCE:::"memory");
492         __asm __volatile(EMMS:::"memory");
493 #endif
494         while(s < end)
495         {
496                 register int rgb = *(uint32_t*)s; s += 4;
497                 *d++ = ((rgb&0xF8)<<8) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>19);
498         }
499 }
500
501 static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, long src_size)
502 {
503         const uint8_t *s = src;
504         const uint8_t *end;
505 #ifdef HAVE_MMX
506         const uint8_t *mm_end;
507 #endif
508         uint16_t *d = (uint16_t *)dst;
509         end = s + src_size;
510 #ifdef HAVE_MMX
511         mm_end = end - 15;
512 #if 1 //is faster only if multiplies are reasonably fast (FIXME: figure out on which CPUs this is faster; on Athlon it is slightly faster)
513         asm volatile(
514                 "movq %3, %%mm5                 \n\t"
515                 "movq %4, %%mm6                 \n\t"
516                 "movq %5, %%mm7                 \n\t"
517                 ASMALIGN(4)
518                 "1:                             \n\t"
519                 PREFETCH" 32(%1)                \n\t"
520                 "movd   (%1), %%mm0             \n\t"
521                 "movd   4(%1), %%mm3            \n\t"
522                 "punpckldq 8(%1), %%mm0         \n\t"
523                 "punpckldq 12(%1), %%mm3        \n\t"
524                 "movq %%mm0, %%mm1              \n\t"
525                 "movq %%mm3, %%mm4              \n\t"
526                 "pand %%mm6, %%mm0              \n\t"
527                 "pand %%mm6, %%mm3              \n\t"
528                 "pmaddwd %%mm7, %%mm0           \n\t"
529                 "pmaddwd %%mm7, %%mm3           \n\t"
530                 "pand %%mm5, %%mm1              \n\t"
531                 "pand %%mm5, %%mm4              \n\t"
532                 "por %%mm1, %%mm0               \n\t"   
533                 "por %%mm4, %%mm3               \n\t"
534                 "psrld $6, %%mm0                \n\t"
535                 "pslld $10, %%mm3               \n\t"
536                 "por %%mm3, %%mm0               \n\t"
537                 MOVNTQ" %%mm0, (%0)             \n\t"
538                 "add $16, %1                    \n\t"
539                 "add $8, %0                     \n\t"
540                 "cmp %2, %1                     \n\t"
541                 " jb 1b                         \n\t"
542                 : "+r" (d), "+r"(s)
543                 : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215)
544         );
545 #else
546         __asm __volatile(PREFETCH"      %0"::"m"(*src):"memory");
547         __asm __volatile(
548             "movq       %0, %%mm7\n\t"
549             "movq       %1, %%mm6\n\t"
550             ::"m"(red_15mask),"m"(green_15mask));
551         while(s < mm_end)
552         {
553             __asm __volatile(
554                 PREFETCH" 32%1\n\t"
555                 "movd   %1, %%mm0\n\t"
556                 "movd   4%1, %%mm3\n\t"
557                 "punpckldq 8%1, %%mm0\n\t"
558                 "punpckldq 12%1, %%mm3\n\t"
559                 "movq   %%mm0, %%mm1\n\t"
560                 "movq   %%mm0, %%mm2\n\t"
561                 "movq   %%mm3, %%mm4\n\t"
562                 "movq   %%mm3, %%mm5\n\t"
563                 "psrlq  $3, %%mm0\n\t"
564                 "psrlq  $3, %%mm3\n\t"
565                 "pand   %2, %%mm0\n\t"
566                 "pand   %2, %%mm3\n\t"
567                 "psrlq  $6, %%mm1\n\t"
568                 "psrlq  $6, %%mm4\n\t"
569                 "pand   %%mm6, %%mm1\n\t"
570                 "pand   %%mm6, %%mm4\n\t"
571                 "psrlq  $9, %%mm2\n\t"
572                 "psrlq  $9, %%mm5\n\t"
573                 "pand   %%mm7, %%mm2\n\t"
574                 "pand   %%mm7, %%mm5\n\t"
575                 "por    %%mm1, %%mm0\n\t"
576                 "por    %%mm4, %%mm3\n\t"
577                 "por    %%mm2, %%mm0\n\t"
578                 "por    %%mm5, %%mm3\n\t"
579                 "psllq  $16, %%mm3\n\t"
580                 "por    %%mm3, %%mm0\n\t"
581                 MOVNTQ" %%mm0, %0\n\t"
582                 :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
583                 d += 4;
584                 s += 16;
585         }
586 #endif
587         __asm __volatile(SFENCE:::"memory");
588         __asm __volatile(EMMS:::"memory");
589 #endif
590         while(s < end)
591         {
592                 register int rgb = *(uint32_t*)s; s += 4;
593                 *d++ = ((rgb&0xFF)>>3) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>9);
594         }
595 }
596
597 static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long src_size)
598 {
599         const uint8_t *s = src;
600         const uint8_t *end;
601 #ifdef HAVE_MMX
602         const uint8_t *mm_end;
603 #endif
604         uint16_t *d = (uint16_t *)dst;
605         end = s + src_size;
606 #ifdef HAVE_MMX
607         __asm __volatile(PREFETCH"      %0"::"m"(*src):"memory");
608         __asm __volatile(
609             "movq       %0, %%mm7\n\t"
610             "movq       %1, %%mm6\n\t"
611             ::"m"(red_15mask),"m"(green_15mask));
612         mm_end = end - 15;
613         while(s < mm_end)
614         {
615             __asm __volatile(
616                 PREFETCH" 32%1\n\t"
617                 "movd   %1, %%mm0\n\t"
618                 "movd   4%1, %%mm3\n\t"
619                 "punpckldq 8%1, %%mm0\n\t"
620                 "punpckldq 12%1, %%mm3\n\t"
621                 "movq   %%mm0, %%mm1\n\t"
622                 "movq   %%mm0, %%mm2\n\t"
623                 "movq   %%mm3, %%mm4\n\t"
624                 "movq   %%mm3, %%mm5\n\t"
625                 "psllq  $7, %%mm0\n\t"
626                 "psllq  $7, %%mm3\n\t"
627                 "pand   %%mm7, %%mm0\n\t"
628                 "pand   %%mm7, %%mm3\n\t"
629                 "psrlq  $6, %%mm1\n\t"
630                 "psrlq  $6, %%mm4\n\t"
631                 "pand   %%mm6, %%mm1\n\t"
632                 "pand   %%mm6, %%mm4\n\t"
633                 "psrlq  $19, %%mm2\n\t"
634                 "psrlq  $19, %%mm5\n\t"
635                 "pand   %2, %%mm2\n\t"
636                 "pand   %2, %%mm5\n\t"
637                 "por    %%mm1, %%mm0\n\t"
638                 "por    %%mm4, %%mm3\n\t"
639                 "por    %%mm2, %%mm0\n\t"
640                 "por    %%mm5, %%mm3\n\t"
641                 "psllq  $16, %%mm3\n\t"
642                 "por    %%mm3, %%mm0\n\t"
643                 MOVNTQ" %%mm0, %0\n\t"
644                 :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
645                 d += 4;
646                 s += 16;
647         }
648         __asm __volatile(SFENCE:::"memory");
649         __asm __volatile(EMMS:::"memory");
650 #endif
651         while(s < end)
652         {
653                 register int rgb = *(uint32_t*)s; s += 4;
654                 *d++ = ((rgb&0xF8)<<7) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>19);
655         }
656 }
657
658 static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, long src_size)
659 {
660         const uint8_t *s = src;
661         const uint8_t *end;
662 #ifdef HAVE_MMX
663         const uint8_t *mm_end;
664 #endif
665         uint16_t *d = (uint16_t *)dst;
666         end = s + src_size;
667 #ifdef HAVE_MMX
668         __asm __volatile(PREFETCH"      %0"::"m"(*src):"memory");
669         __asm __volatile(
670             "movq       %0, %%mm7\n\t"
671             "movq       %1, %%mm6\n\t"
672             ::"m"(red_16mask),"m"(green_16mask));
673         mm_end = end - 11;
674         while(s < mm_end)
675         {
676             __asm __volatile(
677                 PREFETCH" 32%1\n\t"
678                 "movd   %1, %%mm0\n\t"
679                 "movd   3%1, %%mm3\n\t"
680                 "punpckldq 6%1, %%mm0\n\t"
681                 "punpckldq 9%1, %%mm3\n\t"
682                 "movq   %%mm0, %%mm1\n\t"
683                 "movq   %%mm0, %%mm2\n\t"
684                 "movq   %%mm3, %%mm4\n\t"
685                 "movq   %%mm3, %%mm5\n\t"
686                 "psrlq  $3, %%mm0\n\t"
687                 "psrlq  $3, %%mm3\n\t"
688                 "pand   %2, %%mm0\n\t"
689                 "pand   %2, %%mm3\n\t"
690                 "psrlq  $5, %%mm1\n\t"
691                 "psrlq  $5, %%mm4\n\t"
692                 "pand   %%mm6, %%mm1\n\t"
693                 "pand   %%mm6, %%mm4\n\t"
694                 "psrlq  $8, %%mm2\n\t"
695                 "psrlq  $8, %%mm5\n\t"
696                 "pand   %%mm7, %%mm2\n\t"
697                 "pand   %%mm7, %%mm5\n\t"
698                 "por    %%mm1, %%mm0\n\t"
699                 "por    %%mm4, %%mm3\n\t"
700                 "por    %%mm2, %%mm0\n\t"
701                 "por    %%mm5, %%mm3\n\t"
702                 "psllq  $16, %%mm3\n\t"
703                 "por    %%mm3, %%mm0\n\t"
704                 MOVNTQ" %%mm0, %0\n\t"
705                 :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
706                 d += 4;
707                 s += 12;
708         }
709         __asm __volatile(SFENCE:::"memory");
710         __asm __volatile(EMMS:::"memory");
711 #endif
712         while(s < end)
713         {
714                 const int b= *s++;
715                 const int g= *s++;
716                 const int r= *s++;
717                 *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
718         }
719 }
720
721 static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size)
722 {
723         const uint8_t *s = src;
724         const uint8_t *end;
725 #ifdef HAVE_MMX
726         const uint8_t *mm_end;
727 #endif
728         uint16_t *d = (uint16_t *)dst;
729         end = s + src_size;
730 #ifdef HAVE_MMX
731         __asm __volatile(PREFETCH"      %0"::"m"(*src):"memory");
732         __asm __volatile(
733             "movq       %0, %%mm7\n\t"
734             "movq       %1, %%mm6\n\t"
735             ::"m"(red_16mask),"m"(green_16mask));
736         mm_end = end - 15;
737         while(s < mm_end)
738         {
739             __asm __volatile(
740                 PREFETCH" 32%1\n\t"
741                 "movd   %1, %%mm0\n\t"
742                 "movd   3%1, %%mm3\n\t"
743                 "punpckldq 6%1, %%mm0\n\t"
744                 "punpckldq 9%1, %%mm3\n\t"
745                 "movq   %%mm0, %%mm1\n\t"
746                 "movq   %%mm0, %%mm2\n\t"
747                 "movq   %%mm3, %%mm4\n\t"
748                 "movq   %%mm3, %%mm5\n\t"
749                 "psllq  $8, %%mm0\n\t"
750                 "psllq  $8, %%mm3\n\t"
751                 "pand   %%mm7, %%mm0\n\t"
752                 "pand   %%mm7, %%mm3\n\t"
753                 "psrlq  $5, %%mm1\n\t"
754                 "psrlq  $5, %%mm4\n\t"
755                 "pand   %%mm6, %%mm1\n\t"
756                 "pand   %%mm6, %%mm4\n\t"
757                 "psrlq  $19, %%mm2\n\t"
758                 "psrlq  $19, %%mm5\n\t"
759                 "pand   %2, %%mm2\n\t"
760                 "pand   %2, %%mm5\n\t"
761                 "por    %%mm1, %%mm0\n\t"
762                 "por    %%mm4, %%mm3\n\t"
763                 "por    %%mm2, %%mm0\n\t"
764                 "por    %%mm5, %%mm3\n\t"
765                 "psllq  $16, %%mm3\n\t"
766                 "por    %%mm3, %%mm0\n\t"
767                 MOVNTQ" %%mm0, %0\n\t"
768                 :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
769                 d += 4;
770                 s += 12;
771         }
772         __asm __volatile(SFENCE:::"memory");
773         __asm __volatile(EMMS:::"memory");
774 #endif
775         while(s < end)
776         {
777                 const int r= *s++;
778                 const int g= *s++;
779                 const int b= *s++;
780                 *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
781         }
782 }
783
784 static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_size)
785 {
786         const uint8_t *s = src;
787         const uint8_t *end;
788 #ifdef HAVE_MMX
789         const uint8_t *mm_end;
790 #endif
791         uint16_t *d = (uint16_t *)dst;
792         end = s + src_size;
793 #ifdef HAVE_MMX
794         __asm __volatile(PREFETCH"      %0"::"m"(*src):"memory");
795         __asm __volatile(
796             "movq       %0, %%mm7\n\t"
797             "movq       %1, %%mm6\n\t"
798             ::"m"(red_15mask),"m"(green_15mask));
799         mm_end = end - 11;
800         while(s < mm_end)
801         {
802             __asm __volatile(
803                 PREFETCH" 32%1\n\t"
804                 "movd   %1, %%mm0\n\t"
805                 "movd   3%1, %%mm3\n\t"
806                 "punpckldq 6%1, %%mm0\n\t"
807                 "punpckldq 9%1, %%mm3\n\t"
808                 "movq   %%mm0, %%mm1\n\t"
809                 "movq   %%mm0, %%mm2\n\t"
810                 "movq   %%mm3, %%mm4\n\t"
811                 "movq   %%mm3, %%mm5\n\t"
812                 "psrlq  $3, %%mm0\n\t"
813                 "psrlq  $3, %%mm3\n\t"
814                 "pand   %2, %%mm0\n\t"
815                 "pand   %2, %%mm3\n\t"
816                 "psrlq  $6, %%mm1\n\t"
817                 "psrlq  $6, %%mm4\n\t"
818                 "pand   %%mm6, %%mm1\n\t"
819                 "pand   %%mm6, %%mm4\n\t"
820                 "psrlq  $9, %%mm2\n\t"
821                 "psrlq  $9, %%mm5\n\t"
822                 "pand   %%mm7, %%mm2\n\t"
823                 "pand   %%mm7, %%mm5\n\t"
824                 "por    %%mm1, %%mm0\n\t"
825                 "por    %%mm4, %%mm3\n\t"
826                 "por    %%mm2, %%mm0\n\t"
827                 "por    %%mm5, %%mm3\n\t"
828                 "psllq  $16, %%mm3\n\t"
829                 "por    %%mm3, %%mm0\n\t"
830                 MOVNTQ" %%mm0, %0\n\t"
831                 :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
832                 d += 4;
833                 s += 12;
834         }
835         __asm __volatile(SFENCE:::"memory");
836         __asm __volatile(EMMS:::"memory");
837 #endif
838         while(s < end)
839         {
840                 const int b= *s++;
841                 const int g= *s++;
842                 const int r= *s++;
843                 *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
844         }
845 }
846
847 static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long src_size)
848 {
849         const uint8_t *s = src;
850         const uint8_t *end;
851 #ifdef HAVE_MMX
852         const uint8_t *mm_end;
853 #endif
854         uint16_t *d = (uint16_t *)dst;
855         end = s + src_size;
856 #ifdef HAVE_MMX
857         __asm __volatile(PREFETCH"      %0"::"m"(*src):"memory");
858         __asm __volatile(
859             "movq       %0, %%mm7\n\t"
860             "movq       %1, %%mm6\n\t"
861             ::"m"(red_15mask),"m"(green_15mask));
862         mm_end = end - 15;
863         while(s < mm_end)
864         {
865             __asm __volatile(
866                 PREFETCH" 32%1\n\t"
867                 "movd   %1, %%mm0\n\t"
868                 "movd   3%1, %%mm3\n\t"
869                 "punpckldq 6%1, %%mm0\n\t"
870                 "punpckldq 9%1, %%mm3\n\t"
871                 "movq   %%mm0, %%mm1\n\t"
872                 "movq   %%mm0, %%mm2\n\t"
873                 "movq   %%mm3, %%mm4\n\t"
874                 "movq   %%mm3, %%mm5\n\t"
875                 "psllq  $7, %%mm0\n\t"
876                 "psllq  $7, %%mm3\n\t"
877                 "pand   %%mm7, %%mm0\n\t"
878                 "pand   %%mm7, %%mm3\n\t"
879                 "psrlq  $6, %%mm1\n\t"
880                 "psrlq  $6, %%mm4\n\t"
881                 "pand   %%mm6, %%mm1\n\t"
882                 "pand   %%mm6, %%mm4\n\t"
883                 "psrlq  $19, %%mm2\n\t"
884                 "psrlq  $19, %%mm5\n\t"
885                 "pand   %2, %%mm2\n\t"
886                 "pand   %2, %%mm5\n\t"
887                 "por    %%mm1, %%mm0\n\t"
888                 "por    %%mm4, %%mm3\n\t"
889                 "por    %%mm2, %%mm0\n\t"
890                 "por    %%mm5, %%mm3\n\t"
891                 "psllq  $16, %%mm3\n\t"
892                 "por    %%mm3, %%mm0\n\t"
893                 MOVNTQ" %%mm0, %0\n\t"
894                 :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
895                 d += 4;
896                 s += 12;
897         }
898         __asm __volatile(SFENCE:::"memory");
899         __asm __volatile(EMMS:::"memory");
900 #endif
901         while(s < end)
902         {
903                 const int r= *s++;
904                 const int g= *s++;
905                 const int b= *s++;
906                 *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
907         }
908 }
909
910 /*
911   I use here a less accurate approximation by simply left-shifting the
912   input value and filling the low order bits with zeroes. This method
913   improves PNG compression, but this scheme cannot reproduce white exactly,
914   since it does not generate an all-ones maximum value (a 5 bit channel
915   maxes out at 0x1F<<3 = 0xF8 rather than 0xFF); the net effect is to
916   darken the image slightly.
917
918
919   The better method should be "left bit replication":
920
921    4 3 2 1 0
922    ---------
923    1 1 0 1 1
924
925    7 6 5 4 3  2 1 0
926    ----------------
927    1 1 0 1 1  1 1 0
928    |=======|  |===|
929        |      Leftmost Bits Repeated to Fill Open Bits
930        |
931    Original Bits
932 */
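/*
 * A minimal sketch of that left bit replication for one 5 bit channel (not
 * used by the code below, shown only for comparison):
 *
 *   static inline uint8_t expand5(uint8_t v)   // v in [0..31]
 *   {
 *       return (v << 3) | (v >> 2);            // repeat the top bits below
 *   }
 *
 * expand5(0x1F) == 0xFF, so full white is reproduced exactly, unlike the
 * plain (v << 3) used here.
 */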
933 static inline void RENAME(rgb15to24)(const uint8_t *src, uint8_t *dst, long src_size)
934 {
935         const uint16_t *end;
936 #ifdef HAVE_MMX
937         const uint16_t *mm_end;
938 #endif
939         uint8_t *d = (uint8_t *)dst;
940         const uint16_t *s = (uint16_t *)src;
941         end = s + src_size/2;
942 #ifdef HAVE_MMX
943         __asm __volatile(PREFETCH"      %0"::"m"(*s):"memory");
944         mm_end = end - 7;
945         while(s < mm_end)
946         {
947             __asm __volatile(
948                 PREFETCH" 32%1\n\t"
949                 "movq   %1, %%mm0\n\t"
950                 "movq   %1, %%mm1\n\t"
951                 "movq   %1, %%mm2\n\t"
952                 "pand   %2, %%mm0\n\t"
953                 "pand   %3, %%mm1\n\t"
954                 "pand   %4, %%mm2\n\t"
955                 "psllq  $3, %%mm0\n\t"
956                 "psrlq  $2, %%mm1\n\t"
957                 "psrlq  $7, %%mm2\n\t"
958                 "movq   %%mm0, %%mm3\n\t"
959                 "movq   %%mm1, %%mm4\n\t"
960                 "movq   %%mm2, %%mm5\n\t"
961                 "punpcklwd %5, %%mm0\n\t"
962                 "punpcklwd %5, %%mm1\n\t"
963                 "punpcklwd %5, %%mm2\n\t"
964                 "punpckhwd %5, %%mm3\n\t"
965                 "punpckhwd %5, %%mm4\n\t"
966                 "punpckhwd %5, %%mm5\n\t"
967                 "psllq  $8, %%mm1\n\t"
968                 "psllq  $16, %%mm2\n\t"
969                 "por    %%mm1, %%mm0\n\t"
970                 "por    %%mm2, %%mm0\n\t"
971                 "psllq  $8, %%mm4\n\t"
972                 "psllq  $16, %%mm5\n\t"
973                 "por    %%mm4, %%mm3\n\t"
974                 "por    %%mm5, %%mm3\n\t"
975
976                 "movq   %%mm0, %%mm6\n\t"
977                 "movq   %%mm3, %%mm7\n\t"
978                 
979                 "movq   8%1, %%mm0\n\t"
980                 "movq   8%1, %%mm1\n\t"
981                 "movq   8%1, %%mm2\n\t"
982                 "pand   %2, %%mm0\n\t"
983                 "pand   %3, %%mm1\n\t"
984                 "pand   %4, %%mm2\n\t"
985                 "psllq  $3, %%mm0\n\t"
986                 "psrlq  $2, %%mm1\n\t"
987                 "psrlq  $7, %%mm2\n\t"
988                 "movq   %%mm0, %%mm3\n\t"
989                 "movq   %%mm1, %%mm4\n\t"
990                 "movq   %%mm2, %%mm5\n\t"
991                 "punpcklwd %5, %%mm0\n\t"
992                 "punpcklwd %5, %%mm1\n\t"
993                 "punpcklwd %5, %%mm2\n\t"
994                 "punpckhwd %5, %%mm3\n\t"
995                 "punpckhwd %5, %%mm4\n\t"
996                 "punpckhwd %5, %%mm5\n\t"
997                 "psllq  $8, %%mm1\n\t"
998                 "psllq  $16, %%mm2\n\t"
999                 "por    %%mm1, %%mm0\n\t"
1000                 "por    %%mm2, %%mm0\n\t"
1001                 "psllq  $8, %%mm4\n\t"
1002                 "psllq  $16, %%mm5\n\t"
1003                 "por    %%mm4, %%mm3\n\t"
1004                 "por    %%mm5, %%mm3\n\t"
1005
1006                 :"=m"(*d)
1007                 :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r), "m"(mmx_null)
1008                 :"memory");
1009             /* Borrowed 32 to 24 */
1010             __asm __volatile(
1011                 "movq   %%mm0, %%mm4\n\t"
1012                 "movq   %%mm3, %%mm5\n\t"
1013                 "movq   %%mm6, %%mm0\n\t"
1014                 "movq   %%mm7, %%mm1\n\t"
1015                 
1016                 "movq   %%mm4, %%mm6\n\t"
1017                 "movq   %%mm5, %%mm7\n\t"
1018                 "movq   %%mm0, %%mm2\n\t"
1019                 "movq   %%mm1, %%mm3\n\t"
1020
1021                 "psrlq  $8, %%mm2\n\t"
1022                 "psrlq  $8, %%mm3\n\t"
1023                 "psrlq  $8, %%mm6\n\t"
1024                 "psrlq  $8, %%mm7\n\t"
1025                 "pand   %2, %%mm0\n\t"
1026                 "pand   %2, %%mm1\n\t"
1027                 "pand   %2, %%mm4\n\t"
1028                 "pand   %2, %%mm5\n\t"
1029                 "pand   %3, %%mm2\n\t"
1030                 "pand   %3, %%mm3\n\t"
1031                 "pand   %3, %%mm6\n\t"
1032                 "pand   %3, %%mm7\n\t"
1033                 "por    %%mm2, %%mm0\n\t"
1034                 "por    %%mm3, %%mm1\n\t"
1035                 "por    %%mm6, %%mm4\n\t"
1036                 "por    %%mm7, %%mm5\n\t"
1037
1038                 "movq   %%mm1, %%mm2\n\t"
1039                 "movq   %%mm4, %%mm3\n\t"
1040                 "psllq  $48, %%mm2\n\t"
1041                 "psllq  $32, %%mm3\n\t"
1042                 "pand   %4, %%mm2\n\t"
1043                 "pand   %5, %%mm3\n\t"
1044                 "por    %%mm2, %%mm0\n\t"
1045                 "psrlq  $16, %%mm1\n\t"
1046                 "psrlq  $32, %%mm4\n\t"
1047                 "psllq  $16, %%mm5\n\t"
1048                 "por    %%mm3, %%mm1\n\t"
1049                 "pand   %6, %%mm5\n\t"
1050                 "por    %%mm5, %%mm4\n\t"
1051
1052                 MOVNTQ" %%mm0, %0\n\t"
1053                 MOVNTQ" %%mm1, 8%0\n\t"
1054                 MOVNTQ" %%mm4, 16%0"
1055
1056                 :"=m"(*d)
1057                 :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
1058                 :"memory");
1059                 d += 24;
1060                 s += 8;
1061         }
1062         __asm __volatile(SFENCE:::"memory");
1063         __asm __volatile(EMMS:::"memory");
1064 #endif
1065         while(s < end)
1066         {
1067                 register uint16_t bgr;
1068                 bgr = *s++;
1069                 *d++ = (bgr&0x1F)<<3;
1070                 *d++ = (bgr&0x3E0)>>2;
1071                 *d++ = (bgr&0x7C00)>>7;
1072         }
1073 }
1074
1075 static inline void RENAME(rgb16to24)(const uint8_t *src, uint8_t *dst, long src_size)
1076 {
1077         const uint16_t *end;
1078 #ifdef HAVE_MMX
1079         const uint16_t *mm_end;
1080 #endif
1081         uint8_t *d = (uint8_t *)dst;
1082         const uint16_t *s = (const uint16_t *)src;
1083         end = s + src_size/2;
1084 #ifdef HAVE_MMX
1085         __asm __volatile(PREFETCH"      %0"::"m"(*s):"memory");
1086         mm_end = end - 7;
1087         while(s < mm_end)
1088         {
1089             __asm __volatile(
1090                 PREFETCH" 32%1\n\t"
1091                 "movq   %1, %%mm0\n\t"
1092                 "movq   %1, %%mm1\n\t"
1093                 "movq   %1, %%mm2\n\t"
1094                 "pand   %2, %%mm0\n\t"
1095                 "pand   %3, %%mm1\n\t"
1096                 "pand   %4, %%mm2\n\t"
1097                 "psllq  $3, %%mm0\n\t"
1098                 "psrlq  $3, %%mm1\n\t"
1099                 "psrlq  $8, %%mm2\n\t"
1100                 "movq   %%mm0, %%mm3\n\t"
1101                 "movq   %%mm1, %%mm4\n\t"
1102                 "movq   %%mm2, %%mm5\n\t"
1103                 "punpcklwd %5, %%mm0\n\t"
1104                 "punpcklwd %5, %%mm1\n\t"
1105                 "punpcklwd %5, %%mm2\n\t"
1106                 "punpckhwd %5, %%mm3\n\t"
1107                 "punpckhwd %5, %%mm4\n\t"
1108                 "punpckhwd %5, %%mm5\n\t"
1109                 "psllq  $8, %%mm1\n\t"
1110                 "psllq  $16, %%mm2\n\t"
1111                 "por    %%mm1, %%mm0\n\t"
1112                 "por    %%mm2, %%mm0\n\t"
1113                 "psllq  $8, %%mm4\n\t"
1114                 "psllq  $16, %%mm5\n\t"
1115                 "por    %%mm4, %%mm3\n\t"
1116                 "por    %%mm5, %%mm3\n\t"
1117                 
1118                 "movq   %%mm0, %%mm6\n\t"
1119                 "movq   %%mm3, %%mm7\n\t"
1120
1121                 "movq   8%1, %%mm0\n\t"
1122                 "movq   8%1, %%mm1\n\t"
1123                 "movq   8%1, %%mm2\n\t"
1124                 "pand   %2, %%mm0\n\t"
1125                 "pand   %3, %%mm1\n\t"
1126                 "pand   %4, %%mm2\n\t"
1127                 "psllq  $3, %%mm0\n\t"
1128                 "psrlq  $3, %%mm1\n\t"
1129                 "psrlq  $8, %%mm2\n\t"
1130                 "movq   %%mm0, %%mm3\n\t"
1131                 "movq   %%mm1, %%mm4\n\t"
1132                 "movq   %%mm2, %%mm5\n\t"
1133                 "punpcklwd %5, %%mm0\n\t"
1134                 "punpcklwd %5, %%mm1\n\t"
1135                 "punpcklwd %5, %%mm2\n\t"
1136                 "punpckhwd %5, %%mm3\n\t"
1137                 "punpckhwd %5, %%mm4\n\t"
1138                 "punpckhwd %5, %%mm5\n\t"
1139                 "psllq  $8, %%mm1\n\t"
1140                 "psllq  $16, %%mm2\n\t"
1141                 "por    %%mm1, %%mm0\n\t"
1142                 "por    %%mm2, %%mm0\n\t"
1143                 "psllq  $8, %%mm4\n\t"
1144                 "psllq  $16, %%mm5\n\t"
1145                 "por    %%mm4, %%mm3\n\t"
1146                 "por    %%mm5, %%mm3\n\t"
1147                 :"=m"(*d)
1148                 :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null)           
1149                 :"memory");
1150             /* Borrowed 32 to 24 */
1151             __asm __volatile(
1152                 "movq   %%mm0, %%mm4\n\t"
1153                 "movq   %%mm3, %%mm5\n\t"
1154                 "movq   %%mm6, %%mm0\n\t"
1155                 "movq   %%mm7, %%mm1\n\t"
1156                 
1157                 "movq   %%mm4, %%mm6\n\t"
1158                 "movq   %%mm5, %%mm7\n\t"
1159                 "movq   %%mm0, %%mm2\n\t"
1160                 "movq   %%mm1, %%mm3\n\t"
1161
1162                 "psrlq  $8, %%mm2\n\t"
1163                 "psrlq  $8, %%mm3\n\t"
1164                 "psrlq  $8, %%mm6\n\t"
1165                 "psrlq  $8, %%mm7\n\t"
1166                 "pand   %2, %%mm0\n\t"
1167                 "pand   %2, %%mm1\n\t"
1168                 "pand   %2, %%mm4\n\t"
1169                 "pand   %2, %%mm5\n\t"
1170                 "pand   %3, %%mm2\n\t"
1171                 "pand   %3, %%mm3\n\t"
1172                 "pand   %3, %%mm6\n\t"
1173                 "pand   %3, %%mm7\n\t"
1174                 "por    %%mm2, %%mm0\n\t"
1175                 "por    %%mm3, %%mm1\n\t"
1176                 "por    %%mm6, %%mm4\n\t"
1177                 "por    %%mm7, %%mm5\n\t"
1178
1179                 "movq   %%mm1, %%mm2\n\t"
1180                 "movq   %%mm4, %%mm3\n\t"
1181                 "psllq  $48, %%mm2\n\t"
1182                 "psllq  $32, %%mm3\n\t"
1183                 "pand   %4, %%mm2\n\t"
1184                 "pand   %5, %%mm3\n\t"
1185                 "por    %%mm2, %%mm0\n\t"
1186                 "psrlq  $16, %%mm1\n\t"
1187                 "psrlq  $32, %%mm4\n\t"
1188                 "psllq  $16, %%mm5\n\t"
1189                 "por    %%mm3, %%mm1\n\t"
1190                 "pand   %6, %%mm5\n\t"
1191                 "por    %%mm5, %%mm4\n\t"
1192
1193                 MOVNTQ" %%mm0, %0\n\t"
1194                 MOVNTQ" %%mm1, 8%0\n\t"
1195                 MOVNTQ" %%mm4, 16%0"
1196
1197                 :"=m"(*d)
1198                 :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
1199                 :"memory");
1200                 d += 24;
1201                 s += 8;
1202         }
1203         __asm __volatile(SFENCE:::"memory");
1204         __asm __volatile(EMMS:::"memory");
1205 #endif
1206         while(s < end)
1207         {
1208                 register uint16_t bgr;
1209                 bgr = *s++;
1210                 *d++ = (bgr&0x1F)<<3;
1211                 *d++ = (bgr&0x7E0)>>3;
1212                 *d++ = (bgr&0xF800)>>8;
1213         }
1214 }
1215
1216 static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_size)
1217 {
1218         const uint16_t *end;
1219 #ifdef HAVE_MMX
1220         const uint16_t *mm_end;
1221 #endif
1222         uint8_t *d = (uint8_t *)dst;
1223         const uint16_t *s = (const uint16_t *)src;
1224         end = s + src_size/2;
1225 #ifdef HAVE_MMX
1226         __asm __volatile(PREFETCH"      %0"::"m"(*s):"memory");
1227         __asm __volatile("pxor  %%mm7,%%mm7\n\t":::"memory");
1228         mm_end = end - 3;
1229         while(s < mm_end)
1230         {
1231             __asm __volatile(
1232                 PREFETCH" 32%1\n\t"
1233                 "movq   %1, %%mm0\n\t"
1234                 "movq   %1, %%mm1\n\t"
1235                 "movq   %1, %%mm2\n\t"
1236                 "pand   %2, %%mm0\n\t"
1237                 "pand   %3, %%mm1\n\t"
1238                 "pand   %4, %%mm2\n\t"
1239                 "psllq  $3, %%mm0\n\t"
1240                 "psrlq  $2, %%mm1\n\t"
1241                 "psrlq  $7, %%mm2\n\t"
1242                 "movq   %%mm0, %%mm3\n\t"
1243                 "movq   %%mm1, %%mm4\n\t"
1244                 "movq   %%mm2, %%mm5\n\t"
1245                 "punpcklwd %%mm7, %%mm0\n\t"
1246                 "punpcklwd %%mm7, %%mm1\n\t"
1247                 "punpcklwd %%mm7, %%mm2\n\t"
1248                 "punpckhwd %%mm7, %%mm3\n\t"
1249                 "punpckhwd %%mm7, %%mm4\n\t"
1250                 "punpckhwd %%mm7, %%mm5\n\t"
1251                 "psllq  $8, %%mm1\n\t"
1252                 "psllq  $16, %%mm2\n\t"
1253                 "por    %%mm1, %%mm0\n\t"
1254                 "por    %%mm2, %%mm0\n\t"
1255                 "psllq  $8, %%mm4\n\t"
1256                 "psllq  $16, %%mm5\n\t"
1257                 "por    %%mm4, %%mm3\n\t"
1258                 "por    %%mm5, %%mm3\n\t"
1259                 MOVNTQ" %%mm0, %0\n\t"
1260                 MOVNTQ" %%mm3, 8%0\n\t"
1261                 :"=m"(*d)
1262                 :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r)
1263                 :"memory");
1264                 d += 16;
1265                 s += 4;
1266         }
1267         __asm __volatile(SFENCE:::"memory");
1268         __asm __volatile(EMMS:::"memory");
1269 #endif
1270         while(s < end)
1271         {
1272 #if 0 //slightly slower on athlon
1273                 int bgr= *s++;
1274                 *((uint32_t*)d)++ = ((bgr&0x1F)<<3) + ((bgr&0x3E0)<<6) + ((bgr&0x7C00)<<9);
1275 #else
1276                 register uint16_t bgr;
1277                 bgr = *s++;
1278 #ifdef WORDS_BIGENDIAN
1279                 *d++ = 0;
1280                 *d++ = (bgr&0x7C00)>>7;
1281                 *d++ = (bgr&0x3E0)>>2;
1282                 *d++ = (bgr&0x1F)<<3;
1283 #else
1284                 *d++ = (bgr&0x1F)<<3;
1285                 *d++ = (bgr&0x3E0)>>2;
1286                 *d++ = (bgr&0x7C00)>>7;
1287                 *d++ = 0;
1288 #endif
1289
1290 #endif
1291         }
1292 }
1293
1294 static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size)
1295 {
1296         const uint16_t *end;
1297 #ifdef HAVE_MMX
1298         const uint16_t *mm_end;
1299 #endif
1300         uint8_t *d = (uint8_t *)dst;
1301         const uint16_t *s = (uint16_t *)src;
1302         end = s + src_size/2;
1303 #ifdef HAVE_MMX
1304         __asm __volatile(PREFETCH"      %0"::"m"(*s):"memory");
1305         __asm __volatile("pxor  %%mm7,%%mm7\n\t":::"memory");
1306         mm_end = end - 3;
1307         while(s < mm_end)
1308         {
1309             __asm __volatile(
1310                 PREFETCH" 32%1\n\t"
1311                 "movq   %1, %%mm0\n\t"
1312                 "movq   %1, %%mm1\n\t"
1313                 "movq   %1, %%mm2\n\t"
1314                 "pand   %2, %%mm0\n\t"
1315                 "pand   %3, %%mm1\n\t"
1316                 "pand   %4, %%mm2\n\t"
1317                 "psllq  $3, %%mm0\n\t"
1318                 "psrlq  $3, %%mm1\n\t"
1319                 "psrlq  $8, %%mm2\n\t"
1320                 "movq   %%mm0, %%mm3\n\t"
1321                 "movq   %%mm1, %%mm4\n\t"
1322                 "movq   %%mm2, %%mm5\n\t"
1323                 "punpcklwd %%mm7, %%mm0\n\t"
1324                 "punpcklwd %%mm7, %%mm1\n\t"
1325                 "punpcklwd %%mm7, %%mm2\n\t"
1326                 "punpckhwd %%mm7, %%mm3\n\t"
1327                 "punpckhwd %%mm7, %%mm4\n\t"
1328                 "punpckhwd %%mm7, %%mm5\n\t"
1329                 "psllq  $8, %%mm1\n\t"
1330                 "psllq  $16, %%mm2\n\t"
1331                 "por    %%mm1, %%mm0\n\t"
1332                 "por    %%mm2, %%mm0\n\t"
1333                 "psllq  $8, %%mm4\n\t"
1334                 "psllq  $16, %%mm5\n\t"
1335                 "por    %%mm4, %%mm3\n\t"
1336                 "por    %%mm5, %%mm3\n\t"
1337                 MOVNTQ" %%mm0, %0\n\t"
1338                 MOVNTQ" %%mm3, 8%0\n\t"
1339                 :"=m"(*d)
1340                 :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r)
1341                 :"memory");
1342                 d += 16;
1343                 s += 4;
1344         }
1345         __asm __volatile(SFENCE:::"memory");
1346         __asm __volatile(EMMS:::"memory");
1347 #endif
1348         while(s < end)
1349         {
1350                 register uint16_t bgr;
1351                 bgr = *s++;
1352 #ifdef WORDS_BIGENDIAN
1353                 *d++ = 0;
1354                 *d++ = (bgr&0xF800)>>8;
1355                 *d++ = (bgr&0x7E0)>>3;
1356                 *d++ = (bgr&0x1F)<<3;
1357 #else
1358                 *d++ = (bgr&0x1F)<<3;
1359                 *d++ = (bgr&0x7E0)>>3;
1360                 *d++ = (bgr&0xF800)>>8;
1361                 *d++ = 0;
1362 #endif
1363         }
1364 }
1365
1366 static inline void RENAME(rgb32tobgr32)(const uint8_t *src, uint8_t *dst, long src_size)
1367 {
1368 #ifdef HAVE_MMX
1369 /* TODO: unroll this loop */
1370         asm volatile (
1371                 "xor %%"REG_a", %%"REG_a"       \n\t"
1372                 ASMALIGN(4)
1373                 "1:                             \n\t"
1374                 PREFETCH" 32(%0, %%"REG_a")     \n\t"
1375                 "movq (%0, %%"REG_a"), %%mm0    \n\t"
1376                 "movq %%mm0, %%mm1              \n\t"
1377                 "movq %%mm0, %%mm2              \n\t"
1378                 "pslld $16, %%mm0               \n\t"
1379                 "psrld $16, %%mm1               \n\t"
1380                 "pand "MANGLE(mask32r)", %%mm0  \n\t"
1381                 "pand "MANGLE(mask32g)", %%mm2  \n\t"
1382                 "pand "MANGLE(mask32b)", %%mm1  \n\t"
1383                 "por %%mm0, %%mm2               \n\t"
1384                 "por %%mm1, %%mm2               \n\t"
1385                 MOVNTQ" %%mm2, (%1, %%"REG_a")  \n\t"
1386                 "add $8, %%"REG_a"              \n\t"
1387                 "cmp %2, %%"REG_a"              \n\t"
1388                 " jb 1b                         \n\t"
1389                 :: "r" (src), "r"(dst), "r" (src_size-7)
1390                 : "%"REG_a
1391         );
1392
1393         __asm __volatile(SFENCE:::"memory");
1394         __asm __volatile(EMMS:::"memory");
1395 #else
1396         unsigned i;
1397         unsigned num_pixels = src_size >> 2;
1398         for(i=0; i<num_pixels; i++)
1399         {
1400 #ifdef WORDS_BIGENDIAN  
1401           dst[4*i + 1] = src[4*i + 3];
1402           dst[4*i + 2] = src[4*i + 2];
1403           dst[4*i + 3] = src[4*i + 1];
1404 #else
1405           dst[4*i + 0] = src[4*i + 2];
1406           dst[4*i + 1] = src[4*i + 1];
1407           dst[4*i + 2] = src[4*i + 0];
1408 #endif
1409         }
1410 #endif
1411 }
1412
1413 static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
1414 {
1415         unsigned i;
1416 #ifdef HAVE_MMX
1417         long mmx_size= 23 - src_size;
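        /*
         * Counting trick (sketch): the src/dst pointers handed to the asm are
         * biased by -mmx_size and %%REG_a starts at the (negative) mmx_size,
         * so the indexed addressing walks forward from src while "add $24; js"
         * ends the loop on the sign flag alone, with no separate cmp.  The
         * tail of at most 23 bytes is left for the C loop below.
         */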
1418         asm volatile (
1419                 "movq "MANGLE(mask24r)", %%mm5  \n\t"
1420                 "movq "MANGLE(mask24g)", %%mm6  \n\t"
1421                 "movq "MANGLE(mask24b)", %%mm7  \n\t"
1422                 ASMALIGN(4)
1423                 "1:                             \n\t"
1424                 PREFETCH" 32(%1, %%"REG_a")     \n\t"
1425                 "movq   (%1, %%"REG_a"), %%mm0  \n\t" // BGR BGR BG
1426                 "movq   (%1, %%"REG_a"), %%mm1  \n\t" // BGR BGR BG
1427                 "movq  2(%1, %%"REG_a"), %%mm2  \n\t" // R BGR BGR B
1428                 "psllq $16, %%mm0               \n\t" // 00 BGR BGR
1429                 "pand %%mm5, %%mm0              \n\t"
1430                 "pand %%mm6, %%mm1              \n\t"
1431                 "pand %%mm7, %%mm2              \n\t"
1432                 "por %%mm0, %%mm1               \n\t"
1433                 "por %%mm2, %%mm1               \n\t"                
1434                 "movq  6(%1, %%"REG_a"), %%mm0  \n\t" // BGR BGR BG
1435                 MOVNTQ" %%mm1,   (%2, %%"REG_a")\n\t" // RGB RGB RG
1436                 "movq  8(%1, %%"REG_a"), %%mm1  \n\t" // R BGR BGR B
1437                 "movq 10(%1, %%"REG_a"), %%mm2  \n\t" // GR BGR BGR
1438                 "pand %%mm7, %%mm0              \n\t"
1439                 "pand %%mm5, %%mm1              \n\t"
1440                 "pand %%mm6, %%mm2              \n\t"
1441                 "por %%mm0, %%mm1               \n\t"
1442                 "por %%mm2, %%mm1               \n\t"                
1443                 "movq 14(%1, %%"REG_a"), %%mm0  \n\t" // R BGR BGR B
1444                 MOVNTQ" %%mm1,  8(%2, %%"REG_a")\n\t" // B RGB RGB R
1445                 "movq 16(%1, %%"REG_a"), %%mm1  \n\t" // GR BGR BGR
1446                 "movq 18(%1, %%"REG_a"), %%mm2  \n\t" // BGR BGR BG
1447                 "pand %%mm6, %%mm0              \n\t"
1448                 "pand %%mm7, %%mm1              \n\t"
1449                 "pand %%mm5, %%mm2              \n\t"
1450                 "por %%mm0, %%mm1               \n\t"
1451                 "por %%mm2, %%mm1               \n\t"                
1452                 MOVNTQ" %%mm1, 16(%2, %%"REG_a")\n\t"
1453                 "add $24, %%"REG_a"             \n\t"
1454                 " js 1b                         \n\t"
1455                 : "+a" (mmx_size)
1456                 : "r" (src-mmx_size), "r"(dst-mmx_size)
1457         );
1458
1459         __asm __volatile(SFENCE:::"memory");
1460         __asm __volatile(EMMS:::"memory");
1461
1462         if(mmx_size==23) return; //finished, was a multiple of 8
1463
1464         src+= src_size;
1465         dst+= src_size;
1466         src_size= 23-mmx_size;
1467         src-= src_size;
1468         dst-= src_size;
1469 #endif
1470         for(i=0; i<src_size; i+=3)
1471         {
1472                 register uint8_t x;
1473                 x          = src[i + 2];
1474                 dst[i + 1] = src[i + 1];
1475                 dst[i + 2] = src[i + 0];
1476                 dst[i + 0] = x;
1477         }
1478 }
1479
1480 static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
1481         long width, long height,
1482         long lumStride, long chromStride, long dstStride, long vertLumPerChroma)
1483 {
1484         long y;
1485         const long chromWidth= width>>1;
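        /*
         * YUY2 packs Y0 U0 Y1 V0 for every pair of horizontal pixels, so one
         * U and one V sample are shared by two lumas; a sketch of one scalar
         * step (illustration only, the real paths are below):
         *
         *   dst[4*i+0] = ysrc[2*i];
         *   dst[4*i+1] = usrc[i];
         *   dst[4*i+2] = ysrc[2*i+1];
         *   dst[4*i+3] = vsrc[i];
         */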
1486         for(y=0; y<height; y++)
1487         {
1488 #ifdef HAVE_MMX
1489 //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-bound anyway)
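                // Each iteration below packs 16 Y, 8 U and 8 V samples into 32 output
                // bytes in YUY2 order (Y0 U0 Y1 V0 ...): U and V are interleaved first,
                // then merged with the luma via punpcklbw/punpckhbw.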
1490                 asm volatile(
1491                         "xor %%"REG_a", %%"REG_a"       \n\t"
1492                         ASMALIGN(4)
1493                         "1:                             \n\t"
1494                         PREFETCH" 32(%1, %%"REG_a", 2)  \n\t"
1495                         PREFETCH" 32(%2, %%"REG_a")     \n\t"
1496                         PREFETCH" 32(%3, %%"REG_a")     \n\t"
1497                         "movq (%2, %%"REG_a"), %%mm0    \n\t" // U(0)
1498                         "movq %%mm0, %%mm2              \n\t" // U(0)
1499                         "movq (%3, %%"REG_a"), %%mm1    \n\t" // V(0)
1500                         "punpcklbw %%mm1, %%mm0         \n\t" // UVUV UVUV(0)
1501                         "punpckhbw %%mm1, %%mm2         \n\t" // UVUV UVUV(8)
1502
1503                         "movq (%1, %%"REG_a",2), %%mm3  \n\t" // Y(0)
1504                         "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
1505                         "movq %%mm3, %%mm4              \n\t" // Y(0)
1506                         "movq %%mm5, %%mm6              \n\t" // Y(8)
1507                         "punpcklbw %%mm0, %%mm3         \n\t" // YUYV YUYV(0)
1508                         "punpckhbw %%mm0, %%mm4         \n\t" // YUYV YUYV(4)
1509                         "punpcklbw %%mm2, %%mm5         \n\t" // YUYV YUYV(8)
1510                         "punpckhbw %%mm2, %%mm6         \n\t" // YUYV YUYV(12)
1511
1512                         MOVNTQ" %%mm3, (%0, %%"REG_a", 4)\n\t"
1513                         MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4)\n\t"
1514                         MOVNTQ" %%mm5, 16(%0, %%"REG_a", 4)\n\t"
1515                         MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4)\n\t"
1516
1517                         "add $8, %%"REG_a"              \n\t"
1518                         "cmp %4, %%"REG_a"              \n\t"
1519                         " jb 1b                         \n\t"
1520                         ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
1521                         : "%"REG_a
1522                 );
1523 #else
1524
1525 #if defined ARCH_ALPHA && defined HAVE_MVI
1526 #define pl2yuy2(n)                                      \
1527         y1 = yc[n];                                     \
1528         y2 = yc2[n];                                    \
1529         u = uc[n];                                      \
1530         v = vc[n];                                      \
1531         asm("unpkbw %1, %0" : "=r"(y1) : "r"(y1));      \
1532         asm("unpkbw %1, %0" : "=r"(y2) : "r"(y2));      \
1533         asm("unpkbl %1, %0" : "=r"(u) : "r"(u));        \
1534         asm("unpkbl %1, %0" : "=r"(v) : "r"(v));        \
1535         yuv1 = (u << 8) + (v << 24);                    \
1536         yuv2 = yuv1 + y2;                               \
1537         yuv1 += y1;                                     \
1538         qdst[n] = yuv1;                                 \
1539         qdst2[n] = yuv2;
1540
1541                 int i;
1542                 uint64_t *qdst = (uint64_t *) dst;
1543                 uint64_t *qdst2 = (uint64_t *) (dst + dstStride);
1544                 const uint32_t *yc = (uint32_t *) ysrc;
1545                 const uint32_t *yc2 = (uint32_t *) (ysrc + lumStride);
1546                 const uint16_t *uc = (uint16_t*) usrc, *vc = (uint16_t*) vsrc;
1547                 for(i = 0; i < chromWidth; i += 8){
1548                         uint64_t y1, y2, yuv1, yuv2;
1549                         uint64_t u, v;
1550                         /* Prefetch */
1551                         asm("ldq $31,64(%0)" :: "r"(yc));
1552                         asm("ldq $31,64(%0)" :: "r"(yc2));
1553                         asm("ldq $31,64(%0)" :: "r"(uc));
1554                         asm("ldq $31,64(%0)" :: "r"(vc));
1555
1556                         pl2yuy2(0);
1557                         pl2yuy2(1);
1558                         pl2yuy2(2);
1559                         pl2yuy2(3);
1560
1561                         yc += 4;
1562                         yc2 += 4;
1563                         uc += 4;
1564                         vc += 4;
1565                         qdst += 4;
1566                         qdst2 += 4;
1567                 }
1568                 y++;
1569                 ysrc += lumStride;
1570                 dst += dstStride;
1571
1572 #elif __WORDSIZE >= 64
1573                 int i;
1574                 uint64_t *ldst = (uint64_t *) dst;
1575                 const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
1576                 for(i = 0; i < chromWidth; i += 2){
1577                         uint64_t k, l;
1578                         k = yc[0] + (uc[0] << 8) +
1579                             (yc[1] << 16) + (vc[0] << 24);
1580                         l = yc[2] + (uc[1] << 8) +
1581                             (yc[3] << 16) + (vc[1] << 24);
1582                         *ldst++ = k + (l << 32);
1583                         yc += 4;
1584                         uc += 2;
1585                         vc += 2;
1586                 }
1587
1588 #else
1589                 int i, *idst = (int32_t *) dst;
1590                 const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
1591                 for(i = 0; i < chromWidth; i++){
1592 #ifdef WORDS_BIGENDIAN
1593                         *idst++ = (yc[0] << 24)+ (uc[0] << 16) +
1594                             (yc[1] << 8) + (vc[0] << 0);
1595 #else
1596                         *idst++ = yc[0] + (uc[0] << 8) +
1597                             (yc[1] << 16) + (vc[0] << 24);
1598 #endif
1599                         yc += 2;
1600                         uc++;
1601                         vc++;
1602                 }
1603 #endif
1604 #endif
1605                 if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
1606                 {
1607                         usrc += chromStride;
1608                         vsrc += chromStride;
1609                 }
1610                 ysrc += lumStride;
1611                 dst += dstStride;
1612         }
1613 #ifdef HAVE_MMX
1614 asm(    EMMS" \n\t"
1615         SFENCE" \n\t"
1616         :::"memory");
1617 #endif
1618 }
1619
1620 /**
1621  *
1622  * Height should be a multiple of 2 and width should be a multiple of 16.
1623  * (If this is a problem for anyone then tell me, and I will fix it.)
1624  */
1625 static inline void RENAME(yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
1626         long width, long height,
1627         long lumStride, long chromStride, long dstStride)
1628 {
1629         //FIXME interpolate chroma
1630         RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
1631 }
1632
1633 static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
1634         long width, long height,
1635         long lumStride, long chromStride, long dstStride, long vertLumPerChroma)
1636 {
1637         long y;
1638         const long chromWidth= width>>1;
1639         for(y=0; y<height; y++)
1640         {
1641 #ifdef HAVE_MMX
1642 //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-bound anyway)
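                // Same structure as yuvPlanartoyuy2 above, but with the punpck operand
                // order swapped so the bytes come out in UYVY order (U0 Y0 V0 Y1 ...).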
1643                 asm volatile(
1644                         "xor %%"REG_a", %%"REG_a"       \n\t"
1645                         ASMALIGN(4)
1646                         "1:                             \n\t"
1647                         PREFETCH" 32(%1, %%"REG_a", 2)  \n\t"
1648                         PREFETCH" 32(%2, %%"REG_a")     \n\t"
1649                         PREFETCH" 32(%3, %%"REG_a")     \n\t"
1650                         "movq (%2, %%"REG_a"), %%mm0    \n\t" // U(0)
1651                         "movq %%mm0, %%mm2              \n\t" // U(0)
1652                         "movq (%3, %%"REG_a"), %%mm1    \n\t" // V(0)
1653                         "punpcklbw %%mm1, %%mm0         \n\t" // UVUV UVUV(0)
1654                         "punpckhbw %%mm1, %%mm2         \n\t" // UVUV UVUV(8)
1655
1656                         "movq (%1, %%"REG_a",2), %%mm3  \n\t" // Y(0)
1657                         "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
1658                         "movq %%mm0, %%mm4              \n\t" // Y(0)
1659                         "movq %%mm2, %%mm6              \n\t" // Y(8)
1660                         "punpcklbw %%mm3, %%mm0         \n\t" // YUYV YUYV(0)
1661                         "punpckhbw %%mm3, %%mm4         \n\t" // YUYV YUYV(4)
1662                         "punpcklbw %%mm5, %%mm2         \n\t" // YUYV YUYV(8)
1663                         "punpckhbw %%mm5, %%mm6         \n\t" // YUYV YUYV(12)
1664
1665                         MOVNTQ" %%mm0, (%0, %%"REG_a", 4)\n\t"
1666                         MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4)\n\t"
1667                         MOVNTQ" %%mm2, 16(%0, %%"REG_a", 4)\n\t"
1668                         MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4)\n\t"
1669
1670                         "add $8, %%"REG_a"              \n\t"
1671                         "cmp %4, %%"REG_a"              \n\t"
1672                         " jb 1b                         \n\t"
1673                         ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
1674                         : "%"REG_a
1675                 );
1676 #else
1677 //FIXME adapt the Alpha ASM code from yv12->yuy2
1678
1679 #if __WORDSIZE >= 64
1680                 int i;
1681                 uint64_t *ldst = (uint64_t *) dst;
1682                 const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
1683                 for(i = 0; i < chromWidth; i += 2){
1684                         uint64_t k, l;
1685                         k = uc[0] + (yc[0] << 8) +
1686                             (vc[0] << 16) + (yc[1] << 24);
1687                         l = uc[1] + (yc[2] << 8) +
1688                             (vc[1] << 16) + (yc[3] << 24);
1689                         *ldst++ = k + (l << 32);
1690                         yc += 4;
1691                         uc += 2;
1692                         vc += 2;
1693                 }
1694
1695 #else
1696                 int i, *idst = (int32_t *) dst;
1697                 const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
1698                 for(i = 0; i < chromWidth; i++){
1699 #ifdef WORDS_BIGENDIAN
1700                         *idst++ = (uc[0] << 24)+ (yc[0] << 16) +
1701                             (vc[0] << 8) + (yc[1] << 0);
1702 #else
1703                         *idst++ = uc[0] + (yc[0] << 8) +
1704                             (vc[0] << 16) + (yc[1] << 24);
1705 #endif
1706                         yc += 2;
1707                         uc++;
1708                         vc++;
1709                 }
1710 #endif
1711 #endif
1712                 if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
1713                 {
1714                         usrc += chromStride;
1715                         vsrc += chromStride;
1716                 }
1717                 ysrc += lumStride;
1718                 dst += dstStride;
1719         }
1720 #ifdef HAVE_MMX
1721 asm(    EMMS" \n\t"
1722         SFENCE" \n\t"
1723         :::"memory");
1724 #endif
1725 }
1726
1727 /**
1728  *
1729  * Height should be a multiple of 2 and width should be a multiple of 16.
1730  * (If this is a problem for anyone then tell me, and I will fix it.)
1731  */
1732 static inline void RENAME(yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
1733         long width, long height,
1734         long lumStride, long chromStride, long dstStride)
1735 {
1736         //FIXME interpolate chroma
1737         RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
1738 }
1739
1740 /**
1741  *
1742  * Width should be a multiple of 16.
1743  */
1744 static inline void RENAME(yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
1745         long width, long height,
1746         long lumStride, long chromStride, long dstStride)
1747 {
1748         RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
1749 }
1750
1751 /**
1752  *
1753  * Height should be a multiple of 2 and width should be a multiple of 16.
1754  * (If this is a problem for anyone then tell me, and I will fix it.)
1755  */
1756 static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
1757         long width, long height,
1758         long lumStride, long chromStride, long srcStride)
1759 {
1760         long y;
1761         const long chromWidth= width>>1;
1762         for(y=0; y<height; y+=2)
1763         {
1764 #ifdef HAVE_MMX
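                /* The FF00FF00... mask built in %%mm7 keeps the luma bytes of each
                   YUYV word while psrlw $8 extracts the chroma bytes; the second asm
                   block below reuses the mask still held in %%mm7 and keeps only the
                   luma of the next line, so chroma is taken from the even lines only. */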
1765                 asm volatile(
1766                         "xor %%"REG_a", %%"REG_a"       \n\t"
1767                         "pcmpeqw %%mm7, %%mm7           \n\t"
1768                         "psrlw $8, %%mm7                \n\t" // FF,00,FF,00...
1769                         ASMALIGN(4)
1770                         "1:                             \n\t"
1771                         PREFETCH" 64(%0, %%"REG_a", 4)  \n\t"
1772                         "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
1773                         "movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(4)
1774                         "movq %%mm0, %%mm2              \n\t" // YUYV YUYV(0)
1775                         "movq %%mm1, %%mm3              \n\t" // YUYV YUYV(4)
1776                         "psrlw $8, %%mm0                \n\t" // U0V0 U0V0(0)
1777                         "psrlw $8, %%mm1                \n\t" // U0V0 U0V0(4)
1778                         "pand %%mm7, %%mm2              \n\t" // Y0Y0 Y0Y0(0)
1779                         "pand %%mm7, %%mm3              \n\t" // Y0Y0 Y0Y0(4)
1780                         "packuswb %%mm1, %%mm0          \n\t" // UVUV UVUV(0)
1781                         "packuswb %%mm3, %%mm2          \n\t" // YYYY YYYY(0)
1782
1783                         MOVNTQ" %%mm2, (%1, %%"REG_a", 2)\n\t"
1784
1785                         "movq 16(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(8)
1786                         "movq 24(%0, %%"REG_a", 4), %%mm2\n\t" // YUYV YUYV(12)
1787                         "movq %%mm1, %%mm3              \n\t" // YUYV YUYV(8)
1788                         "movq %%mm2, %%mm4              \n\t" // YUYV YUYV(12)
1789                         "psrlw $8, %%mm1                \n\t" // U0V0 U0V0(8)
1790                         "psrlw $8, %%mm2                \n\t" // U0V0 U0V0(12)
1791                         "pand %%mm7, %%mm3              \n\t" // Y0Y0 Y0Y0(8)
1792                         "pand %%mm7, %%mm4              \n\t" // Y0Y0 Y0Y0(12)
1793                         "packuswb %%mm2, %%mm1          \n\t" // UVUV UVUV(8)
1794                         "packuswb %%mm4, %%mm3          \n\t" // YYYY YYYY(8)
1795
1796                         MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2)\n\t"
1797
1798                         "movq %%mm0, %%mm2              \n\t" // UVUV UVUV(0)
1799                         "movq %%mm1, %%mm3              \n\t" // UVUV UVUV(8)
1800                         "psrlw $8, %%mm0                \n\t" // V0V0 V0V0(0)
1801                         "psrlw $8, %%mm1                \n\t" // V0V0 V0V0(8)
1802                         "pand %%mm7, %%mm2              \n\t" // U0U0 U0U0(0)
1803                         "pand %%mm7, %%mm3              \n\t" // U0U0 U0U0(8)
1804                         "packuswb %%mm1, %%mm0          \n\t" // VVVV VVVV(0)
1805                         "packuswb %%mm3, %%mm2          \n\t" // UUUU UUUU(0)
1806
1807                         MOVNTQ" %%mm0, (%3, %%"REG_a")  \n\t"
1808                         MOVNTQ" %%mm2, (%2, %%"REG_a")  \n\t"
1809
1810                         "add $8, %%"REG_a"              \n\t"
1811                         "cmp %4, %%"REG_a"              \n\t"
1812                         " jb 1b                         \n\t"
1813                         ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
1814                         : "memory", "%"REG_a
1815                 );
1816
1817                 ydst += lumStride;
1818                 src  += srcStride;
1819
1820                 asm volatile(
1821                         "xor %%"REG_a", %%"REG_a"       \n\t"
1822                         ASMALIGN(4)
1823                         "1:                             \n\t"
1824                         PREFETCH" 64(%0, %%"REG_a", 4)  \n\t"
1825                         "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
1826                         "movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(4)
1827                         "movq 16(%0, %%"REG_a", 4), %%mm2\n\t" // YUYV YUYV(8)
1828                         "movq 24(%0, %%"REG_a", 4), %%mm3\n\t" // YUYV YUYV(12)
1829                         "pand %%mm7, %%mm0              \n\t" // Y0Y0 Y0Y0(0)
1830                         "pand %%mm7, %%mm1              \n\t" // Y0Y0 Y0Y0(4)
1831                         "pand %%mm7, %%mm2              \n\t" // Y0Y0 Y0Y0(8)
1832                         "pand %%mm7, %%mm3              \n\t" // Y0Y0 Y0Y0(12)
1833                         "packuswb %%mm1, %%mm0          \n\t" // YYYY YYYY(0)
1834                         "packuswb %%mm3, %%mm2          \n\t" // YYYY YYYY(8)
1835
1836                         MOVNTQ" %%mm0, (%1, %%"REG_a", 2)\n\t"
1837                         MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2)\n\t"
1838
1839                         "add $8, %%"REG_a"              \n\t"
1840                         "cmp %4, %%"REG_a"              \n\t"
1841                         " jb 1b                         \n\t"
1842
1843                         ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
1844                         : "memory", "%"REG_a
1845                 );
1846 #else
1847                 long i;
1848                 for(i=0; i<chromWidth; i++)
1849                 {
1850                         ydst[2*i+0]     = src[4*i+0];
1851                         udst[i]         = src[4*i+1];
1852                         ydst[2*i+1]     = src[4*i+2];
1853                         vdst[i]         = src[4*i+3];
1854                 }
1855                 ydst += lumStride;
1856                 src  += srcStride;
1857
1858                 for(i=0; i<chromWidth; i++)
1859                 {
1860                         ydst[2*i+0]     = src[4*i+0];
1861                         ydst[2*i+1]     = src[4*i+2];
1862                 }
1863 #endif
1864                 udst += chromStride;
1865                 vdst += chromStride;
1866                 ydst += lumStride;
1867                 src  += srcStride;
1868         }
1869 #ifdef HAVE_MMX
1870 asm volatile(   EMMS" \n\t"
1871                 SFENCE" \n\t"
1872                 :::"memory");
1873 #endif
1874 }
1875
1876 static inline void RENAME(yvu9toyv12)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc,
1877         uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
1878         long width, long height, long lumStride, long chromStride)
1879 {
1880         /* Y Plane */
1881         memcpy(ydst, ysrc, width*height);
1882
1883         /* XXX: implement upscaling for U,V */
1884 }
1885
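/**
 * Upscales one plane to twice the width and height. Interior samples are 3:1
 * blends of neighbouring source samples (quarter-sample interpolation); the
 * MMX2/3DNOW path approximates this with chained PAVGB instructions.
 */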
1886 static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, long srcWidth, long srcHeight, long srcStride, long dstStride)
1887 {
1888         long x,y;
1889         
1890         dst[0]= src[0];
1891         
1892         // first line
1893         for(x=0; x<srcWidth-1; x++){
1894                 dst[2*x+1]= (3*src[x] +   src[x+1])>>2;
1895                 dst[2*x+2]= (  src[x] + 3*src[x+1])>>2;
1896         }
1897         dst[2*srcWidth-1]= src[srcWidth-1];
1898         
1899         dst+= dstStride;
1900
1901         for(y=1; y<srcHeight; y++){
1902 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
1903                 const long mmxSize= srcWidth&~15;
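                // Each chained pair of PAVGB instructions below approximates the 3:1
                // blend of the C fallback ((3*a + b)>>2), only with slightly different
                // rounding.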
1904                 asm volatile(
1905                         "mov %4, %%"REG_a"              \n\t"
1906                         "1:                             \n\t"
1907                         "movq (%0, %%"REG_a"), %%mm0    \n\t"
1908                         "movq (%1, %%"REG_a"), %%mm1    \n\t"
1909                         "movq 1(%0, %%"REG_a"), %%mm2   \n\t"
1910                         "movq 1(%1, %%"REG_a"), %%mm3   \n\t"
1911                         "movq -1(%0, %%"REG_a"), %%mm4  \n\t"
1912                         "movq -1(%1, %%"REG_a"), %%mm5  \n\t"
1913                         PAVGB" %%mm0, %%mm5             \n\t"
1914                         PAVGB" %%mm0, %%mm3             \n\t"
1915                         PAVGB" %%mm0, %%mm5             \n\t"
1916                         PAVGB" %%mm0, %%mm3             \n\t"
1917                         PAVGB" %%mm1, %%mm4             \n\t"
1918                         PAVGB" %%mm1, %%mm2             \n\t"
1919                         PAVGB" %%mm1, %%mm4             \n\t"
1920                         PAVGB" %%mm1, %%mm2             \n\t"
1921                         "movq %%mm5, %%mm7              \n\t"
1922                         "movq %%mm4, %%mm6              \n\t"
1923                         "punpcklbw %%mm3, %%mm5         \n\t"
1924                         "punpckhbw %%mm3, %%mm7         \n\t"
1925                         "punpcklbw %%mm2, %%mm4         \n\t"
1926                         "punpckhbw %%mm2, %%mm6         \n\t"
1927 #if 1
1928                         MOVNTQ" %%mm5, (%2, %%"REG_a", 2)\n\t"
1929                         MOVNTQ" %%mm7, 8(%2, %%"REG_a", 2)\n\t"
1930                         MOVNTQ" %%mm4, (%3, %%"REG_a", 2)\n\t"
1931                         MOVNTQ" %%mm6, 8(%3, %%"REG_a", 2)\n\t"
1932 #else
1933                         "movq %%mm5, (%2, %%"REG_a", 2) \n\t"
1934                         "movq %%mm7, 8(%2, %%"REG_a", 2)\n\t"
1935                         "movq %%mm4, (%3, %%"REG_a", 2) \n\t"
1936                         "movq %%mm6, 8(%3, %%"REG_a", 2)\n\t"
1937 #endif
1938                         "add $8, %%"REG_a"              \n\t"
1939                         " js 1b                         \n\t"
1940                         :: "r" (src + mmxSize  ), "r" (src + srcStride + mmxSize  ),
1941                            "r" (dst + mmxSize*2), "r" (dst + dstStride + mmxSize*2),
1942                            "g" (-mmxSize)
1943                         : "%"REG_a
1944
1945                 );
1946 #else
1947                 const long mmxSize=1;
1948 #endif
1949                 dst[0        ]= (3*src[0] +   src[srcStride])>>2;
1950                 dst[dstStride]= (  src[0] + 3*src[srcStride])>>2;
1951
1952                 for(x=mmxSize-1; x<srcWidth-1; x++){
1953                         dst[2*x          +1]= (3*src[x+0] +   src[x+srcStride+1])>>2;
1954                         dst[2*x+dstStride+2]= (  src[x+0] + 3*src[x+srcStride+1])>>2;
1955                         dst[2*x+dstStride+1]= (  src[x+1] + 3*src[x+srcStride  ])>>2;
1956                         dst[2*x          +2]= (3*src[x+1] +   src[x+srcStride  ])>>2;
1957                 }
1958                 dst[srcWidth*2 -1            ]= (3*src[srcWidth-1] +   src[srcWidth-1 + srcStride])>>2;
1959                 dst[srcWidth*2 -1 + dstStride]= (  src[srcWidth-1] + 3*src[srcWidth-1 + srcStride])>>2;
1960
1961                 dst+=dstStride*2;
1962                 src+=srcStride;
1963         }
1964         
1965         // last line
1966 #if 1
1967         dst[0]= src[0];
1968         
1969         for(x=0; x<srcWidth-1; x++){
1970                 dst[2*x+1]= (3*src[x] +   src[x+1])>>2;
1971                 dst[2*x+2]= (  src[x] + 3*src[x+1])>>2;
1972         }
1973         dst[2*srcWidth-1]= src[srcWidth-1];
1974 #else
1975         for(x=0; x<srcWidth; x++){
1976                 dst[2*x+0]=
1977                 dst[2*x+1]= src[x];
1978         }
1979 #endif
1980
1981 #ifdef HAVE_MMX
1982 asm volatile(   EMMS" \n\t"
1983                 SFENCE" \n\t"
1984                 :::"memory");
1985 #endif
1986 }
1987
1988 /**
1989  *
1990  * Height should be a multiple of 2 and width should be a multiple of 16.
1991  * (If this is a problem for anyone then tell me, and I will fix it.)
1992  * Chrominance data is only taken from every second line, the others are ignored. FIXME: write HQ version.
1993  */
1994 static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
1995         long width, long height,
1996         long lumStride, long chromStride, long srcStride)
1997 {
1998         long y;
1999         const long chromWidth= width>>1;
2000         for(y=0; y<height; y+=2)
2001         {
2002 #ifdef HAVE_MMX
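                /* Same splitting scheme as yuy2toyv12 above, with the roles swapped
                   for UYVY input: pand with the FF00 mask keeps the chroma bytes and
                   psrlw $8 extracts the luma bytes. */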
2003                 asm volatile(
2004                         "xor %%"REG_a", %%"REG_a"       \n\t"
2005                         "pcmpeqw %%mm7, %%mm7           \n\t"
2006                         "psrlw $8, %%mm7                \n\t" // FF,00,FF,00...
2007                         ASMALIGN(4)
2008                         "1:                             \n\t"
2009                         PREFETCH" 64(%0, %%"REG_a", 4)  \n\t"
2010                         "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // UYVY UYVY(0)
2011                         "movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // UYVY UYVY(4)
2012                         "movq %%mm0, %%mm2              \n\t" // UYVY UYVY(0)
2013                         "movq %%mm1, %%mm3              \n\t" // UYVY UYVY(4)
2014                         "pand %%mm7, %%mm0              \n\t" // U0V0 U0V0(0)
2015                         "pand %%mm7, %%mm1              \n\t" // U0V0 U0V0(4)
2016                         "psrlw $8, %%mm2                \n\t" // Y0Y0 Y0Y0(0)
2017                         "psrlw $8, %%mm3                \n\t" // Y0Y0 Y0Y0(4)
2018                         "packuswb %%mm1, %%mm0          \n\t" // UVUV UVUV(0)
2019                         "packuswb %%mm3, %%mm2          \n\t" // YYYY YYYY(0)
2020
2021                         MOVNTQ" %%mm2, (%1, %%"REG_a", 2)\n\t"
2022
2023                         "movq 16(%0, %%"REG_a", 4), %%mm1\n\t" // UYVY UYVY(8)
2024                         "movq 24(%0, %%"REG_a", 4), %%mm2\n\t" // UYVY UYVY(12)
2025                         "movq %%mm1, %%mm3              \n\t" // UYVY UYVY(8)
2026                         "movq %%mm2, %%mm4              \n\t" // UYVY UYVY(12)
2027                         "pand %%mm7, %%mm1              \n\t" // U0V0 U0V0(8)
2028                         "pand %%mm7, %%mm2              \n\t" // U0V0 U0V0(12)
2029                         "psrlw $8, %%mm3                \n\t" // Y0Y0 Y0Y0(8)
2030                         "psrlw $8, %%mm4                \n\t" // Y0Y0 Y0Y0(12)
2031                         "packuswb %%mm2, %%mm1          \n\t" // UVUV UVUV(8)
2032                         "packuswb %%mm4, %%mm3          \n\t" // YYYY YYYY(8)
2033
2034                         MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2)\n\t"
2035
2036                         "movq %%mm0, %%mm2              \n\t" // UVUV UVUV(0)
2037                         "movq %%mm1, %%mm3              \n\t" // UVUV UVUV(8)
2038                         "psrlw $8, %%mm0                \n\t" // V0V0 V0V0(0)
2039                         "psrlw $8, %%mm1                \n\t" // V0V0 V0V0(8)
2040                         "pand %%mm7, %%mm2              \n\t" // U0U0 U0U0(0)
2041                         "pand %%mm7, %%mm3              \n\t" // U0U0 U0U0(8)
2042                         "packuswb %%mm1, %%mm0          \n\t" // VVVV VVVV(0)
2043                         "packuswb %%mm3, %%mm2          \n\t" // UUUU UUUU(0)
2044
2045                         MOVNTQ" %%mm0, (%3, %%"REG_a")  \n\t"
2046                         MOVNTQ" %%mm2, (%2, %%"REG_a")  \n\t"
2047
2048                         "add $8, %%"REG_a"              \n\t"
2049                         "cmp %4, %%"REG_a"              \n\t"
2050                         " jb 1b                         \n\t"
2051                         ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
2052                         : "memory", "%"REG_a
2053                 );
2054
2055                 ydst += lumStride;
2056                 src  += srcStride;
2057
2058                 asm volatile(
2059                         "xor %%"REG_a", %%"REG_a"       \n\t"
2060                         ASMALIGN(4)
2061                         "1:                             \n\t"
2062                         PREFETCH" 64(%0, %%"REG_a", 4)  \n\t"
2063                         "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // UYVY UYVY(0)
2064                         "movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // UYVY UYVY(4)
2065                         "movq 16(%0, %%"REG_a", 4), %%mm2\n\t" // UYVY UYVY(8)
2066                         "movq 24(%0, %%"REG_a", 4), %%mm3\n\t" // UYVY UYVY(12)
2067                         "psrlw $8, %%mm0                \n\t" // Y0Y0 Y0Y0(0)
2068                         "psrlw $8, %%mm1                \n\t" // Y0Y0 Y0Y0(4)
2069                         "psrlw $8, %%mm2                \n\t" // Y0Y0 Y0Y0(8)
2070                         "psrlw $8, %%mm3                \n\t" // Y0Y0 Y0Y0(12)
2071                         "packuswb %%mm1, %%mm0          \n\t" // YYYY YYYY(0)
2072                         "packuswb %%mm3, %%mm2          \n\t" // YYYY YYYY(8)
2073
2074                         MOVNTQ" %%mm0, (%1, %%"REG_a", 2)\n\t"
2075                         MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2)\n\t"
2076
2077                         "add $8, %%"REG_a"              \n\t"
2078                         "cmp %4, %%"REG_a"              \n\t"
2079                         " jb 1b                         \n\t"
2080
2081                         ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
2082                         : "memory", "%"REG_a
2083                 );
2084 #else
2085                 long i;
2086                 for(i=0; i<chromWidth; i++)
2087                 {
2088                         udst[i]         = src[4*i+0];
2089                         ydst[2*i+0]     = src[4*i+1];
2090                         vdst[i]         = src[4*i+2];
2091                         ydst[2*i+1]     = src[4*i+3];
2092                 }
2093                 ydst += lumStride;
2094                 src  += srcStride;
2095
2096                 for(i=0; i<chromWidth; i++)
2097                 {
2098                         ydst[2*i+0]     = src[4*i+1];
2099                         ydst[2*i+1]     = src[4*i+3];
2100                 }
2101 #endif
2102                 udst += chromStride;
2103                 vdst += chromStride;
2104                 ydst += lumStride;
2105                 src  += srcStride;
2106         }
2107 #ifdef HAVE_MMX
2108 asm volatile(   EMMS" \n\t"
2109                 SFENCE" \n\t"
2110                 :::"memory");
2111 #endif
2112 }
2113
2114 /**
2115  *
2116  * Height should be a multiple of 2 and width should be a multiple of 2.
2117  * (If this is a problem for anyone then tell me, and I will fix it.)
2118  * Chrominance data is only taken from every second line in the C version, the others are ignored. FIXME: write HQ version.
2119  */
2120 static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
2121         long width, long height,
2122         long lumStride, long chromStride, long srcStride)
2123 {
2124         long y;
2125         const long chromWidth= width>>1;
2126 #ifdef HAVE_MMX
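        /* MMX path: the first asm block is run once per source line (twice per
           outer iteration) and computes the luma plane with pmaddwd against
           bgr2YCoeff; the second block averages 2x2 pixel blocks of the two lines
           and produces one U and one V sample per block via bgr2UCoeff/bgr2VCoeff.
           The final two rows are finished by the C fallback below. */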
2127         for(y=0; y<height-2; y+=2)
2128         {
2129                 long i;
2130                 for(i=0; i<2; i++)
2131                 {
2132                         asm volatile(
2133                                 "mov %2, %%"REG_a"              \n\t"
2134                                 "movq "MANGLE(bgr2YCoeff)", %%mm6               \n\t"
2135                                 "movq "MANGLE(w1111)", %%mm5            \n\t"
2136                                 "pxor %%mm7, %%mm7              \n\t"
2137                                 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d"\n\t"
2138                                 ASMALIGN(4)
2139                                 "1:                             \n\t"
2140                                 PREFETCH" 64(%0, %%"REG_d")     \n\t"
2141                                 "movd (%0, %%"REG_d"), %%mm0    \n\t"
2142                                 "movd 3(%0, %%"REG_d"), %%mm1   \n\t"
2143                                 "punpcklbw %%mm7, %%mm0         \n\t"
2144                                 "punpcklbw %%mm7, %%mm1         \n\t"
2145                                 "movd 6(%0, %%"REG_d"), %%mm2   \n\t"
2146                                 "movd 9(%0, %%"REG_d"), %%mm3   \n\t"
2147                                 "punpcklbw %%mm7, %%mm2         \n\t"
2148                                 "punpcklbw %%mm7, %%mm3         \n\t"
2149                                 "pmaddwd %%mm6, %%mm0           \n\t"
2150                                 "pmaddwd %%mm6, %%mm1           \n\t"
2151                                 "pmaddwd %%mm6, %%mm2           \n\t"
2152                                 "pmaddwd %%mm6, %%mm3           \n\t"
2153 #ifndef FAST_BGR2YV12
2154                                 "psrad $8, %%mm0                \n\t"
2155                                 "psrad $8, %%mm1                \n\t"
2156                                 "psrad $8, %%mm2                \n\t"
2157                                 "psrad $8, %%mm3                \n\t"
2158 #endif
2159                                 "packssdw %%mm1, %%mm0          \n\t"
2160                                 "packssdw %%mm3, %%mm2          \n\t"
2161                                 "pmaddwd %%mm5, %%mm0           \n\t"
2162                                 "pmaddwd %%mm5, %%mm2           \n\t"
2163                                 "packssdw %%mm2, %%mm0          \n\t"
2164                                 "psraw $7, %%mm0                \n\t"
2165
2166                                 "movd 12(%0, %%"REG_d"), %%mm4  \n\t"
2167                                 "movd 15(%0, %%"REG_d"), %%mm1  \n\t"
2168                                 "punpcklbw %%mm7, %%mm4         \n\t"
2169                                 "punpcklbw %%mm7, %%mm1         \n\t"
2170                                 "movd 18(%0, %%"REG_d"), %%mm2  \n\t"
2171                                 "movd 21(%0, %%"REG_d"), %%mm3  \n\t"
2172                                 "punpcklbw %%mm7, %%mm2         \n\t"
2173                                 "punpcklbw %%mm7, %%mm3         \n\t"
2174                                 "pmaddwd %%mm6, %%mm4           \n\t"
2175                                 "pmaddwd %%mm6, %%mm1           \n\t"
2176                                 "pmaddwd %%mm6, %%mm2           \n\t"
2177                                 "pmaddwd %%mm6, %%mm3           \n\t"
2178 #ifndef FAST_BGR2YV12
2179                                 "psrad $8, %%mm4                \n\t"
2180                                 "psrad $8, %%mm1                \n\t"
2181                                 "psrad $8, %%mm2                \n\t"
2182                                 "psrad $8, %%mm3                \n\t"
2183 #endif
2184                                 "packssdw %%mm1, %%mm4          \n\t"
2185                                 "packssdw %%mm3, %%mm2          \n\t"
2186                                 "pmaddwd %%mm5, %%mm4           \n\t"
2187                                 "pmaddwd %%mm5, %%mm2           \n\t"
2188                                 "add $24, %%"REG_d"             \n\t"
2189                                 "packssdw %%mm2, %%mm4          \n\t"
2190                                 "psraw $7, %%mm4                \n\t"
2191
2192                                 "packuswb %%mm4, %%mm0          \n\t"
2193                                 "paddusb "MANGLE(bgr2YOffset)", %%mm0   \n\t"
2194
2195                                 MOVNTQ" %%mm0, (%1, %%"REG_a")  \n\t"
2196                                 "add $8, %%"REG_a"              \n\t"
2197                                 " js 1b                         \n\t"
2198                                 : : "r" (src+width*3), "r" (ydst+width), "g" (-width)
2199                                 : "%"REG_a, "%"REG_d
2200                         );
2201                         ydst += lumStride;
2202                         src  += srcStride;
2203                 }
2204                 src -= srcStride*2;
2205                 asm volatile(
2206                         "mov %4, %%"REG_a"              \n\t"
2207                         "movq "MANGLE(w1111)", %%mm5            \n\t"
2208                         "movq "MANGLE(bgr2UCoeff)", %%mm6               \n\t"
2209                         "pxor %%mm7, %%mm7              \n\t"
2210                         "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d"\n\t"
2211                         "add %%"REG_d", %%"REG_d"       \n\t"
2212                         ASMALIGN(4)
2213                         "1:                             \n\t"
2214                         PREFETCH" 64(%0, %%"REG_d")     \n\t"
2215                         PREFETCH" 64(%1, %%"REG_d")     \n\t"
2216 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
2217                         "movq (%0, %%"REG_d"), %%mm0    \n\t"
2218                         "movq (%1, %%"REG_d"), %%mm1    \n\t"
2219                         "movq 6(%0, %%"REG_d"), %%mm2   \n\t"
2220                         "movq 6(%1, %%"REG_d"), %%mm3   \n\t"
2221                         PAVGB" %%mm1, %%mm0             \n\t"
2222                         PAVGB" %%mm3, %%mm2             \n\t"
2223                         "movq %%mm0, %%mm1              \n\t"
2224                         "movq %%mm2, %%mm3              \n\t"
2225                         "psrlq $24, %%mm0               \n\t"
2226                         "psrlq $24, %%mm2               \n\t"
2227                         PAVGB" %%mm1, %%mm0             \n\t"
2228                         PAVGB" %%mm3, %%mm2             \n\t"
2229                         "punpcklbw %%mm7, %%mm0         \n\t"
2230                         "punpcklbw %%mm7, %%mm2         \n\t"
2231 #else
2232                         "movd (%0, %%"REG_d"), %%mm0    \n\t"
2233                         "movd (%1, %%"REG_d"), %%mm1    \n\t"
2234                         "movd 3(%0, %%"REG_d"), %%mm2   \n\t"
2235                         "movd 3(%1, %%"REG_d"), %%mm3   \n\t"
2236                         "punpcklbw %%mm7, %%mm0         \n\t"
2237                         "punpcklbw %%mm7, %%mm1         \n\t"
2238                         "punpcklbw %%mm7, %%mm2         \n\t"
2239                         "punpcklbw %%mm7, %%mm3         \n\t"
2240                         "paddw %%mm1, %%mm0             \n\t"
2241                         "paddw %%mm3, %%mm2             \n\t"
2242                         "paddw %%mm2, %%mm0             \n\t"
2243                         "movd 6(%0, %%"REG_d"), %%mm4   \n\t"
2244                         "movd 6(%1, %%"REG_d"), %%mm1   \n\t"
2245                         "movd 9(%0, %%"REG_d"), %%mm2   \n\t"
2246                         "movd 9(%1, %%"REG_d"), %%mm3   \n\t"
2247                         "punpcklbw %%mm7, %%mm4         \n\t"
2248                         "punpcklbw %%mm7, %%mm1         \n\t"
2249                         "punpcklbw %%mm7, %%mm2         \n\t"
2250                         "punpcklbw %%mm7, %%mm3         \n\t"
2251                         "paddw %%mm1, %%mm4             \n\t"
2252                         "paddw %%mm3, %%mm2             \n\t"
2253                         "paddw %%mm4, %%mm2             \n\t"
2254                         "psrlw $2, %%mm0                \n\t"
2255                         "psrlw $2, %%mm2                \n\t"
2256 #endif
2257                         "movq "MANGLE(bgr2VCoeff)", %%mm1               \n\t"
2258                         "movq "MANGLE(bgr2VCoeff)", %%mm3               \n\t"
2259
2260                         "pmaddwd %%mm0, %%mm1           \n\t"
2261                         "pmaddwd %%mm2, %%mm3           \n\t"
2262                         "pmaddwd %%mm6, %%mm0           \n\t"
2263                         "pmaddwd %%mm6, %%mm2           \n\t"
2264 #ifndef FAST_BGR2YV12
2265                         "psrad $8, %%mm0                \n\t"
2266                         "psrad $8, %%mm1                \n\t"
2267                         "psrad $8, %%mm2                \n\t"
2268                         "psrad $8, %%mm3                \n\t"
2269 #endif
2270                         "packssdw %%mm2, %%mm0          \n\t"
2271                         "packssdw %%mm3, %%mm1          \n\t"
2272                         "pmaddwd %%mm5, %%mm0           \n\t"
2273                         "pmaddwd %%mm5, %%mm1           \n\t"
2274                         "packssdw %%mm1, %%mm0          \n\t" // V1 V0 U1 U0
2275                         "psraw $7, %%mm0                \n\t"
2276
2277 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
2278                         "movq 12(%0, %%"REG_d"), %%mm4  \n\t"
2279                         "movq 12(%1, %%"REG_d"), %%mm1  \n\t"
2280                         "movq 18(%0, %%"REG_d"), %%mm2  \n\t"
2281                         "movq 18(%1, %%"REG_d"), %%mm3  \n\t"
2282                         PAVGB" %%mm1, %%mm4             \n\t"
2283                         PAVGB" %%mm3, %%mm2             \n\t"
2284                         "movq %%mm4, %%mm1              \n\t"
2285                         "movq %%mm2, %%mm3              \n\t"
2286                         "psrlq $24, %%mm4               \n\t"
2287                         "psrlq $24, %%mm2               \n\t"
2288                         PAVGB" %%mm1, %%mm4             \n\t"
2289                         PAVGB" %%mm3, %%mm2             \n\t"
2290                         "punpcklbw %%mm7, %%mm4         \n\t"
2291                         "punpcklbw %%mm7, %%mm2         \n\t"
2292 #else
2293                         "movd 12(%0, %%"REG_d"), %%mm4  \n\t"
2294                         "movd 12(%1, %%"REG_d"), %%mm1  \n\t"
2295                         "movd 15(%0, %%"REG_d"), %%mm2  \n\t"
2296                         "movd 15(%1, %%"REG_d"), %%mm3  \n\t"
2297                         "punpcklbw %%mm7, %%mm4         \n\t"
2298                         "punpcklbw %%mm7, %%mm1         \n\t"
2299                         "punpcklbw %%mm7, %%mm2         \n\t"
2300                         "punpcklbw %%mm7, %%mm3         \n\t"
2301                         "paddw %%mm1, %%mm4             \n\t"
2302                         "paddw %%mm3, %%mm2             \n\t"
2303                         "paddw %%mm2, %%mm4             \n\t"
2304                         "movd 18(%0, %%"REG_d"), %%mm5  \n\t"
2305                         "movd 18(%1, %%"REG_d"), %%mm1  \n\t"
2306                         "movd 21(%0, %%"REG_d"), %%mm2  \n\t"
2307                         "movd 21(%1, %%"REG_d"), %%mm3  \n\t"
2308                         "punpcklbw %%mm7, %%mm5         \n\t"
2309                         "punpcklbw %%mm7, %%mm1         \n\t"
2310                         "punpcklbw %%mm7, %%mm2         \n\t"
2311                         "punpcklbw %%mm7, %%mm3         \n\t"
2312                         "paddw %%mm1, %%mm5             \n\t"
2313                         "paddw %%mm3, %%mm2             \n\t"
2314                         "paddw %%mm5, %%mm2             \n\t"
2315                         "movq "MANGLE(w1111)", %%mm5            \n\t"
2316                         "psrlw $2, %%mm4                \n\t"
2317                         "psrlw $2, %%mm2                \n\t"
2318 #endif
2319                         "movq "MANGLE(bgr2VCoeff)", %%mm1               \n\t"
2320                         "movq "MANGLE(bgr2VCoeff)", %%mm3               \n\t"
2321
2322                         "pmaddwd %%mm4, %%mm1           \n\t"
2323                         "pmaddwd %%mm2, %%mm3           \n\t"
2324                         "pmaddwd %%mm6, %%mm4           \n\t"
2325                         "pmaddwd %%mm6, %%mm2           \n\t"
2326 #ifndef FAST_BGR2YV12
2327                         "psrad $8, %%mm4                \n\t"
2328                         "psrad $8, %%mm1                \n\t"
2329                         "psrad $8, %%mm2                \n\t"
2330                         "psrad $8, %%mm3                \n\t"
2331 #endif
2332                         "packssdw %%mm2, %%mm4          \n\t"
2333                         "packssdw %%mm3, %%mm1          \n\t"
2334                         "pmaddwd %%mm5, %%mm4           \n\t"
2335                         "pmaddwd %%mm5, %%mm1           \n\t"
2336                         "add $24, %%"REG_d"             \n\t"
2337                         "packssdw %%mm1, %%mm4          \n\t" // V3 V2 U3 U2
2338                         "psraw $7, %%mm4                \n\t"
2339
2340                         "movq %%mm0, %%mm1              \n\t"
2341                         "punpckldq %%mm4, %%mm0         \n\t"
2342                         "punpckhdq %%mm4, %%mm1         \n\t"
2343                         "packsswb %%mm1, %%mm0          \n\t"
2344                         "paddb "MANGLE(bgr2UVOffset)", %%mm0    \n\t"
2345                         "movd %%mm0, (%2, %%"REG_a")    \n\t"
2346                         "punpckhdq %%mm0, %%mm0         \n\t"
2347                         "movd %%mm0, (%3, %%"REG_a")    \n\t"
2348                         "add $4, %%"REG_a"              \n\t"
2349                         " js 1b                         \n\t"
2350                         : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" (-chromWidth)
2351                         : "%"REG_a, "%"REG_d
2352                 );
2353
2354                 udst += chromStride;
2355                 vdst += chromStride;
2356                 src  += srcStride*2;
2357         }
2358
2359         asm volatile(   EMMS" \n\t"
2360                         SFENCE" \n\t"
2361                         :::"memory");
2362 #else
2363         y=0;
2364 #endif
2365         for(; y<height; y+=2)
2366         {
2367                 long i;
2368                 for(i=0; i<chromWidth; i++)
2369                 {
2370                         unsigned int b= src[6*i+0];
2371                         unsigned int g= src[6*i+1];
2372                         unsigned int r= src[6*i+2];
2373
2374                         unsigned int Y  =  ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
2375                         unsigned int V  =  ((RV*r + GV*g + BV*b)>>RGB2YUV_SHIFT) + 128;
2376                         unsigned int U  =  ((RU*r + GU*g + BU*b)>>RGB2YUV_SHIFT) + 128;
2377
2378                         udst[i]         = U;
2379                         vdst[i]         = V;
2380                         ydst[2*i]       = Y;
2381
2382                         b= src[6*i+3];
2383                         g= src[6*i+4];
2384                         r= src[6*i+5];
2385
2386                         Y  =  ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
2387                         ydst[2*i+1]     = Y;
2388                 }
2389                 ydst += lumStride;
2390                 src  += srcStride;
2391
2392                 for(i=0; i<chromWidth; i++)
2393                 {
2394                         unsigned int b= src[6*i+0];
2395                         unsigned int g= src[6*i+1];
2396                         unsigned int r= src[6*i+2];
2397
2398                         unsigned int Y  =  ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
2399
2400                         ydst[2*i]       = Y;
2401
2402                         b= src[6*i+3];
2403                         g= src[6*i+4];
2404                         r= src[6*i+5];
2405
2406                         Y  =  ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
2407                         ydst[2*i+1]     = Y;
2408                 }
2409                 udst += chromStride;
2410                 vdst += chromStride;
2411                 ydst += lumStride;
2412                 src  += srcStride;
2413         }
2414 }
2415
2416 void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest,
2417                             long width, long height, long src1Stride,
2418                             long src2Stride, long dstStride){
2419         long h;
2420
2421         for(h=0; h < height; h++)
2422         {
2423                 long w;
2424
2425 #ifdef HAVE_MMX
2426 #ifdef HAVE_SSE2
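                /* Note: movdqa/movntdq require 16-byte alignment, so this SSE2 path
                   assumes that src1, src2, dest and the three strides are all
                   16-byte aligned. */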
2427                 asm(
2428                         "xor %%"REG_a", %%"REG_a"       \n\t"
2429                         "1:                             \n\t"
2430                         PREFETCH" 64(%1, %%"REG_a")     \n\t"
2431                         PREFETCH" 64(%2, %%"REG_a")     \n\t"
2432                         "movdqa (%1, %%"REG_a"), %%xmm0 \n\t"
2433                         "movdqa (%1, %%"REG_a"), %%xmm1 \n\t"
2434                         "movdqa (%2, %%"REG_a"), %%xmm2 \n\t"
2435                         "punpcklbw %%xmm2, %%xmm0       \n\t"
2436                         "punpckhbw %%xmm2, %%xmm1       \n\t"
2437                         "movntdq %%xmm0, (%0, %%"REG_a", 2)\n\t"
2438                         "movntdq %%xmm1, 16(%0, %%"REG_a", 2)\n\t"
2439                         "add $16, %%"REG_a"             \n\t"
2440                         "cmp %3, %%"REG_a"              \n\t"
2441                         " jb 1b                         \n\t"
2442                         ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15)
2443                         : "memory", "%"REG_a""
2444                 );
2445 #else
2446                 asm(
2447                         "xor %%"REG_a", %%"REG_a"       \n\t"
2448                         "1:                             \n\t"
2449                         PREFETCH" 64(%1, %%"REG_a")     \n\t"
2450                         PREFETCH" 64(%2, %%"REG_a")     \n\t"
2451                         "movq (%1, %%"REG_a"), %%mm0    \n\t"
2452                         "movq 8(%1, %%"REG_a"), %%mm2   \n\t"
2453                         "movq %%mm0, %%mm1              \n\t"
2454                         "movq %%mm2, %%mm3              \n\t"
2455                         "movq (%2, %%"REG_a"), %%mm4    \n\t"
2456                         "movq 8(%2, %%"REG_a"), %%mm5   \n\t"
2457                         "punpcklbw %%mm4, %%mm0         \n\t"
2458                         "punpckhbw %%mm4, %%mm1         \n\t"
2459                         "punpcklbw %%mm5, %%mm2         \n\t"
2460                         "punpckhbw %%mm5, %%mm3         \n\t"
2461                         MOVNTQ" %%mm0, (%0, %%"REG_a", 2)\n\t"
2462                         MOVNTQ" %%mm1, 8(%0, %%"REG_a", 2)\n\t"
2463                         MOVNTQ" %%mm2, 16(%0, %%"REG_a", 2)\n\t"
2464                         MOVNTQ" %%mm3, 24(%0, %%"REG_a", 2)\n\t"
2465                         "add $16, %%"REG_a"             \n\t"
2466                         "cmp %3, %%"REG_a"              \n\t"
2467                         " jb 1b                         \n\t"
2468                         ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15)
2469                         : "memory", "%"REG_a
2470                 );
2471 #endif
2472                 for(w= (width&(~15)); w < width; w++)
2473                 {
2474                         dest[2*w+0] = src1[w];
2475                         dest[2*w+1] = src2[w];
2476                 }
2477 #else
2478                 for(w=0; w < width; w++)
2479                 {
2480                         dest[2*w+0] = src1[w];
2481                         dest[2*w+1] = src2[w];
2482                 }
2483 #endif
2484                 dest += dstStride;
2485                 src1 += src1Stride;
2486                 src2 += src2Stride;
2487         }
2488 #ifdef HAVE_MMX
2489         asm(
2490                 EMMS" \n\t"
2491                 SFENCE" \n\t"
2492                 ::: "memory"
2493                 );
2494 #endif
2495 }
2496
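/**
 * Doubles the resolution of two chroma planes by sample repetition: each
 * source byte is written twice horizontally and each source row is used for
 * two destination rows.
 */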
2497 static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
2498                         uint8_t *dst1, uint8_t *dst2,
2499                         long width, long height,
2500                         long srcStride1, long srcStride2,
2501                         long dstStride1, long dstStride2)
2502 {
2503     long y,x,w,h;
2504     w=width/2; h=height/2;
2505 #ifdef HAVE_MMX
2506     asm volatile(
2507         PREFETCH" %0\n\t"
2508         PREFETCH" %1\n\t"
2509         ::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory");
2510 #endif
2511     for(y=0;y<h;y++){
2512         const uint8_t* s1=src1+srcStride1*(y>>1);
2513         uint8_t* d=dst1+dstStride1*y;
2514         x=0;
2515 #ifdef HAVE_MMX
2516         for(;x<w-31;x+=32)
2517         {
2518             asm volatile(
2519                 PREFETCH" 32%1\n\t"
2520                 "movq   %1, %%mm0\n\t"
2521                 "movq   8%1, %%mm2\n\t"
2522                 "movq   16%1, %%mm4\n\t"
2523                 "movq   24%1, %%mm6\n\t"
2524                 "movq   %%mm0, %%mm1\n\t"
2525                 "movq   %%mm2, %%mm3\n\t"
2526                 "movq   %%mm4, %%mm5\n\t"
2527                 "movq   %%mm6, %%mm7\n\t"
2528                 "punpcklbw %%mm0, %%mm0\n\t"
2529                 "punpckhbw %%mm1, %%mm1\n\t"
2530                 "punpcklbw %%mm2, %%mm2\n\t"
2531                 "punpckhbw %%mm3, %%mm3\n\t"
2532                 "punpcklbw %%mm4, %%mm4\n\t"
2533                 "punpckhbw %%mm5, %%mm5\n\t"
2534                 "punpcklbw %%mm6, %%mm6\n\t"
2535                 "punpckhbw %%mm7, %%mm7\n\t"
2536                 MOVNTQ" %%mm0, %0\n\t"
2537                 MOVNTQ" %%mm1, 8%0\n\t"
2538                 MOVNTQ" %%mm2, 16%0\n\t"
2539                 MOVNTQ" %%mm3, 24%0\n\t"
2540                 MOVNTQ" %%mm4, 32%0\n\t"
2541                 MOVNTQ" %%mm5, 40%0\n\t"
2542                 MOVNTQ" %%mm6, 48%0\n\t"
2543                 MOVNTQ" %%mm7, 56%0"
2544                 :"=m"(d[2*x])
2545                 :"m"(s1[x])
2546                 :"memory");
2547         }
2548 #endif
2549         for(;x<w;x++) d[2*x]=d[2*x+1]=s1[x];
2550     }
2551     for(y=0;y<h;y++){
2552         const uint8_t* s2=src2+srcStride2*(y>>1);
2553         uint8_t* d=dst2+dstStride2*y;
2554         x=0;
2555 #ifdef HAVE_MMX
2556         for(;x<w-31;x+=32)
2557         {
2558             asm volatile(
2559                 PREFETCH" 32%1\n\t"
2560                 "movq   %1, %%mm0\n\t"
2561                 "movq   8%1, %%mm2\n\t"
2562                 "movq   16%1, %%mm4\n\t"
2563                 "movq   24%1, %%mm6\n\t"
2564                 "movq   %%mm0, %%mm1\n\t"
2565                 "movq   %%mm2, %%mm3\n\t"
2566                 "movq   %%mm4, %%mm5\n\t"
2567                 "movq   %%mm6, %%mm7\n\t"
2568                 "punpcklbw %%mm0, %%mm0\n\t"
2569                 "punpckhbw %%mm1, %%mm1\n\t"
2570                 "punpcklbw %%mm2, %%mm2\n\t"
2571                 "punpckhbw %%mm3, %%mm3\n\t"
2572                 "punpcklbw %%mm4, %%mm4\n\t"
2573                 "punpckhbw %%mm5, %%mm5\n\t"
2574                 "punpcklbw %%mm6, %%mm6\n\t"
2575                 "punpckhbw %%mm7, %%mm7\n\t"
2576                 MOVNTQ" %%mm0, %0\n\t"
2577                 MOVNTQ" %%mm1, 8%0\n\t"
2578                 MOVNTQ" %%mm2, 16%0\n\t"
2579                 MOVNTQ" %%mm3, 24%0\n\t"
2580                 MOVNTQ" %%mm4, 32%0\n\t"
2581                 MOVNTQ" %%mm5, 40%0\n\t"
2582                 MOVNTQ" %%mm6, 48%0\n\t"
2583                 MOVNTQ" %%mm7, 56%0"
2584                 :"=m"(d[2*x])
2585                 :"m"(s2[x])
2586                 :"memory");
2587         }
2588 #endif
2589         for(;x<w;x++) d[2*x]=d[2*x+1]=s2[x];
2590     }
2591 #ifdef HAVE_MMX
2592         asm(
2593                 EMMS" \n\t"
2594                 SFENCE" \n\t"
2595                 ::: "memory"
2596                 );
2597 #endif
2598 }
2599
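/**
 * Packs YVU9 (4:1:0) planes into YUY2: each U/V sample is reused for four
 * luma samples horizontally and for four source rows vertically (y>>2), so
 * the chroma is repeated rather than interpolated.
 */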
2600 static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
2601                         uint8_t *dst,
2602                         long width, long height,
2603                         long srcStride1, long srcStride2,
2604                         long srcStride3, long dstStride)
2605 {
2606     long y,x,w,h;
2607     w=width/2; h=height;
2608     for(y=0;y<h;y++){
2609         const uint8_t* yp=src1+srcStride1*y;
2610         const uint8_t* up=src2+srcStride2*(y>>2);
2611         const uint8_t* vp=src3+srcStride3*(y>>2);
2612         uint8_t* d=dst+dstStride*y;
2613         x=0;
2614 #ifdef HAVE_MMX
2615         for(;x<w-7;x+=8)
2616         {
2617             asm volatile(
2618                 PREFETCH" 32(%1, %0)\n\t"
2619                 PREFETCH" 32(%2, %0)\n\t"
2620                 PREFETCH" 32(%3, %0)\n\t"
2621                 "movq   (%1, %0, 4), %%mm0\n\t"       /* Y0Y1Y2Y3Y4Y5Y6Y7 */
2622                 "movq   (%2, %0), %%mm1\n\t"       /* U0U1U2U3U4U5U6U7 */
2623                 "movq   (%3, %0), %%mm2\n\t"         /* V0V1V2V3V4V5V6V7 */
2624                 "movq   %%mm0, %%mm3\n\t"    /* Y0Y1Y2Y3Y4Y5Y6Y7 */
2625                 "movq   %%mm1, %%mm4\n\t"    /* U0U1U2U3U4U5U6U7 */
2626                 "movq   %%mm2, %%mm5\n\t"    /* V0V1V2V3V4V5V6V7 */
2627                 "punpcklbw %%mm1, %%mm1\n\t" /* U0U0 U1U1 U2U2 U3U3 */
2628                 "punpcklbw %%mm2, %%mm2\n\t" /* V0V0 V1V1 V2V2 V3V3 */
2629                 "punpckhbw %%mm4, %%mm4\n\t" /* U4U4 U5U5 U6U6 U7U7 */
2630                 "punpckhbw %%mm5, %%mm5\n\t" /* V4V4 V5V5 V6V6 V7V7 */
2631
2632                 "movq   %%mm1, %%mm6\n\t"
2633                 "punpcklbw %%mm2, %%mm1\n\t" /* U0V0 U0V0 U1V1 U1V1*/
2634                 "punpcklbw %%mm1, %%mm0\n\t" /* Y0U0 Y1V0 Y2U0 Y3V0*/
2635                 "punpckhbw %%mm1, %%mm3\n\t" /* Y4U1 Y5V1 Y6U1 Y7V1*/
2636                 MOVNTQ" %%mm0, (%4, %0, 8)\n\t"
2637                 MOVNTQ" %%mm3, 8(%4, %0, 8)\n\t"
2638                 
2639                 "punpckhbw %%mm2, %%mm6\n\t" /* U2V2 U2V2 U3V3 U3V3*/
2640                 "movq   8(%1, %0, 4), %%mm0\n\t"
2641                 "movq   %%mm0, %%mm3\n\t"
2642                 "punpcklbw %%mm6, %%mm0\n\t" /* Y U2 Y V2 Y U2 Y V2*/
2643                 "punpckhbw %%mm6, %%mm3\n\t" /* Y U3 Y V3 Y U3 Y V3*/
2644                 MOVNTQ" %%mm0, 16(%4, %0, 8)\n\t"
2645                 MOVNTQ" %%mm3, 24(%4, %0, 8)\n\t"
2646
2647                 "movq   %%mm4, %%mm6\n\t"
2648                 "movq   16(%1, %0, 4), %%mm0\n\t"
2649                 "movq   %%mm0, %%mm3\n\t"
2650                 "punpcklbw %%mm5, %%mm4\n\t"
2651                 "punpcklbw %%mm4, %%mm0\n\t" /* Y U4 Y V4 Y U4 Y V4*/
2652                 "punpckhbw %%mm4, %%mm3\n\t" /* Y U5 Y V5 Y U5 Y V5*/
2653                 MOVNTQ" %%mm0, 32(%4, %0, 8)\n\t"
2654                 MOVNTQ" %%mm3, 40(%4, %0, 8)\n\t"
2655                 
2656                 "punpckhbw %%mm5, %%mm6\n\t"
2657                 "movq   24(%1, %0, 4), %%mm0\n\t"
2658                 "movq   %%mm0, %%mm3\n\t"
2659                 "punpcklbw %%mm6, %%mm0\n\t" /* Y U6 Y V6 Y U6 Y V6*/
2660                 "punpckhbw %%mm6, %%mm3\n\t" /* Y U7 Y V7 Y U7 Y V7*/
2661                 MOVNTQ" %%mm0, 48(%4, %0, 8)\n\t"
2662                 MOVNTQ" %%mm3, 56(%4, %0, 8)\n\t"
2663
2664                 : "+r" (x)
2665                 : "r"(yp), "r" (up), "r"(vp), "r"(d)
2666                 :"memory");
2667         }
2668 #endif
2669         for(; x<w; x++)
2670         {
2671             const long x2= x<<2;
2672             d[8*x+0]=yp[x2];
2673             d[8*x+1]=up[x];
2674             d[8*x+2]=yp[x2+1];
2675             d[8*x+3]=vp[x];
2676             d[8*x+4]=yp[x2+2];
2677             d[8*x+5]=up[x];
2678             d[8*x+6]=yp[x2+3];
2679             d[8*x+7]=vp[x];
2680         }
2681     }
2682 #ifdef HAVE_MMX
2683         asm(
2684                 EMMS" \n\t"
2685                 SFENCE" \n\t"
2686                 ::: "memory"
2687                 );
2688 #endif
2689 }