/*****************************************************************************
 * i422_yuy2.h : YUV to YUV conversion module for vlc
 *****************************************************************************
 * Copyright (C) 2002 VLC authors and VideoLAN
 *
 * Authors: Samuel Hocevar <sam@zoy.org>
 *          Damien Fouilleul <damienf@videolan.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 *****************************************************************************/

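/* These macros convert planar 4:2:2 YUV (separate Y, Cb and Cr planes) into
 * the packed YUYV, YVYU and UYVY layouts (plus a C-only Y211 variant).
 * MMX and SSE2 fast paths are selected below depending on which
 * MODULE_NAME_IS_* / CAN_COMPILE_* / HAVE_*_INTRINSICS macros are defined;
 * plain C fallbacks sit at the end of the file.  The caller provides p_line
 * (destination) and p_y, p_u, p_v (source plane pointers); the *_CALL macros
 * advance all four pointers themselves. */
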
#ifdef MODULE_NAME_IS_i422_yuy2_mmx

#if defined(CAN_COMPILE_MMX)

/* MMX assembly */

#define MMX_CALL(MMX_INSTRUCTIONS)          \
    do {                                    \
    __asm__ __volatile__(                   \
        ".p2align 3 \n\t"                   \
        MMX_INSTRUCTIONS                    \
        :                                   \
        : "r" (p_line), "r" (p_y),          \
          "r" (p_u), "r" (p_v)              \
        : "mm0", "mm1", "mm2" );            \
        p_line += 16; p_y += 8;             \
        p_u += 4; p_v += 4;                 \
    } while(0)

#define MMX_END __asm__ __volatile__ ( "emms" )
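
/* Sketch of how MMX_CALL and MMX_END are meant to be used (the real loop
 * lives in the accompanying .c file; i_x and i_width are assumed names for
 * the loop counter and the picture width in pixels):
 *
 *     for( i_x = i_width / 8; i_x-- ; )
 *     {
 *         MMX_CALL( MMX_YUV422_YUYV );
 *     }
 *     MMX_END;
 *
 * Each call packs 8 Y, 4 Cb and 4 Cr samples into 16 output bytes, then
 * advances the four pointers. */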

#define MMX_YUV422_YUYV "                                                 \n\
movq       (%1), %%mm0  # Load 8 Y            y7 y6 y5 y4 y3 y2 y1 y0     \n\
movd       (%2), %%mm1  # Load 4 Cb           00 00 00 00 u3 u2 u1 u0     \n\
movd       (%3), %%mm2  # Load 4 Cr           00 00 00 00 v3 v2 v1 v0     \n\
punpcklbw %%mm2, %%mm1  #                     v3 u3 v2 u2 v1 u1 v0 u0     \n\
movq      %%mm0, %%mm2  #                     y7 y6 y5 y4 y3 y2 y1 y0     \n\
punpcklbw %%mm1, %%mm2  #                     v1 y3 u1 y2 v0 y1 u0 y0     \n\
movq      %%mm2, (%0)   # Store low YUYV                                  \n\
punpckhbw %%mm1, %%mm0  #                     v3 y7 u3 y6 v2 y5 u2 y4     \n\
movq      %%mm0, 8(%0)  # Store high YUYV                                 \n\
"

#define MMX_YUV422_YVYU "                                                 \n\
movq       (%1), %%mm0  # Load 8 Y            y7 y6 y5 y4 y3 y2 y1 y0     \n\
movd       (%2), %%mm2  # Load 4 Cb           00 00 00 00 u3 u2 u1 u0     \n\
movd       (%3), %%mm1  # Load 4 Cr           00 00 00 00 v3 v2 v1 v0     \n\
punpcklbw %%mm2, %%mm1  #                     u3 v3 u2 v2 u1 v1 u0 v0     \n\
movq      %%mm0, %%mm2  #                     y7 y6 y5 y4 y3 y2 y1 y0     \n\
punpcklbw %%mm1, %%mm2  #                     u1 y3 v1 y2 u0 y1 v0 y0     \n\
movq      %%mm2, (%0)   # Store low YVYU                                  \n\
punpckhbw %%mm1, %%mm0  #                     u3 y7 v3 y6 u2 y5 v2 y4     \n\
movq      %%mm0, 8(%0)  # Store high YVYU                                 \n\
"

#define MMX_YUV422_UYVY "                                                 \n\
movq       (%1), %%mm0  # Load 8 Y            y7 y6 y5 y4 y3 y2 y1 y0     \n\
movd       (%2), %%mm1  # Load 4 Cb           00 00 00 00 u3 u2 u1 u0     \n\
movd       (%3), %%mm2  # Load 4 Cr           00 00 00 00 v3 v2 v1 v0     \n\
punpcklbw %%mm2, %%mm1  #                     v3 u3 v2 u2 v1 u1 v0 u0     \n\
movq      %%mm1, %%mm2  #                     v3 u3 v2 u2 v1 u1 v0 u0     \n\
punpcklbw %%mm0, %%mm2  #                     y3 v1 y2 u1 y1 v0 y0 u0     \n\
movq      %%mm2, (%0)   # Store low UYVY                                  \n\
punpckhbw %%mm0, %%mm1  #                     y7 v3 y6 u3 y5 v2 y4 u2     \n\
movq      %%mm1, 8(%0)  # Store high UYVY                                 \n\
"

#elif defined(HAVE_MMX_INTRINSICS)

/* MMX intrinsics */

#include <mmintrin.h>

#define MMX_CALL(MMX_INSTRUCTIONS)  \
    do {                            \
        __m64 mm0, mm1, mm2;        \
        MMX_INSTRUCTIONS            \
        p_line += 16; p_y += 8;     \
        p_u += 4; p_v += 4;         \
    } while(0)

#define MMX_END _mm_empty()
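
/* Like the "emms" issued by the assembly variant, _mm_empty() resets the
 * shared x87/MMX register state; it must run after the conversion loop and
 * before any subsequent floating-point code, because the MMX registers
 * alias the x87 stack. */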

#define MMX_YUV422_YUYV                     \
    mm0 = (__m64)*(uint64_t*)p_y;           \
    mm1 = _mm_cvtsi32_si64(*(int*)p_u);     \
    mm2 = _mm_cvtsi32_si64(*(int*)p_v);     \
    mm1 = _mm_unpacklo_pi8(mm1, mm2);       \
    mm2 = mm0;                              \
    mm2 = _mm_unpacklo_pi8(mm2, mm1);       \
    *(uint64_t*)p_line = (uint64_t)mm2;     \
    mm0 = _mm_unpackhi_pi8(mm0, mm1);       \
    *(uint64_t*)(p_line+8) = (uint64_t)mm0;

#define MMX_YUV422_YVYU                     \
    mm0 = (__m64)*(uint64_t*)p_y;           \
    mm2 = _mm_cvtsi32_si64(*(int*)p_u);     \
    mm1 = _mm_cvtsi32_si64(*(int*)p_v);     \
    mm1 = _mm_unpacklo_pi8(mm1, mm2);       \
    mm2 = mm0;                              \
    mm2 = _mm_unpacklo_pi8(mm2, mm1);       \
    *(uint64_t*)p_line = (uint64_t)mm2;     \
    mm0 = _mm_unpackhi_pi8(mm0, mm1);       \
    *(uint64_t*)(p_line+8) = (uint64_t)mm0;

#define MMX_YUV422_UYVY                     \
    mm0 = (__m64)*(uint64_t*)p_y;           \
    mm1 = _mm_cvtsi32_si64(*(int*)p_u);     \
    mm2 = _mm_cvtsi32_si64(*(int*)p_v);     \
    mm1 = _mm_unpacklo_pi8(mm1, mm2);       \
    mm2 = mm1;                              \
    mm2 = _mm_unpacklo_pi8(mm2, mm0);       \
    *(uint64_t*)p_line = (uint64_t)mm2;     \
    mm1 = _mm_unpackhi_pi8(mm1, mm0);       \
    *(uint64_t*)(p_line+8) = (uint64_t)mm1;
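
/* These intrinsic bodies mirror the assembly variants above, using the
 * mm0/mm1/mm2 temporaries declared by MMX_CALL, so the same
 * MMX_CALL( MMX_YUV422_* ) call sites work with either implementation.
 * The (__m64)*(uint64_t*)p_y load assumes a GCC-style __m64 that can be
 * cast from an integer; the possibly unaligned 8-byte access is fine on
 * x86. */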

#endif

#elif defined( MODULE_NAME_IS_i422_yuy2_sse2 )

#if defined(CAN_COMPILE_SSE2)

/* SSE2 assembly */

#define SSE2_CALL(SSE2_INSTRUCTIONS)        \
    do {                                    \
    __asm__ __volatile__(                   \
        ".p2align 3 \n\t"                   \
        SSE2_INSTRUCTIONS                   \
        :                                   \
        : "r" (p_line), "r" (p_y),          \
          "r" (p_u), "r" (p_v)              \
        : "xmm0", "xmm1", "xmm2" );         \
        p_line += 32; p_y += 16;            \
        p_u += 8; p_v += 8;                 \
    } while(0)

#define SSE2_END  __asm__ __volatile__ ( "sfence" ::: "memory" )
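
/* The *_ALIGNED variants below use movdqa/movntdq and therefore require p_y
 * and p_line to be 16-byte aligned.  movntdq is a non-temporal store that
 * bypasses the cache, which is why SSE2_END issues an sfence: it makes the
 * streamed output globally visible before anyone reads it. */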

#define SSE2_YUV422_YUYV_ALIGNED "                                      \n\
movdqa      (%1), %%xmm0  # Load 16 Y           yF yE yD ... y2 y1 y0   \n\
movq        (%2), %%xmm1  # Load 8 Cb           00 00 00 ... u2 u1 u0   \n\
movq        (%3), %%xmm2  # Load 8 Cr           00 00 00 ... v2 v1 v0   \n\
punpcklbw %%xmm2, %%xmm1  #                     v7 u7 ... v1 u1 v0 u0   \n\
movdqa    %%xmm0, %%xmm2  #                     yF yE yD ... y2 y1 y0   \n\
punpcklbw %%xmm1, %%xmm2  #                     v3 y7 ... v0 y1 u0 y0   \n\
movntdq   %%xmm2, (%0)    # Store low YUYV                              \n\
punpckhbw %%xmm1, %%xmm0  #                     v7 yF ... v4 y9 u4 y8   \n\
movntdq   %%xmm0, 16(%0)  # Store high YUYV                             \n\
"

#define SSE2_YUV422_YUYV_UNALIGNED "                                    \n\
movdqu      (%1), %%xmm0  # Load 16 Y           yF yE yD ... y2 y1 y0   \n\
movq        (%2), %%xmm1  # Load 8 Cb           00 00 00 ... u2 u1 u0   \n\
movq        (%3), %%xmm2  # Load 8 Cr           00 00 00 ... v2 v1 v0   \n\
prefetchnta (%0)          # Tell CPU not to cache output YUYV data      \n\
punpcklbw %%xmm2, %%xmm1  #                     v7 u7 ... v1 u1 v0 u0   \n\
movdqa    %%xmm0, %%xmm2  #                     yF yE yD ... y2 y1 y0   \n\
punpcklbw %%xmm1, %%xmm2  #                     v3 y7 ... v0 y1 u0 y0   \n\
movdqu    %%xmm2, (%0)    # Store low YUYV                              \n\
punpckhbw %%xmm1, %%xmm0  #                     v7 yF ... v4 y9 u4 y8   \n\
movdqu    %%xmm0, 16(%0)  # Store high YUYV                             \n\
"

#define SSE2_YUV422_YVYU_ALIGNED "                                      \n\
movdqa      (%1), %%xmm0  # Load 16 Y           yF yE yD ... y2 y1 y0   \n\
movq        (%2), %%xmm2  # Load 8 Cb           00 00 00 ... u2 u1 u0   \n\
movq        (%3), %%xmm1  # Load 8 Cr           00 00 00 ... v2 v1 v0   \n\
punpcklbw %%xmm2, %%xmm1  #                     u7 v7 ... u1 v1 u0 v0   \n\
movdqa    %%xmm0, %%xmm2  #                     yF yE yD ... y2 y1 y0   \n\
punpcklbw %%xmm1, %%xmm2  #                     u3 y7 ... u0 y1 v0 y0   \n\
movntdq   %%xmm2, (%0)    # Store low YVYU                              \n\
punpckhbw %%xmm1, %%xmm0  #                     u7 yF ... u4 y9 v4 y8   \n\
movntdq   %%xmm0, 16(%0)  # Store high YVYU                             \n\
"

#define SSE2_YUV422_YVYU_UNALIGNED "                                    \n\
movdqu      (%1), %%xmm0  # Load 16 Y           yF yE yD ... y2 y1 y0   \n\
movq        (%2), %%xmm2  # Load 8 Cb           00 00 00 ... u2 u1 u0   \n\
movq        (%3), %%xmm1  # Load 8 Cr           00 00 00 ... v2 v1 v0   \n\
prefetchnta (%0)          # Tell CPU not to cache output YVYU data      \n\
punpcklbw %%xmm2, %%xmm1  #                     u7 v7 ... u1 v1 u0 v0   \n\
movdqa    %%xmm0, %%xmm2  #                     yF yE yD ... y2 y1 y0   \n\
punpcklbw %%xmm1, %%xmm2  #                     u3 y7 ... u0 y1 v0 y0   \n\
movdqu    %%xmm2, (%0)    # Store low YVYU                              \n\
punpckhbw %%xmm1, %%xmm0  #                     u7 yF ... u4 y9 v4 y8   \n\
movdqu    %%xmm0, 16(%0)  # Store high YVYU                             \n\
"

#define SSE2_YUV422_UYVY_ALIGNED "                                      \n\
movdqa      (%1), %%xmm0  # Load 16 Y           yF yE yD ... y2 y1 y0   \n\
movq        (%2), %%xmm1  # Load 8 Cb           00 00 00 ... u2 u1 u0   \n\
movq        (%3), %%xmm2  # Load 8 Cr           00 00 00 ... v2 v1 v0   \n\
punpcklbw %%xmm2, %%xmm1  #                     v7 u7 ... v1 u1 v0 u0   \n\
movdqa    %%xmm1, %%xmm2  #                     v7 u7 ... v1 u1 v0 u0   \n\
punpcklbw %%xmm0, %%xmm2  #                     y7 v3 ... y1 v0 y0 u0   \n\
movntdq   %%xmm2, (%0)    # Store low UYVY                              \n\
punpckhbw %%xmm0, %%xmm1  #                     yF v7 ... y9 v4 y8 u4   \n\
movntdq   %%xmm1, 16(%0)  # Store high UYVY                             \n\
"

#define SSE2_YUV422_UYVY_UNALIGNED "                                    \n\
movdqu      (%1), %%xmm0  # Load 16 Y           yF yE yD ... y2 y1 y0   \n\
movq        (%2), %%xmm1  # Load 8 Cb           00 00 00 ... u2 u1 u0   \n\
movq        (%3), %%xmm2  # Load 8 Cr           00 00 00 ... v2 v1 v0   \n\
prefetchnta (%0)          # Tell CPU not to cache output UYVY data      \n\
punpcklbw %%xmm2, %%xmm1  #                     v7 u7 ... v1 u1 v0 u0   \n\
movdqa    %%xmm1, %%xmm2  #                     v7 u7 ... v1 u1 v0 u0   \n\
punpcklbw %%xmm0, %%xmm2  #                     y7 v3 ... y1 v0 y0 u0   \n\
movdqu    %%xmm2, (%0)    # Store low UYVY                              \n\
punpckhbw %%xmm0, %%xmm1  #                     yF v7 ... y9 v4 y8 u4   \n\
movdqu    %%xmm1, 16(%0)  # Store high UYVY                             \n\
"

#elif defined(HAVE_SSE2_INTRINSICS)

/* SSE2 intrinsics */

#include <emmintrin.h>

#define SSE2_CALL(SSE2_INSTRUCTIONS)    \
    do {                                \
        __m128i xmm0, xmm1, xmm2;       \
        SSE2_INSTRUCTIONS               \
        p_line += 32; p_y += 16;        \
        p_u += 8; p_v += 8;             \
    } while(0)

#define SSE2_END  _mm_sfence()

#define SSE2_YUV422_YUYV_ALIGNED                \
    xmm0 = _mm_load_si128((__m128i *)p_y);      \
    xmm1 = _mm_loadl_epi64((__m128i *)p_u);     \
    xmm2 = _mm_loadl_epi64((__m128i *)p_v);     \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);       \
    xmm2 = xmm0;                                \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);       \
    _mm_stream_si128((__m128i*)(p_line), xmm2); \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);       \
    _mm_stream_si128((__m128i*)(p_line+16), xmm0);

#define SSE2_YUV422_YUYV_UNALIGNED              \
    xmm0 = _mm_loadu_si128((__m128i *)p_y);     \
    xmm1 = _mm_loadl_epi64((__m128i *)p_u);     \
    xmm2 = _mm_loadl_epi64((__m128i *)p_v);     \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);       \
    xmm2 = xmm0;                                \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);       \
    _mm_storeu_si128((__m128i*)(p_line), xmm2); \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);       \
    _mm_storeu_si128((__m128i*)(p_line+16), xmm0);

#define SSE2_YUV422_YVYU_ALIGNED                \
    xmm0 = _mm_load_si128((__m128i *)p_y);      \
    xmm2 = _mm_loadl_epi64((__m128i *)p_u);     \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v);     \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);       \
    xmm2 = xmm0;                                \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);       \
    _mm_stream_si128((__m128i*)(p_line), xmm2); \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);       \
    _mm_stream_si128((__m128i*)(p_line+16), xmm0);

#define SSE2_YUV422_YVYU_UNALIGNED              \
    xmm0 = _mm_loadu_si128((__m128i *)p_y);     \
    xmm2 = _mm_loadl_epi64((__m128i *)p_u);     \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v);     \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);       \
    xmm2 = xmm0;                                \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);       \
    _mm_storeu_si128((__m128i*)(p_line), xmm2); \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);       \
    _mm_storeu_si128((__m128i*)(p_line+16), xmm0);

#define SSE2_YUV422_UYVY_ALIGNED                \
    xmm0 = _mm_load_si128((__m128i *)p_y);      \
    xmm1 = _mm_loadl_epi64((__m128i *)p_u);     \
    xmm2 = _mm_loadl_epi64((__m128i *)p_v);     \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);       \
    xmm2 = xmm1;                                \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm0);       \
    _mm_stream_si128((__m128i*)(p_line), xmm2); \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm0);       \
    _mm_stream_si128((__m128i*)(p_line+16), xmm1);

#define SSE2_YUV422_UYVY_UNALIGNED              \
    xmm0 = _mm_loadu_si128((__m128i *)p_y);     \
    xmm1 = _mm_loadl_epi64((__m128i *)p_u);     \
    xmm2 = _mm_loadl_epi64((__m128i *)p_v);     \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);       \
    xmm2 = xmm1;                                \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm0);       \
    _mm_storeu_si128((__m128i*)(p_line), xmm2); \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm0);       \
    _mm_storeu_si128((__m128i*)(p_line+16), xmm1);

#endif

#endif

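/* Plain C fallbacks.  Each of the three packed-4:2:2 macros below emits one
 * 2-pixel group (4 bytes) and advances every pointer it reads, which also
 * makes them suitable for the pixels the 8- and 16-pixel SIMD macros above
 * leave over at the end of a line. */
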
#define C_YUV422_YUYV( p_line, p_y, p_u, p_v )                              \
    *(p_line)++ = *(p_y)++;                                                 \
    *(p_line)++ = *(p_u)++;                                                 \
    *(p_line)++ = *(p_y)++;                                                 \
    *(p_line)++ = *(p_v)++;                                                 \

#define C_YUV422_YVYU( p_line, p_y, p_u, p_v )                              \
    *(p_line)++ = *(p_y)++;                                                 \
    *(p_line)++ = *(p_v)++;                                                 \
    *(p_line)++ = *(p_y)++;                                                 \
    *(p_line)++ = *(p_u)++;                                                 \

#define C_YUV422_UYVY( p_line, p_y, p_u, p_v )                              \
    *(p_line)++ = *(p_u)++;                                                 \
    *(p_line)++ = *(p_y)++;                                                 \
    *(p_line)++ = *(p_v)++;                                                 \
    *(p_line)++ = *(p_y)++;                                                 \

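
/* Y211: luma is decimated by two horizontally and chroma by four, and the
 * chroma samples are recentred around zero (hence the - 0x80), so one
 * invocation consumes four source pixels and emits four bytes. */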
#define C_YUV422_Y211( p_line, p_y, p_u, p_v )                              \
    *(p_line)++ = *(p_y); p_y += 2;                                         \
    *(p_line)++ = *(p_u) - 0x80; p_u += 2;                                  \
    *(p_line)++ = *(p_y); p_y += 2;                                         \
    *(p_line)++ = *(p_v) - 0x80; p_v += 2;                                  \

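
/* Minimal sketch of the pure C path, assuming p_line, p_y, p_u and p_v are
 * set up per output line by the caller (i_x and i_width are assumed names,
 * the loop bound is illustrative):
 *
 *     for( i_x = i_width / 2; i_x-- ; )
 *     {
 *         C_YUV422_YUYV( p_line, p_y, p_u, p_v );
 *     }
 */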