copy: add SSE optimization to U&V planes interleaving
[vlc.git] / modules / video_chroma / copy.c
/*****************************************************************************
 * copy.c: Fast YV12/NV12 copy
 *****************************************************************************
 * Copyright (C) 2010 Laurent Aimar
 * $Id$
 *
 * Authors: Laurent Aimar <fenrir _AT_ videolan _DOT_ org>
 *          Victorien Le Couviour--Tuffet <victorien.lecouviour.tuffet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 *****************************************************************************/

#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#include <vlc_common.h>
#include <vlc_picture.h>
#include <vlc_cpu.h>
#include <assert.h>

#include "copy.h"

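/* The copy cache is a 64-byte aligned bounce buffer: lines are streamed from
 * USWC video memory into this buffer, then copied from ordinary cacheable
 * memory to the destination.  Its size is the picture width rounded up to a
 * multiple of 64 bytes, with an 8 KiB floor.
 *
 * Typical usage (sketch only; the surrounding variables are hypothetical):
 *
 *     copy_cache_t cache;
 *     if (CopyInitCache(&cache, fmt->i_width))
 *         return VLC_EGENERIC;
 *     CopyFromNv12ToYv12(dst_pic, src_planes, src_pitches,
 *                        fmt->i_height, &cache);
 *     CopyCleanCache(&cache);
 */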
int CopyInitCache(copy_cache_t *cache, unsigned width)
{
#ifdef CAN_COMPILE_SSE2
    cache->size = __MAX((width + 0x3f) & ~0x3f, 8192);
    cache->buffer = vlc_memalign(64, cache->size);
    if (!cache->buffer)
        return VLC_EGENERIC;
#else
    (void) cache; (void) width;
#endif
    return VLC_SUCCESS;
}

void CopyCleanCache(copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    vlc_free(cache->buffer);
    cache->buffer = NULL;
    cache->size   = 0;
#else
    (void) cache;
#endif
}

#ifdef CAN_COMPILE_SSE2
/* Copy 16 or 64 bytes from srcp to dstp, reading with the given SSE>=2
 * load instruction and writing with the given store instruction.
 */
#define COPY16(dstp, srcp, load, store) \
    asm volatile (                      \
        load "  0(%[src]), %%xmm1\n"    \
        store " %%xmm1,    0(%[dst])\n" \
        : : [dst]"r"(dstp), [src]"r"(srcp) : "memory", "xmm1")

#define COPY64(dstp, srcp, load, store) \
    asm volatile (                      \
        load "  0(%[src]), %%xmm1\n"    \
        load " 16(%[src]), %%xmm2\n"    \
        load " 32(%[src]), %%xmm3\n"    \
        load " 48(%[src]), %%xmm4\n"    \
        store " %%xmm1,    0(%[dst])\n" \
        store " %%xmm2,   16(%[dst])\n" \
        store " %%xmm3,   32(%[dst])\n" \
        store " %%xmm4,   48(%[dst])\n" \
        : : [dst]"r"(dstp), [src]"r"(srcp) : "memory", "xmm1", "xmm2", "xmm3", "xmm4")

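/* For instance, COPY64(&dst[x], &src[x], "movntdqa", "movdqa") expands to
 * four 16-byte streaming loads followed by four aligned stores, i.e. one
 * 64-byte cache line per invocation. */
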
#ifndef __SSE4_1__
# undef vlc_CPU_SSE4_1
# define vlc_CPU_SSE4_1() ((cpu & VLC_CPU_SSE4_1) != 0)
#endif

#ifndef __SSSE3__
# undef vlc_CPU_SSSE3
# define vlc_CPU_SSSE3() ((cpu & VLC_CPU_SSSE3) != 0)
#endif

#ifndef __SSE2__
# undef vlc_CPU_SSE2
# define vlc_CPU_SSE2() ((cpu & VLC_CPU_SSE2) != 0)
#endif

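/* When the compiler already targets an ISA level (e.g. built with -mssse3),
 * the corresponding vlc_CPU_*() check is a compile-time constant; otherwise
 * it is redefined above to test the `cpu` flags parameter threaded through
 * the helpers below. */
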
/* Optimized copy from "Uncacheable Speculative Write Combining" memory
 * as used by some video surfaces.
 * XXX It is really efficient only when SSE4.1 is available.
 */
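/* Note: movntdqa is the streaming load meant for this case: on USWC memory
 * it fetches whole cache lines through the write-combining buffers instead
 * of issuing slow uncached reads; without SSE4.1 the plain movdqa/movdqu
 * loads below still work, only slower.  The mfence pair brackets the
 * non-temporal accesses. */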
VLC_SSE
static void CopyFromUswc(uint8_t *dst, size_t dst_pitch,
                         const uint8_t *src, size_t src_pitch,
                         unsigned width, unsigned height,
                         unsigned cpu)
{
#if defined (__SSE4_1__) || !defined(CAN_COMPILE_SSE4_1)
    VLC_UNUSED(cpu);
#endif
    assert(((intptr_t)dst & 0x0f) == 0 && (dst_pitch & 0x0f) == 0);

    asm volatile ("mfence");

    for (unsigned y = 0; y < height; y++) {
        const unsigned unaligned = (-(uintptr_t)src) & 0x0f;
        unsigned x = unaligned;

#ifdef CAN_COMPILE_SSE4_1
        if (vlc_CPU_SSE4_1()) {
            if (!unaligned) {
                for (; x+63 < width; x += 64)
                    COPY64(&dst[x], &src[x], "movntdqa", "movdqa");
            } else {
                COPY16(dst, src, "movdqu", "movdqa");
                for (; x+63 < width; x += 64)
                    COPY64(&dst[x], &src[x], "movntdqa", "movdqu");
            }
        } else
#endif
        {
            if (!unaligned) {
                for (; x+63 < width; x += 64)
                    COPY64(&dst[x], &src[x], "movdqa", "movdqa");
            } else {
                COPY16(dst, src, "movdqu", "movdqa");
                for (; x+63 < width; x += 64)
                    COPY64(&dst[x], &src[x], "movdqa", "movdqu");
            }
        }

        for (; x < width; x++)
            dst[x] = src[x];

        src += src_pitch;
        dst += dst_pitch;
    }
    asm volatile ("mfence");
}

VLC_SSE
static void Copy2d(uint8_t *dst, size_t dst_pitch,
                   const uint8_t *src, size_t src_pitch,
                   unsigned width, unsigned height)
{
    assert(((intptr_t)src & 0x0f) == 0 && (src_pitch & 0x0f) == 0);

    for (unsigned y = 0; y < height; y++) {
        unsigned x = 0;

        bool unaligned = ((intptr_t)dst & 0x0f) != 0;
        if (!unaligned) {
            for (; x+63 < width; x += 64)
                COPY64(&dst[x], &src[x], "movdqa", "movntdq");
        } else {
            for (; x+63 < width; x += 64)
                COPY64(&dst[x], &src[x], "movdqa", "movdqu");
        }

        for (; x < width; x++)
            dst[x] = src[x];

        src += src_pitch;
        dst += dst_pitch;
    }
}

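/* Interleave two chroma planes into a single UV plane.  With SSSE3, 8 bytes
 * of U are loaded into the low half of an XMM register and 8 bytes of V into
 * the high half, then one pshufb interleaves them:
 *
 *     xmm0 = u0..u7 | v0..v7
 *     pshufb {0,8, 1,9, ..., 7,15} -> u0 v0 u1 v1 ... u7 v7
 *
 * Without SSSE3, movhlps + punpcklbw produce the same byte interleave. */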
VLC_SSE
static void
SSE_InterleaveUV(uint8_t *dst, size_t dst_pitch,
                 uint8_t *srcu, size_t srcu_pitch,
                 uint8_t *srcv, size_t srcv_pitch,
                 unsigned int width, unsigned int height,
                 unsigned int cpu)
{
    assert(!((intptr_t)srcu & 0xf) && !(srcu_pitch & 0x0f) &&
           !((intptr_t)srcv & 0xf) && !(srcv_pitch & 0x0f));

#if defined(__SSSE3__) || !defined (CAN_COMPILE_SSSE3)
    VLC_UNUSED(cpu);
#endif

    uint8_t const       shuffle[] = { 0, 8,
                                      1, 9,
                                      2, 10,
                                      3, 11,
                                      4, 12,
                                      5, 13,
                                      6, 14,
                                      7, 15 };

    for (unsigned int y = 0; y < height; ++y)
    {
        unsigned int    x;

#define LOAD2X32                        \
    "movhpd 0x00(%[src2]), %%xmm0\n"    \
    "movlpd 0x00(%[src1]), %%xmm0\n"    \
                                        \
    "movhpd 0x08(%[src2]), %%xmm1\n"    \
    "movlpd 0x08(%[src1]), %%xmm1\n"    \
                                        \
    "movhpd 0x10(%[src2]), %%xmm2\n"    \
    "movlpd 0x10(%[src1]), %%xmm2\n"    \
                                        \
    "movhpd 0x18(%[src2]), %%xmm3\n"    \
    "movlpd 0x18(%[src1]), %%xmm3\n"

#define STORE64                         \
    "movdqu %%xmm0, 0x00(%[dst])\n"     \
    "movdqu %%xmm1, 0x10(%[dst])\n"     \
    "movdqu %%xmm2, 0x20(%[dst])\n"     \
    "movdqu %%xmm3, 0x30(%[dst])\n"

#ifdef CAN_COMPILE_SSSE3
        if (vlc_CPU_SSSE3())
            for (x = 0; x < (width & ~31); x += 32)
                asm volatile
                    (
                        "movdqu (%[shuffle]), %%xmm7\n"
                        LOAD2X32
                        "pshufb %%xmm7, %%xmm0\n"
                        "pshufb %%xmm7, %%xmm1\n"
                        "pshufb %%xmm7, %%xmm2\n"
                        "pshufb %%xmm7, %%xmm3\n"
                        STORE64
                        : : [dst]"r"(dst+2*x),
                            [src1]"r"(srcu+x), [src2]"r"(srcv+x),
                            [shuffle]"r"(shuffle)
                        : "memory", "xmm0", "xmm1", "xmm2", "xmm3", "xmm7"
                    );
        else
#endif

        {
            for (x = 0; x < (width & ~31); x += 32)
                asm volatile
                    (
                        LOAD2X32
                        "movhlps   %%xmm0, %%xmm4\n"
                        "punpcklbw %%xmm4, %%xmm0\n"

                        "movhlps   %%xmm1, %%xmm4\n"
                        "punpcklbw %%xmm4, %%xmm1\n"

                        "movhlps   %%xmm2, %%xmm4\n"
                        "punpcklbw %%xmm4, %%xmm2\n"

                        "movhlps   %%xmm3, %%xmm4\n"
                        "punpcklbw %%xmm4, %%xmm3\n"
                        STORE64
                        : : [dst]"r"(dst+2*x),
                            [src1]"r"(srcu+x), [src2]"r"(srcv+x)
                        : "memory",
                          "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm7"
                    );
        }
#undef LOAD2X32
#undef STORE64

        for (; x < width; ++x)
        {
            dst[2*x+0] = srcu[x];
            dst[2*x+1] = srcv[x];
        }
        srcu += srcu_pitch;
        srcv += srcv_pitch;
        dst += dst_pitch;
    }
}

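/* De-interleave a UV plane into separate U and V planes.  The SSSE3 path
 * uses pshufb to gather the even bytes (U) into the low quadword and the
 * odd bytes (V) into the high one.  The SSE2 fallback reaches the same
 * layout with mask/shift/pack: pand keeps the even bytes, psrlw $8 exposes
 * the odd ones, and packuswb repacks both halves. */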
VLC_SSE
static void SSE_SplitUV(uint8_t *dstu, size_t dstu_pitch,
                        uint8_t *dstv, size_t dstv_pitch,
                        const uint8_t *src, size_t src_pitch,
                        unsigned width, unsigned height, unsigned cpu)
{
#if defined(__SSSE3__) || !defined (CAN_COMPILE_SSSE3)
    VLC_UNUSED(cpu);
#endif
    const uint8_t shuffle[] = { 0, 2, 4, 6, 8, 10, 12, 14,
                                1, 3, 5, 7, 9, 11, 13, 15 };
    const uint8_t mask[] = { 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
                             0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00 };

    assert(((intptr_t)src & 0xf) == 0 && (src_pitch & 0x0f) == 0);

    for (unsigned y = 0; y < height; y++) {
        unsigned x = 0;

#define LOAD64 \
    "movdqa  0(%[src]), %%xmm0\n" \
    "movdqa 16(%[src]), %%xmm1\n" \
    "movdqa 32(%[src]), %%xmm2\n" \
    "movdqa 48(%[src]), %%xmm3\n"

#define STORE2X32 \
    "movq   %%xmm0,   0(%[dst1])\n" \
    "movq   %%xmm1,   8(%[dst1])\n" \
    "movhpd %%xmm0,   0(%[dst2])\n" \
    "movhpd %%xmm1,   8(%[dst2])\n" \
    "movq   %%xmm2,  16(%[dst1])\n" \
    "movq   %%xmm3,  24(%[dst1])\n" \
    "movhpd %%xmm2,  16(%[dst2])\n" \
    "movhpd %%xmm3,  24(%[dst2])\n"

#ifdef CAN_COMPILE_SSSE3
        if (vlc_CPU_SSSE3())
        {
            for (x = 0; x < (width & ~31); x += 32) {
                asm volatile (
                    "movdqu (%[shuffle]), %%xmm7\n"
                    LOAD64
                    "pshufb  %%xmm7, %%xmm0\n"
                    "pshufb  %%xmm7, %%xmm1\n"
                    "pshufb  %%xmm7, %%xmm2\n"
                    "pshufb  %%xmm7, %%xmm3\n"
                    STORE2X32
                    : : [dst1]"r"(&dstu[x]), [dst2]"r"(&dstv[x]), [src]"r"(&src[2*x]), [shuffle]"r"(shuffle) : "memory", "xmm0", "xmm1", "xmm2", "xmm3", "xmm7");
            }
        } else
#endif
        {
            for (x = 0; x < (width & ~31); x += 32) {
                asm volatile (
                    "movdqu (%[mask]), %%xmm7\n"
                    LOAD64
                    "movdqa   %%xmm0, %%xmm4\n"
                    "movdqa   %%xmm1, %%xmm5\n"
                    "movdqa   %%xmm2, %%xmm6\n"
                    "psrlw    $8,     %%xmm0\n"
                    "psrlw    $8,     %%xmm1\n"
                    "pand     %%xmm7, %%xmm4\n"
                    "pand     %%xmm7, %%xmm5\n"
                    "pand     %%xmm7, %%xmm6\n"
                    "packuswb %%xmm4, %%xmm0\n"
                    "packuswb %%xmm5, %%xmm1\n"
                    "pand     %%xmm3, %%xmm7\n"
                    "psrlw    $8,     %%xmm2\n"
                    "psrlw    $8,     %%xmm3\n"
                    "packuswb %%xmm6, %%xmm2\n"
                    "packuswb %%xmm7, %%xmm3\n"
                    STORE2X32
                    : : [dst2]"r"(&dstu[x]), [dst1]"r"(&dstv[x]), [src]"r"(&src[2*x]), [mask]"r"(mask) : "memory", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7");
            }
        }
#undef STORE2X32
#undef LOAD64

        for (; x < width; x++) {
            dstu[x] = src[2*x+0];
            dstv[x] = src[2*x+1];
        }
        src  += src_pitch;
        dstu += dstu_pitch;
        dstv += dstv_pitch;
    }
}

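/* Copy a plane in two passes: stream the source lines into the cache-sized
 * bounce buffer with CopyFromUswc(), then copy from that cacheable buffer
 * to the destination with Copy2d().  hstep is the number of lines that fit
 * in the buffer per iteration. */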
static void SSE_CopyPlane(uint8_t *dst, size_t dst_pitch,
                          const uint8_t *src, size_t src_pitch,
                          uint8_t *cache, size_t cache_size,
                          unsigned height, unsigned cpu)
{
    const unsigned w16 = (src_pitch+15) & ~15;
    const unsigned hstep = cache_size / w16;
    assert(hstep > 0);

    if (src_pitch == dst_pitch)
        memcpy(dst, src, src_pitch * height);
    else
    for (unsigned y = 0; y < height; y += hstep) {
        const unsigned hblock = __MIN(hstep, height - y);

        /* Copy a bunch of lines into our cache */
        CopyFromUswc(cache, w16,
                     src, src_pitch,
                     src_pitch, hblock, cpu);

        /* Copy from our cache to the destination */
        Copy2d(dst, dst_pitch,
               cache, w16,
               src_pitch, hblock);

        /* */
        src += src_pitch * hblock;
        dst += dst_pitch * hblock;
    }
}

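/* Same two-pass scheme for interleaving: the first half of the bounce
 * buffer stages hblock lines of U and the second half the matching lines
 * of V, then SSE_InterleaveUV() writes them out as one UV plane. */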
static void
SSE_InterleavePlanes(uint8_t *dst, size_t dst_pitch,
                     uint8_t *srcu, size_t srcu_pitch,
                     uint8_t *srcv, size_t srcv_pitch,
                     uint8_t *cache, size_t cache_size,
                     unsigned int height,
                     unsigned int cpu)
{
    assert(srcu_pitch == srcv_pitch);
    unsigned int const  w16 = (srcu_pitch+15) & ~15;
    unsigned int const  hstep = (cache_size) / (2*w16);
    assert(hstep > 0);

    for (unsigned int y = 0; y < height; y += hstep)
    {
        unsigned int const      hblock = __MIN(hstep, height - y);

        /* Copy a bunch of lines into our cache */
        CopyFromUswc(cache, w16, srcu, srcu_pitch,
                     srcu_pitch, hblock, cpu);
        CopyFromUswc(cache+w16*hblock, w16, srcv, srcv_pitch,
                     srcv_pitch, hblock, cpu);

        /* Copy from our cache to the destination */
        SSE_InterleaveUV(dst, dst_pitch, cache, w16,
                         cache+w16*hblock, w16, srcu_pitch, hblock, cpu);

        /* */
        srcu += hblock * srcu_pitch;
        srcv += hblock * srcv_pitch;
        dst += hblock * dst_pitch;
    }
}

static void SSE_SplitPlanes(uint8_t *dstu, size_t dstu_pitch,
                            uint8_t *dstv, size_t dstv_pitch,
                            const uint8_t *src, size_t src_pitch,
                            uint8_t *cache, size_t cache_size,
                            unsigned height, unsigned cpu)
{
    const unsigned w16 = (src_pitch+15) & ~15;
    const unsigned hstep = cache_size / w16;
    assert(hstep > 0);

    for (unsigned y = 0; y < height; y += hstep) {
        const unsigned hblock = __MIN(hstep, height - y);

        /* Copy a bunch of lines into our cache */
        CopyFromUswc(cache, w16, src, src_pitch,
                     src_pitch, hblock, cpu);

        /* Copy from our cache to the destination */
        SSE_SplitUV(dstu, dstu_pitch, dstv, dstv_pitch,
                    cache, w16, src_pitch, hblock, cpu);

        /* */
        src  += src_pitch  * hblock;
        dstu += dstu_pitch * hblock;
        dstv += dstv_pitch * hblock;
    }
}

static void SSE_CopyFromNv12ToYv12(picture_t *dst,
                                   uint8_t *src[2], size_t src_pitch[2],
                                   unsigned height,
                                   copy_cache_t *cache, unsigned cpu)
{
    SSE_CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
                  src[0], src_pitch[0],
                  cache->buffer, cache->size,
                  height, cpu);
    SSE_SplitPlanes(dst->p[2].p_pixels, dst->p[2].i_pitch,
                    dst->p[1].p_pixels, dst->p[1].i_pitch,
                    src[1], src_pitch[1],
                    cache->buffer, cache->size,
                    (height+1)/2, cpu);
    asm volatile ("emms");
}

static void SSE_CopyFromYv12ToYv12(picture_t *dst,
                                   uint8_t *src[3], size_t src_pitch[3],
                                   unsigned height,
                                   copy_cache_t *cache, unsigned cpu)
{
    for (unsigned n = 0; n < 3; n++) {
        const unsigned d = n > 0 ? 2 : 1;
        SSE_CopyPlane(dst->p[n].p_pixels, dst->p[n].i_pitch,
                      src[n], src_pitch[n],
                      cache->buffer, cache->size,
                      (height+d-1)/d, cpu);
    }
    asm volatile ("emms");
}


static void SSE_CopyFromNv12ToNv12(picture_t *dst,
                                   uint8_t *src[2], size_t src_pitch[2],
                                   unsigned height,
                                   copy_cache_t *cache, unsigned cpu)
{
    SSE_CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
                  src[0], src_pitch[0],
                  cache->buffer, cache->size,
                  height, cpu);
    SSE_CopyPlane(dst->p[1].p_pixels, dst->p[1].i_pitch,
                  src[1], src_pitch[1],
                  cache->buffer, cache->size,
                  height/2, cpu);
    asm volatile ("emms");
}

static void
SSE_CopyFromNv12ToI420(picture_t *dest, uint8_t *src[2],
                       size_t src_pitch[2], unsigned int height,
                       copy_cache_t *cache, unsigned int cpu)
{
    SSE_CopyPlane(dest->p[0].p_pixels, dest->p[0].i_pitch,
                  src[0], src_pitch[0], cache->buffer, cache->size,
                  height, cpu);
    SSE_SplitPlanes(dest->p[1].p_pixels, dest->p[1].i_pitch,
                    dest->p[2].p_pixels, dest->p[2].i_pitch,
                    src[1], src_pitch[1], cache->buffer, cache->size,
                    height / 2, cpu);
    asm volatile ("emms");
}

static void SSE_CopyFromI420ToNv12(picture_t *dst,
                                   uint8_t *src[3], size_t src_pitch[3],
                                   unsigned height,
                                   copy_cache_t *cache, unsigned cpu)
{
    SSE_CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
                  src[0], src_pitch[0],
                  cache->buffer, cache->size,
                  height, cpu);
    SSE_InterleavePlanes(dst->p[1].p_pixels, dst->p[1].i_pitch,
                         src[U_PLANE], src_pitch[U_PLANE],
                         src[V_PLANE], src_pitch[V_PLANE],
                         cache->buffer, cache->size, height / 2, cpu);
    asm volatile ("emms");
}
#undef COPY16
#undef COPY64
#endif /* CAN_COMPILE_SSE2 */

static void CopyPlane(uint8_t *dst, size_t dst_pitch,
                      const uint8_t *src, size_t src_pitch,
                      unsigned height)
{
    if (src_pitch == dst_pitch)
        memcpy(dst, src, src_pitch * height);
    else
    for (unsigned y = 0; y < height; y++) {
        memcpy(dst, src, src_pitch);
        src += src_pitch;
        dst += dst_pitch;
    }
}

static void SplitPlanes(uint8_t *dstu, size_t dstu_pitch,
                        uint8_t *dstv, size_t dstv_pitch,
                        const uint8_t *src, size_t src_pitch,
                        unsigned height)
{
    for (unsigned y = 0; y < height; y++) {
        for (unsigned x = 0; x < src_pitch / 2; x++) {
            dstu[x] = src[2*x+0];
            dstv[x] = src[2*x+1];
        }
        src  += src_pitch;
        dstu += dstu_pitch;
        dstv += dstv_pitch;
    }
}

void CopyFromNv12ToYv12(picture_t *dst, uint8_t *src[2], size_t src_pitch[2],
                        unsigned height, copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    unsigned cpu = vlc_CPU();
    if (vlc_CPU_SSE2())
        return SSE_CopyFromNv12ToYv12(dst, src, src_pitch, height, cache, cpu);
#else
    (void) cache;
#endif

    CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
              src[0], src_pitch[0], height);
    SplitPlanes(dst->p[2].p_pixels, dst->p[2].i_pitch,
                dst->p[1].p_pixels, dst->p[1].i_pitch,
                src[1], src_pitch[1], height/2);
}

void CopyFromNv12ToNv12(picture_t *dst, uint8_t *src[2], size_t src_pitch[2],
                        unsigned height, copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    unsigned cpu = vlc_CPU();
    if (vlc_CPU_SSE2())
        return SSE_CopyFromNv12ToNv12(dst, src, src_pitch, height,
                                      cache, cpu);
#else
    (void) cache;
#endif

    CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
              src[0], src_pitch[0], height);
    CopyPlane(dst->p[1].p_pixels, dst->p[1].i_pitch,
              src[1], src_pitch[1], height/2);
}

void CopyFromNv12ToI420(picture_t *dst, uint8_t *src[2], size_t src_pitch[2],
                        unsigned height, copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    unsigned cpu = vlc_CPU();

    if (vlc_CPU_SSE2())
        return SSE_CopyFromNv12ToI420(dst, src, src_pitch, height, cache, cpu);
#else
    VLC_UNUSED(cache);
#endif

    CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
              src[0], src_pitch[0], height);
    SplitPlanes(dst->p[1].p_pixels, dst->p[1].i_pitch,
                dst->p[2].p_pixels, dst->p[2].i_pitch,
                src[1], src_pitch[1], height/2);
}

void CopyFromI420ToNv12(picture_t *dst, uint8_t *src[3], size_t src_pitch[3],
                        unsigned height, copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    unsigned cpu = vlc_CPU();
    if (vlc_CPU_SSE2())
        return SSE_CopyFromI420ToNv12(dst, src, src_pitch, height,
                                      cache, cpu);
#else
    (void) cache;
#endif

    CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
              src[0], src_pitch[0], height);

    const unsigned copy_lines = height / 2;
    const unsigned copy_pitch = src_pitch[1];

    const int i_extra_pitch_uv = dst->p[1].i_pitch - 2 * copy_pitch;
    const int i_extra_pitch_u  = src_pitch[U_PLANE] - copy_pitch;
    const int i_extra_pitch_v  = src_pitch[V_PLANE] - copy_pitch;

    uint8_t *dstUV = dst->p[1].p_pixels;
    uint8_t *srcU  = src[U_PLANE];
    uint8_t *srcV  = src[V_PLANE];
    for ( unsigned int line = 0; line < copy_lines; line++ )
    {
        for ( unsigned int col = 0; col < copy_pitch; col++ )
        {
            *dstUV++ = *srcU++;
            *dstUV++ = *srcV++;
        }
        dstUV += i_extra_pitch_uv;
        srcU  += i_extra_pitch_u;
        srcV  += i_extra_pitch_v;
    }
}

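/* P010 stores each 10-bit sample MSB-aligned in a 16-bit word, so a source
 * value v is written as v << 6 (e.g. 0x3FF becomes 0xFFC0).  The pitch
 * corrections below are in 16-bit samples, hence the divisions by 2. */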
void CopyFromI420_10ToP010(picture_t *dst, uint8_t *src[3], size_t src_pitch[3],
                        unsigned height, copy_cache_t *cache)
{
    (void) cache;

    const int i_extra_pitch_dst_y = (dst->p[0].i_pitch - src_pitch[0]) / 2;
    const int i_extra_pitch_src_y = (src_pitch[Y_PLANE] - src_pitch[0]) / 2;
    uint16_t *dstY = (uint16_t *) dst->p[0].p_pixels;
    uint16_t *srcY = (uint16_t *) src[Y_PLANE];
    for (unsigned y = 0; y < height; y++) {
        for (unsigned x = 0; x < (src_pitch[0] / 2); x++) {
            *dstY++ = *srcY++ << 6;
        }
        dstY += i_extra_pitch_dst_y;
        srcY += i_extra_pitch_src_y;
    }

    const unsigned copy_lines = height / 2;
    const unsigned copy_pitch = src_pitch[1] / 2;

    const int i_extra_pitch_uv = dst->p[1].i_pitch / 2 - 2 * copy_pitch;
    const int i_extra_pitch_u  = src_pitch[U_PLANE] / 2 - copy_pitch;
    const int i_extra_pitch_v  = src_pitch[V_PLANE] / 2 - copy_pitch;

    uint16_t *dstUV = (uint16_t *) dst->p[1].p_pixels;
    uint16_t *srcU  = (uint16_t *) src[U_PLANE];
    uint16_t *srcV  = (uint16_t *) src[V_PLANE];
    for ( unsigned int line = 0; line < copy_lines; line++ )
    {
        for ( unsigned int col = 0; col < copy_pitch; col++ )
        {
            *dstUV++ = *srcU++ << 6;
            *dstUV++ = *srcV++ << 6;
        }
        dstUV += i_extra_pitch_uv;
        srcU  += i_extra_pitch_u;
        srcV  += i_extra_pitch_v;
    }
}


void CopyFromYv12ToYv12(picture_t *dst, uint8_t *src[3], size_t src_pitch[3],
                        unsigned height, copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    unsigned cpu = vlc_CPU();
    if (vlc_CPU_SSE2())
        return SSE_CopyFromYv12ToYv12(dst, src, src_pitch, height, cache, cpu);
#else
    (void) cache;
#endif

    CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
              src[0], src_pitch[0], height);
    CopyPlane(dst->p[1].p_pixels, dst->p[1].i_pitch,
              src[1], src_pitch[1], height / 2);
    CopyPlane(dst->p[2].p_pixels, dst->p[2].i_pitch,
              src[2], src_pitch[2], height / 2);
}