/*****************************************************************************
 * copy.c: Fast YV12/NV12 copy
 *****************************************************************************
 * Copyright (C) 2010 Laurent Aimar
 * $Id$
 *
 * Authors: Laurent Aimar <fenrir _AT_ videolan _DOT_ org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 *****************************************************************************/

#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#include <vlc_common.h>
#include <vlc_picture.h>
#include <vlc_cpu.h>
#include <assert.h>

#include "copy.h"

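/* The copy cache is a 64-byte aligned bounce buffer: pictures living in
 * slow USWC (uncacheable speculative write combining) memory are first
 * streamed into this ordinary cacheable buffer, then copied to their
 * destination from there. */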
int CopyInitCache(copy_cache_t *cache, unsigned width)
{
#ifdef CAN_COMPILE_SSE2
    cache->size = __MAX((width + 0x3f) & ~0x3f, 8192);
    cache->buffer = vlc_memalign(64, cache->size);
    if (!cache->buffer)
        return VLC_EGENERIC;
#else
    (void) cache; (void) width;
#endif
    return VLC_SUCCESS;
}

void CopyCleanCache(copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    vlc_free(cache->buffer);
    cache->buffer = NULL;
    cache->size   = 0;
#else
    (void) cache;
#endif
}

#ifdef CAN_COMPILE_SSE2
/* Copy 16 or 64 bytes from srcp to dstp, reading the data with the given
 * SSE>=2 load instruction and writing it with the given SSE>=2 store
 * instruction.
 */
#define COPY16(dstp, srcp, load, store) \
    asm volatile (                      \
        load "  0(%[src]), %%xmm1\n"    \
        store " %%xmm1,    0(%[dst])\n" \
        : : [dst]"r"(dstp), [src]"r"(srcp) : "memory", "xmm1")

#define COPY64(dstp, srcp, load, store) \
    asm volatile (                      \
        load "  0(%[src]), %%xmm1\n"    \
        load " 16(%[src]), %%xmm2\n"    \
        load " 32(%[src]), %%xmm3\n"    \
        load " 48(%[src]), %%xmm4\n"    \
        store " %%xmm1,    0(%[dst])\n" \
        store " %%xmm2,   16(%[dst])\n" \
        store " %%xmm3,   32(%[dst])\n" \
        store " %%xmm4,   48(%[dst])\n" \
        : : [dst]"r"(dstp), [src]"r"(srcp) : "memory", "xmm1", "xmm2", "xmm3", "xmm4")

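/* When the file is built with the matching -msse* flag, vlc_cpu.h resolves
 * these vlc_CPU_*() checks at compile time; otherwise redefine them to test
 * the runtime capability flags passed around in the "cpu" parameter. */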
#ifndef __SSE4_1__
# undef vlc_CPU_SSE4_1
# define vlc_CPU_SSE4_1() ((cpu & VLC_CPU_SSE4_1) != 0)
#endif

#ifndef __SSSE3__
# undef vlc_CPU_SSSE3
# define vlc_CPU_SSSE3() ((cpu & VLC_CPU_SSSE3) != 0)
#endif

#ifndef __SSE2__
# undef vlc_CPU_SSE2
# define vlc_CPU_SSE2() ((cpu & VLC_CPU_SSE2) != 0)
#endif

/* Optimized copy from "Uncacheable Speculative Write Combining" memory
 * as used by some video surfaces.
 * XXX It is really efficient only when SSE4.1 is available.
 */
VLC_SSE
static void CopyFromUswc(uint8_t *dst, size_t dst_pitch,
                         const uint8_t *src, size_t src_pitch,
                         unsigned width, unsigned height,
                         unsigned cpu)
{
#if defined (__SSE4_1__) || !defined(CAN_COMPILE_SSE4_1)
    VLC_UNUSED(cpu);
#endif
    assert(((intptr_t)dst & 0x0f) == 0 && (dst_pitch & 0x0f) == 0);

    asm volatile ("mfence");

    for (unsigned y = 0; y < height; y++) {
        /* Number of bytes until src is 16-byte aligned */
        const unsigned unaligned = (-(uintptr_t)src) & 0x0f;
        unsigned x = unaligned;

#ifdef CAN_COMPILE_SSE4_1
        if (vlc_CPU_SSE4_1()) {
            /* movntdqa streaming loads are the fast way to read USWC
             * memory; they require a 16-byte aligned source. */
            if (!unaligned) {
                for (; x+63 < width; x += 64)
                    COPY64(&dst[x], &src[x], "movntdqa", "movdqa");
            } else {
                /* Copy the unaligned head with one unaligned 16-byte load;
                 * the main loop then restarts from the aligned offset. */
                COPY16(dst, src, "movdqu", "movdqa");
                for (; x+63 < width; x += 64)
                    COPY64(&dst[x], &src[x], "movntdqa", "movdqu");
            }
        } else
#endif
        {
            if (!unaligned) {
                for (; x+63 < width; x += 64)
                    COPY64(&dst[x], &src[x], "movdqa", "movdqa");
            } else {
                COPY16(dst, src, "movdqu", "movdqa");
                for (; x+63 < width; x += 64)
                    COPY64(&dst[x], &src[x], "movdqa", "movdqu");
            }
        }

        for (; x < width; x++)
            dst[x] = src[x];

        src += src_pitch;
        dst += dst_pitch;
    }
    asm volatile ("mfence");
}

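/* Copy a rectangle from the cacheable bounce buffer to the destination,
 * using non-temporal stores when the destination is 16-byte aligned. */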
VLC_SSE
static void Copy2d(uint8_t *dst, size_t dst_pitch,
                   const uint8_t *src, size_t src_pitch,
                   unsigned width, unsigned height)
{
    assert(((intptr_t)src & 0x0f) == 0 && (src_pitch & 0x0f) == 0);

    for (unsigned y = 0; y < height; y++) {
        unsigned x = 0;

        bool unaligned = ((intptr_t)dst & 0x0f) != 0;
        if (!unaligned) {
            for (; x+63 < width; x += 64)
                COPY64(&dst[x], &src[x], "movdqa", "movntdq");
        } else {
            for (; x+63 < width; x += 64)
                COPY64(&dst[x], &src[x], "movdqa", "movdqu");
        }

        for (; x < width; x++)
            dst[x] = src[x];

        src += src_pitch;
        dst += dst_pitch;
    }
}

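/* De-interleave an NV12-style UV plane into separate U and V planes:
 * SSSE3 does it with a single pshufb per register, plain SSE2 with
 * mask/shift/pack arithmetic. */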
VLC_SSE
static void SSE_SplitUV(uint8_t *dstu, size_t dstu_pitch,
                        uint8_t *dstv, size_t dstv_pitch,
                        const uint8_t *src, size_t src_pitch,
                        unsigned width, unsigned height, unsigned cpu)
{
#if defined(__SSSE3__) || !defined (CAN_COMPILE_SSSE3)
    VLC_UNUSED(cpu);
#endif
    const uint8_t shuffle[] = { 0, 2, 4, 6, 8, 10, 12, 14,
                                1, 3, 5, 7, 9, 11, 13, 15 };
    const uint8_t mask[] = { 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
                             0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00 };

    assert(((intptr_t)src & 0xf) == 0 && (src_pitch & 0x0f) == 0);

    for (unsigned y = 0; y < height; y++) {
        unsigned x = 0;

#define LOAD64 \
    "movdqa  0(%[src]), %%xmm0\n" \
    "movdqa 16(%[src]), %%xmm1\n" \
    "movdqa 32(%[src]), %%xmm2\n" \
    "movdqa 48(%[src]), %%xmm3\n"

#define STORE2X32 \
    "movq   %%xmm0,   0(%[dst1])\n" \
    "movq   %%xmm1,   8(%[dst1])\n" \
    "movhpd %%xmm0,   0(%[dst2])\n" \
    "movhpd %%xmm1,   8(%[dst2])\n" \
    "movq   %%xmm2,  16(%[dst1])\n" \
    "movq   %%xmm3,  24(%[dst1])\n" \
    "movhpd %%xmm2,  16(%[dst2])\n" \
    "movhpd %%xmm3,  24(%[dst2])\n"

#ifdef CAN_COMPILE_SSSE3
        if (vlc_CPU_SSSE3())
        {
            for (x = 0; x < (width & ~31); x += 32) {
                asm volatile (
                    "movdqu (%[shuffle]), %%xmm7\n"
                    LOAD64
                    "pshufb  %%xmm7, %%xmm0\n"
                    "pshufb  %%xmm7, %%xmm1\n"
                    "pshufb  %%xmm7, %%xmm2\n"
                    "pshufb  %%xmm7, %%xmm3\n"
                    STORE2X32
                    : : [dst1]"r"(&dstu[x]), [dst2]"r"(&dstv[x]), [src]"r"(&src[2*x]), [shuffle]"r"(shuffle) : "memory", "xmm0", "xmm1", "xmm2", "xmm3", "xmm7");
            }
        } else
#endif
        {
            for (x = 0; x < (width & ~31); x += 32) {
                /* Isolate the even (U) bytes with the mask, shift the odd
                 * (V) bytes down, then pack both back to bytes.  packuswb
                 * leaves the V bytes in the low quadwords, so here dst1/dst2
                 * are bound to dstv/dstu, the reverse of the SSSE3 path. */
                asm volatile (
                    "movdqu (%[mask]), %%xmm7\n"
                    LOAD64
                    "movdqa   %%xmm0, %%xmm4\n"
                    "movdqa   %%xmm1, %%xmm5\n"
                    "movdqa   %%xmm2, %%xmm6\n"
                    "psrlw    $8,     %%xmm0\n"
                    "psrlw    $8,     %%xmm1\n"
                    "pand     %%xmm7, %%xmm4\n"
                    "pand     %%xmm7, %%xmm5\n"
                    "pand     %%xmm7, %%xmm6\n"
                    "packuswb %%xmm4, %%xmm0\n"
                    "packuswb %%xmm5, %%xmm1\n"
                    "pand     %%xmm3, %%xmm7\n"
                    "psrlw    $8,     %%xmm2\n"
                    "psrlw    $8,     %%xmm3\n"
                    "packuswb %%xmm6, %%xmm2\n"
                    "packuswb %%xmm7, %%xmm3\n"
                    STORE2X32
                    : : [dst2]"r"(&dstu[x]), [dst1]"r"(&dstv[x]), [src]"r"(&src[2*x]), [mask]"r"(mask) : "memory", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7");
            }
        }
#undef STORE2X32
#undef LOAD64

        for (; x < width; x++) {
            dstu[x] = src[2*x+0];
            dstv[x] = src[2*x+1];
        }
        src  += src_pitch;
        dstu += dstu_pitch;
        dstv += dstv_pitch;
    }
}

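/* Copy a whole plane: when the pitches match, one big memcpy; otherwise
 * stream chunks of lines from USWC memory into the bounce buffer, then
 * copy each chunk to its destination lines. */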
static void SSE_CopyPlane(uint8_t *dst, size_t dst_pitch,
                          const uint8_t *src, size_t src_pitch,
                          uint8_t *cache, size_t cache_size,
                          unsigned height, unsigned cpu)
{
    const unsigned w16 = (src_pitch+15) & ~15;
    const unsigned hstep = cache_size / w16;
    assert(hstep > 0);

    if (src_pitch == dst_pitch)
        memcpy(dst, src, src_pitch * height);
    else
    for (unsigned y = 0; y < height; y += hstep) {
        const unsigned hblock = __MIN(hstep, height - y);

        /* Copy a bunch of lines into our cache */
        CopyFromUswc(cache, w16,
                     src, src_pitch,
                     src_pitch, hblock, cpu);

        /* Copy from our cache to the destination */
        Copy2d(dst, dst_pitch,
               cache, w16,
               src_pitch, hblock);

        /* */
        src += src_pitch * hblock;
        dst += dst_pitch * hblock;
    }
}

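/* Same chunked strategy as SSE_CopyPlane, with each cached chunk
 * de-interleaved into the two destination planes by SSE_SplitUV(). */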
static void SSE_SplitPlanes(uint8_t *dstu, size_t dstu_pitch,
                            uint8_t *dstv, size_t dstv_pitch,
                            const uint8_t *src, size_t src_pitch,
                            uint8_t *cache, size_t cache_size,
                            unsigned height, unsigned cpu)
{
    const unsigned w16 = (src_pitch+15) & ~15;
    const unsigned hstep = cache_size / w16;
    assert(hstep > 0);

    for (unsigned y = 0; y < height; y += hstep) {
        const unsigned hblock = __MIN(hstep, height - y);

        /* Copy a bunch of lines into our cache */
        CopyFromUswc(cache, w16, src, src_pitch,
                     src_pitch, hblock, cpu);

        /* Copy from our cache to the destination */
        SSE_SplitUV(dstu, dstu_pitch, dstv, dstv_pitch,
                    cache, w16, src_pitch, hblock, cpu);

        /* */
        src  += src_pitch  * hblock;
        dstu += dstu_pitch * hblock;
        dstv += dstv_pitch * hblock;
    }
}

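/* NV12 to YV12: note that the chroma destinations are passed swapped
 * (p[2] receives U, p[1] receives V) to match YV12's V-before-U plane
 * order. */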
static void SSE_CopyFromNv12ToYv12(picture_t *dst,
                                   uint8_t *src[2], size_t src_pitch[2],
                                   unsigned height,
                                   copy_cache_t *cache, unsigned cpu)
{
    SSE_CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
                  src[0], src_pitch[0],
                  cache->buffer, cache->size,
                  height, cpu);
    SSE_SplitPlanes(dst->p[2].p_pixels, dst->p[2].i_pitch,
                    dst->p[1].p_pixels, dst->p[1].i_pitch,
                    src[1], src_pitch[1],
                    cache->buffer, cache->size,
                    (height+1)/2, cpu);
    asm volatile ("emms");
}

static void SSE_CopyFromYv12ToYv12(picture_t *dst,
                                   uint8_t *src[3], size_t src_pitch[3],
                                   unsigned height,
                                   copy_cache_t *cache, unsigned cpu)
{
    for (unsigned n = 0; n < 3; n++) {
        const unsigned d = n > 0 ? 2 : 1;
        SSE_CopyPlane(dst->p[n].p_pixels, dst->p[n].i_pitch,
                      src[n], src_pitch[n],
                      cache->buffer, cache->size,
                      (height+d-1)/d, cpu);
    }
    asm volatile ("emms");
}

static void SSE_CopyFromNv12ToNv12(picture_t *dst,
                                   uint8_t *src[2], size_t src_pitch[2],
                                   unsigned height,
                                   copy_cache_t *cache, unsigned cpu)
{
    SSE_CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
                  src[0], src_pitch[0],
                  cache->buffer, cache->size,
                  height, cpu);
    SSE_CopyPlane(dst->p[1].p_pixels, dst->p[1].i_pitch,
                  src[1], src_pitch[1],
                  cache->buffer, cache->size,
                  height/2, cpu);
    asm volatile ("emms");
}

static void
SSE_CopyFromNv12ToI420(picture_t *dest, uint8_t *src[2],
                       size_t src_pitch[2], unsigned int height,
                       copy_cache_t *cache, unsigned int cpu)
{
    SSE_CopyPlane(dest->p[0].p_pixels, dest->p[0].i_pitch,
                  src[0], src_pitch[0], cache->buffer, cache->size,
                  height, cpu);
    SSE_SplitPlanes(dest->p[1].p_pixels, dest->p[1].i_pitch,
                    dest->p[2].p_pixels, dest->p[2].i_pitch,
                    src[1], src_pitch[1], cache->buffer, cache->size,
                    height / 2, cpu);
    asm volatile ("emms");
}

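/* I420 to NV12: the Y plane is copied through the bounce buffer; the U and
 * V planes are interleaved into the single UV plane in plain C. */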
static void SSE_CopyFromI420ToNv12(picture_t *dst,
                                   uint8_t *src[3], size_t src_pitch[3],
                                   unsigned height,
                                   copy_cache_t *cache, unsigned cpu)
{
    SSE_CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
                  src[0], src_pitch[0],
                  cache->buffer, cache->size,
                  height, cpu);

    /* TODO optimise the plane merging */
    const unsigned copy_lines = height / 2;
    const unsigned copy_pitch = src_pitch[1];

    const int i_extra_pitch_uv = dst->p[1].i_pitch - 2 * copy_pitch;
    const int i_extra_pitch_u  = src_pitch[U_PLANE] - copy_pitch;
    const int i_extra_pitch_v  = src_pitch[V_PLANE] - copy_pitch;

    uint8_t *dstUV = dst->p[1].p_pixels;
    uint8_t *srcU  = src[U_PLANE];
    uint8_t *srcV  = src[V_PLANE];
    for ( unsigned int line = 0; line < copy_lines; line++ )
    {
        for ( unsigned int col = 0; col < copy_pitch; col++ )
        {
            *dstUV++ = *srcU++;
            *dstUV++ = *srcV++;
        }
        dstUV += i_extra_pitch_uv;
        srcU  += i_extra_pitch_u;
        srcV  += i_extra_pitch_v;
    }
    asm volatile ("emms");
}
#undef COPY64
#undef COPY16
#endif /* CAN_COMPILE_SSE2 */

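/* Plain C fallbacks, used when SSE2 is not available (or not compiled
 * in). */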
static void CopyPlane(uint8_t *dst, size_t dst_pitch,
                      const uint8_t *src, size_t src_pitch,
                      unsigned height)
{
    if (src_pitch == dst_pitch)
        memcpy(dst, src, src_pitch * height);
    else
    for (unsigned y = 0; y < height; y++) {
        memcpy(dst, src, src_pitch);
        src += src_pitch;
        dst += dst_pitch;
    }
}

static void SplitPlanes(uint8_t *dstu, size_t dstu_pitch,
                        uint8_t *dstv, size_t dstv_pitch,
                        const uint8_t *src, size_t src_pitch,
                        unsigned height)
{
    for (unsigned y = 0; y < height; y++) {
        for (unsigned x = 0; x < src_pitch / 2; x++) {
            dstu[x] = src[2*x+0];
            dstv[x] = src[2*x+1];
        }
        src  += src_pitch;
        dstu += dstu_pitch;
        dstv += dstv_pitch;
    }
}

void CopyFromNv12ToYv12(picture_t *dst, uint8_t *src[2], size_t src_pitch[2],
                        unsigned height, copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    unsigned cpu = vlc_CPU();
    if (vlc_CPU_SSE2())
        return SSE_CopyFromNv12ToYv12(dst, src, src_pitch, height, cache, cpu);
#else
    (void) cache;
#endif

    CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
              src[0], src_pitch[0], height);
    SplitPlanes(dst->p[2].p_pixels, dst->p[2].i_pitch,
                dst->p[1].p_pixels, dst->p[1].i_pitch,
                src[1], src_pitch[1], height/2);
}

void CopyFromNv12ToNv12(picture_t *dst, uint8_t *src[2], size_t src_pitch[2],
                        unsigned height, copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    unsigned cpu = vlc_CPU();
    if (vlc_CPU_SSE2())
        return SSE_CopyFromNv12ToNv12(dst, src, src_pitch, height,
                                      cache, cpu);
#else
    (void) cache;
#endif

    CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
              src[0], src_pitch[0], height);
    CopyPlane(dst->p[1].p_pixels, dst->p[1].i_pitch,
              src[1], src_pitch[1], height/2);
}

void CopyFromNv12ToI420(picture_t *dst, uint8_t *src[2], size_t src_pitch[2],
                        unsigned height, copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    unsigned cpu = vlc_CPU();

    if (vlc_CPU_SSE2())
        return SSE_CopyFromNv12ToI420(dst, src, src_pitch, height, cache, cpu);
#else
    VLC_UNUSED(cache);
#endif

    CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
              src[0], src_pitch[0], height);
    SplitPlanes(dst->p[1].p_pixels, dst->p[1].i_pitch,
                dst->p[2].p_pixels, dst->p[2].i_pitch,
                src[1], src_pitch[1], height/2);
}

void CopyFromI420ToNv12(picture_t *dst, uint8_t *src[3], size_t src_pitch[3],
                        unsigned height, copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    unsigned cpu = vlc_CPU();
    if (vlc_CPU_SSE2())
        return SSE_CopyFromI420ToNv12(dst, src, src_pitch, height,
                                      cache, cpu);
#else
    (void) cache;
#endif

    CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
              src[0], src_pitch[0], height);

    const unsigned copy_lines = height / 2;
    const unsigned copy_pitch = src_pitch[1];

    const int i_extra_pitch_uv = dst->p[1].i_pitch - 2 * copy_pitch;
    const int i_extra_pitch_u  = src_pitch[U_PLANE] - copy_pitch;
    const int i_extra_pitch_v  = src_pitch[V_PLANE] - copy_pitch;

    uint8_t *dstUV = dst->p[1].p_pixels;
    uint8_t *srcU  = src[U_PLANE];
    uint8_t *srcV  = src[V_PLANE];
    for ( unsigned int line = 0; line < copy_lines; line++ )
    {
        for ( unsigned int col = 0; col < copy_pitch; col++ )
        {
            *dstUV++ = *srcU++;
            *dstUV++ = *srcV++;
        }
        dstUV += i_extra_pitch_uv;
        srcU  += i_extra_pitch_u;
        srcV  += i_extra_pitch_v;
    }
}

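/* P010 keeps 10-bit samples in the top bits of 16-bit words, while 10-bit
 * I420 keeps them in the low bits, hence the << 6 on every sample. */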
void CopyFromI420_10ToP010(picture_t *dst, uint8_t *src[3], size_t src_pitch[3],
                           unsigned height, copy_cache_t *cache)
{
    (void) cache;

    const int i_extra_pitch_dst_y = (dst->p[0].i_pitch  - src_pitch[0]) / 2;
    const int i_extra_pitch_src_y = (src_pitch[Y_PLANE] - src_pitch[0]) / 2;
    uint16_t *dstY = (uint16_t *) dst->p[0].p_pixels;
    uint16_t *srcY = (uint16_t *) src[Y_PLANE];
    for (unsigned y = 0; y < height; y++) {
        for (unsigned x = 0; x < (src_pitch[0] / 2); x++) {
            *dstY++ = *srcY++ << 6;
        }
        dstY += i_extra_pitch_dst_y;
        srcY += i_extra_pitch_src_y;
    }

    const unsigned copy_lines = height / 2;
    const unsigned copy_pitch = src_pitch[1] / 2;

    const int i_extra_pitch_uv = dst->p[1].i_pitch / 2 - 2 * copy_pitch;
    const int i_extra_pitch_u  = src_pitch[U_PLANE] / 2 - copy_pitch;
    const int i_extra_pitch_v  = src_pitch[V_PLANE] / 2 - copy_pitch;

    uint16_t *dstUV = (uint16_t *) dst->p[1].p_pixels;
    uint16_t *srcU  = (uint16_t *) src[U_PLANE];
    uint16_t *srcV  = (uint16_t *) src[V_PLANE];
    for ( unsigned int line = 0; line < copy_lines; line++ )
    {
        for ( unsigned int col = 0; col < copy_pitch; col++ )
        {
            *dstUV++ = *srcU++ << 6;
            *dstUV++ = *srcV++ << 6;
        }
        dstUV += i_extra_pitch_uv;
        srcU  += i_extra_pitch_u;
        srcV  += i_extra_pitch_v;
    }
}

void CopyFromYv12ToYv12(picture_t *dst, uint8_t *src[3], size_t src_pitch[3],
                        unsigned height, copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    unsigned cpu = vlc_CPU();
    if (vlc_CPU_SSE2())
        return SSE_CopyFromYv12ToYv12(dst, src, src_pitch, height, cache, cpu);
#else
    (void) cache;
#endif

    CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
              src[0], src_pitch[0], height);
    CopyPlane(dst->p[1].p_pixels, dst->p[1].i_pitch,
              src[1], src_pitch[1], height / 2);
    CopyPlane(dst->p[2].p_pixels, dst->p[2].i_pitch,
              src[2], src_pitch[2], height / 2);
}