/*****************************************************************************
 * copy.c: Fast YV12/NV12 copy
 *****************************************************************************
 * Copyright (C) 2010 Laurent Aimar
 * $Id$
 *
 * Authors: Laurent Aimar <fenrir _AT_ videolan _DOT_ org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 *****************************************************************************/

#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#include <vlc_common.h>
#include <vlc_picture.h>
#include <vlc_cpu.h>
#include <assert.h>

#include "copy.h"

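/* Allocate the 64-byte aligned bounce buffer used by the SSE2 copy paths.
 * It is sized to hold at least one full line (width rounded up to 64 bytes)
 * and is never smaller than 8 KiB. */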
int CopyInitCache(copy_cache_t *cache, unsigned width)
{
#ifdef CAN_COMPILE_SSE2
    cache->size = __MAX((width + 0x3f) & ~ 0x3f, 8192);
    cache->buffer = vlc_memalign(64, cache->size);
    if (!cache->buffer)
        return VLC_EGENERIC;
#else
    (void) cache; (void) width;
#endif
    return VLC_SUCCESS;
}

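/* Release the bounce buffer allocated by CopyInitCache(). */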
void CopyCleanCache(copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    vlc_free(cache->buffer);
    cache->buffer = NULL;
    cache->size   = 0;
#else
    (void) cache;
#endif
}

#ifdef CAN_COMPILE_SSE2
/* Copy 16 or 64 bytes from srcp to dstp, loading with the given SSE2+
 * "load" instruction and storing with the given "store" instruction.
 */
#define COPY16(dstp, srcp, load, store) \
    asm volatile (                      \
        load "  0(%[src]), %%xmm1\n"    \
        store " %%xmm1,    0(%[dst])\n" \
        : : [dst]"r"(dstp), [src]"r"(srcp) : "memory", "xmm1")

#define COPY64(dstp, srcp, load, store) \
    asm volatile (                      \
        load "  0(%[src]), %%xmm1\n"    \
        load " 16(%[src]), %%xmm2\n"    \
        load " 32(%[src]), %%xmm3\n"    \
        load " 48(%[src]), %%xmm4\n"    \
        store " %%xmm1,    0(%[dst])\n" \
        store " %%xmm2,   16(%[dst])\n" \
        store " %%xmm3,   32(%[dst])\n" \
        store " %%xmm4,   48(%[dst])\n" \
        : : [dst]"r"(dstp), [src]"r"(srcp) : "memory", "xmm1", "xmm2", "xmm3", "xmm4")

#ifndef __SSE4_1__
# undef vlc_CPU_SSE4_1
# define vlc_CPU_SSE4_1() ((cpu & VLC_CPU_SSE4_1) != 0)
#endif

#ifndef __SSSE3__
# undef vlc_CPU_SSSE3
# define vlc_CPU_SSSE3() ((cpu & VLC_CPU_SSSE3) != 0)
#endif

#ifndef __SSE2__
# undef vlc_CPU_SSE2
# define vlc_CPU_SSE2() ((cpu & VLC_CPU_SSE2) != 0)
#endif

/* Optimized copy from "Uncacheable Speculative Write Combining" memory,
 * as used by some video surfaces.
 * XXX It is only really efficient when SSE4.1 (non-temporal loads) is
 * available.
 */
VLC_SSE
static void CopyFromUswc(uint8_t *dst, size_t dst_pitch,
                         const uint8_t *src, size_t src_pitch,
                         unsigned width, unsigned height,
                         unsigned cpu)
{
#if defined (__SSE4_1__) || !defined(CAN_COMPILE_SSSE3)
    VLC_UNUSED(cpu);
#endif
    assert(((intptr_t)dst & 0x0f) == 0 && (dst_pitch & 0x0f) == 0);

    asm volatile ("mfence");

    for (unsigned y = 0; y < height; y++) {
        const unsigned unaligned = (-(uintptr_t)src) & 0x0f;
        unsigned x = unaligned;

#ifdef CAN_COMPILE_SSE4_1
        if (vlc_CPU_SSE4_1()) {
            if (!unaligned) {
                for (; x+63 < width; x += 64)
                    COPY64(&dst[x], &src[x], "movntdqa", "movdqa");
            } else {
                COPY16(dst, src, "movdqu", "movdqa");
                for (; x+63 < width; x += 64)
                    COPY64(&dst[x], &src[x], "movntdqa", "movdqu");
            }
        } else
#endif
        {
            if (!unaligned) {
                for (; x+63 < width; x += 64)
                    COPY64(&dst[x], &src[x], "movdqa", "movdqa");
            } else {
                COPY16(dst, src, "movdqu", "movdqa");
                for (; x+63 < width; x += 64)
                    COPY64(&dst[x], &src[x], "movdqa", "movdqu");
            }
        }

        for (; x < width; x++)
            dst[x] = src[x];

        src += src_pitch;
        dst += dst_pitch;
    }
    asm volatile ("mfence");
}

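/* Copy a rectangle from the 16-byte aligned cache back to the destination,
 * using non-temporal stores (movntdq) when the destination is also
 * 16-byte aligned. */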
VLC_SSE
static void Copy2d(uint8_t *dst, size_t dst_pitch,
                   const uint8_t *src, size_t src_pitch,
                   unsigned width, unsigned height)
{
    assert(((intptr_t)src & 0x0f) == 0 && (src_pitch & 0x0f) == 0);

    for (unsigned y = 0; y < height; y++) {
        unsigned x = 0;

        bool unaligned = ((intptr_t)dst & 0x0f) != 0;
        if (!unaligned) {
            for (; x+63 < width; x += 64)
                COPY64(&dst[x], &src[x], "movdqa", "movntdq");
        } else {
            for (; x+63 < width; x += 64)
                COPY64(&dst[x], &src[x], "movdqa", "movdqu");
        }

        for (; x < width; x++)
            dst[x] = src[x];

        src += src_pitch;
        dst += dst_pitch;
    }
}

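/* De-interleave a packed UV plane (as found in NV12) from the cache into
 * separate U and V planes, 32 pixels per iteration.  Uses pshufb when SSSE3
 * is available, and a mask/shift/pack sequence on plain SSE2 otherwise. */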
VLC_SSE
static void SSE_SplitUV(uint8_t *dstu, size_t dstu_pitch,
                        uint8_t *dstv, size_t dstv_pitch,
                        const uint8_t *src, size_t src_pitch,
                        unsigned width, unsigned height, unsigned cpu)
{
#if defined(__SSSE3__) || !defined (CAN_COMPILE_SSSE3)
    VLC_UNUSED(cpu);
#endif
    const uint8_t shuffle[] = { 0, 2, 4, 6, 8, 10, 12, 14,
                                1, 3, 5, 7, 9, 11, 13, 15 };
    const uint8_t mask[] = { 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
                             0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00 };

    assert(((intptr_t)src & 0xf) == 0 && (src_pitch & 0x0f) == 0);

    for (unsigned y = 0; y < height; y++) {
        unsigned x = 0;

#define LOAD64 \
    "movdqa  0(%[src]), %%xmm0\n" \
    "movdqa 16(%[src]), %%xmm1\n" \
    "movdqa 32(%[src]), %%xmm2\n" \
    "movdqa 48(%[src]), %%xmm3\n"

#define STORE2X32 \
    "movq   %%xmm0,   0(%[dst1])\n" \
    "movq   %%xmm1,   8(%[dst1])\n" \
    "movhpd %%xmm0,   0(%[dst2])\n" \
    "movhpd %%xmm1,   8(%[dst2])\n" \
    "movq   %%xmm2,  16(%[dst1])\n" \
    "movq   %%xmm3,  24(%[dst1])\n" \
    "movhpd %%xmm2,  16(%[dst2])\n" \
    "movhpd %%xmm3,  24(%[dst2])\n"

#ifdef CAN_COMPILE_SSSE3
        if (vlc_CPU_SSSE3())
        {
            for (x = 0; x < (width & ~31); x += 32) {
                asm volatile (
                    "movdqu (%[shuffle]), %%xmm7\n"
                    LOAD64
                    "pshufb  %%xmm7, %%xmm0\n"
                    "pshufb  %%xmm7, %%xmm1\n"
                    "pshufb  %%xmm7, %%xmm2\n"
                    "pshufb  %%xmm7, %%xmm3\n"
                    STORE2X32
                    : : [dst1]"r"(&dstu[x]), [dst2]"r"(&dstv[x]), [src]"r"(&src[2*x]), [shuffle]"r"(shuffle) : "memory", "xmm0", "xmm1", "xmm2", "xmm3", "xmm7");
            }
        } else
#endif
        {
            for (x = 0; x < (width & ~31); x += 32) {
                asm volatile (
                    "movdqu (%[mask]), %%xmm7\n"
                    LOAD64
                    "movdqa   %%xmm0, %%xmm4\n"
                    "movdqa   %%xmm1, %%xmm5\n"
                    "movdqa   %%xmm2, %%xmm6\n"
                    "psrlw    $8,     %%xmm0\n"
                    "psrlw    $8,     %%xmm1\n"
                    "pand     %%xmm7, %%xmm4\n"
                    "pand     %%xmm7, %%xmm5\n"
                    "pand     %%xmm7, %%xmm6\n"
                    "packuswb %%xmm4, %%xmm0\n"
                    "packuswb %%xmm5, %%xmm1\n"
                    "pand     %%xmm3, %%xmm7\n"
                    "psrlw    $8,     %%xmm2\n"
                    "psrlw    $8,     %%xmm3\n"
                    "packuswb %%xmm6, %%xmm2\n"
                    "packuswb %%xmm7, %%xmm3\n"
                    STORE2X32
                    : : [dst2]"r"(&dstu[x]), [dst1]"r"(&dstv[x]), [src]"r"(&src[2*x]), [mask]"r"(mask) : "memory", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7");
            }
        }
#undef STORE2X32
#undef LOAD64

        for (; x < width; x++) {
            dstu[x] = src[2*x+0];
            dstv[x] = src[2*x+1];
        }
        src  += src_pitch;
        dstu += dstu_pitch;
        dstv += dstv_pitch;
    }
}

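/* Copy a whole plane in slices that fit the bounce buffer: USWC source ->
 * cache with CopyFromUswc(), then cache -> destination with Copy2d().
 * When the pitches match, a single memcpy is used instead. */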
static void SSE_CopyPlane(uint8_t *dst, size_t dst_pitch,
                          const uint8_t *src, size_t src_pitch,
                          uint8_t *cache, size_t cache_size,
                          unsigned height, unsigned cpu)
{
    const unsigned w16 = (src_pitch+15) & ~15;
    const unsigned hstep = cache_size / w16;
    assert(hstep > 0);

    if (src_pitch == dst_pitch)
        memcpy(dst, src, src_pitch * height);
    else
    for (unsigned y = 0; y < height; y += hstep) {
        const unsigned hblock = __MIN(hstep, height - y);

        /* Copy a bunch of lines into our cache */
        CopyFromUswc(cache, w16,
                     src, src_pitch,
                     src_pitch, hblock, cpu);

        /* Copy from our cache to the destination */
        Copy2d(dst, dst_pitch,
               cache, w16,
               src_pitch, hblock);

        /* */
        src += src_pitch * hblock;
        dst += dst_pitch * hblock;
    }
}

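/* Same slicing as SSE_CopyPlane(), but the cached lines are de-interleaved
 * into separate U and V planes with SSE_SplitUV() on the way out. */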
static void SSE_SplitPlanes(uint8_t *dstu, size_t dstu_pitch,
                            uint8_t *dstv, size_t dstv_pitch,
                            const uint8_t *src, size_t src_pitch,
                            uint8_t *cache, size_t cache_size,
                            unsigned height, unsigned cpu)
{
    const unsigned w16 = (src_pitch+15) & ~15;
    const unsigned hstep = cache_size / w16;
    assert(hstep > 0);

    for (unsigned y = 0; y < height; y += hstep) {
        const unsigned hblock = __MIN(hstep, height - y);

        /* Copy a bunch of lines into our cache */
        CopyFromUswc(cache, w16, src, src_pitch,
                     src_pitch, hblock, cpu);

        /* Copy from our cache to the destination */
        SSE_SplitUV(dstu, dstu_pitch, dstv, dstv_pitch,
                    cache, w16, src_pitch, hblock, cpu);

        /* */
        src  += src_pitch  * hblock;
        dstu += dstu_pitch * hblock;
        dstv += dstv_pitch * hblock;
    }
}

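/* NV12 -> YV12: copy the Y plane, then split the interleaved UV plane into
 * the two (half-height) chroma planes. */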
static void SSE_CopyFromNv12ToYv12(picture_t *dst,
                                   uint8_t *src[2], size_t src_pitch[2],
                                   unsigned height,
                                   copy_cache_t *cache, unsigned cpu)
{
    SSE_CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
                  src[0], src_pitch[0],
                  cache->buffer, cache->size,
                  height, cpu);
    SSE_SplitPlanes(dst->p[2].p_pixels, dst->p[2].i_pitch,
                    dst->p[1].p_pixels, dst->p[1].i_pitch,
                    src[1], src_pitch[1],
                    cache->buffer, cache->size,
                    (height+1)/2, cpu);
    asm volatile ("emms");
}

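/* YV12 -> YV12: plane-by-plane copy; the chroma planes are half height
 * (rounded up). */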
static void SSE_CopyFromYv12ToYv12(picture_t *dst,
                                   uint8_t *src[3], size_t src_pitch[3],
                                   unsigned height,
                                   copy_cache_t *cache, unsigned cpu)
{
    for (unsigned n = 0; n < 3; n++) {
        const unsigned d = n > 0 ? 2 : 1;
        SSE_CopyPlane(dst->p[n].p_pixels, dst->p[n].i_pitch,
                      src[n], src_pitch[n],
                      cache->buffer, cache->size,
                      (height+d-1)/d, cpu);
    }
    asm volatile ("emms");
}

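/* NV12 -> NV12: straight copy of the Y plane and of the interleaved
 * (half-height) UV plane. */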
static void SSE_CopyFromNv12ToNv12(picture_t *dst,
                                   uint8_t *src[2], size_t src_pitch[2],
                                   unsigned height,
                                   copy_cache_t *cache, unsigned cpu)
{
    SSE_CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
                  src[0], src_pitch[0],
                  cache->buffer, cache->size,
                  height, cpu);
    SSE_CopyPlane(dst->p[1].p_pixels, dst->p[1].i_pitch,
                  src[1], src_pitch[1],
                  cache->buffer, cache->size,
                  height/2, cpu);
    asm volatile ("emms");
}

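/* I420 -> NV12: copy the Y plane with the SSE path, then interleave the
 * U and V planes into the packed UV plane in plain C. */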
static void SSE_CopyFromI420ToNv12(picture_t *dst,
                                   uint8_t *src[3], size_t src_pitch[3],
                                   unsigned height,
                                   copy_cache_t *cache, unsigned cpu)
{
    SSE_CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
                  src[0], src_pitch[0],
                  cache->buffer, cache->size,
                  height, cpu);

    /* TODO optimise the plane merging */
    const unsigned copy_lines = height / 2;
    const unsigned copy_pitch = src_pitch[1];

    const int i_extra_pitch_uv = dst->p[1].i_pitch - 2 * copy_pitch;
    const int i_extra_pitch_u  = src_pitch[U_PLANE] - copy_pitch;
    const int i_extra_pitch_v  = src_pitch[V_PLANE] - copy_pitch;

    uint8_t *dstUV = dst->p[1].p_pixels;
    uint8_t *srcU  = src[U_PLANE];
    uint8_t *srcV  = src[V_PLANE];
    for ( unsigned int line = 0; line < copy_lines; line++ )
    {
        for ( unsigned int col = 0; col < copy_pitch; col++ )
        {
            *dstUV++ = *srcU++;
            *dstUV++ = *srcV++;
        }
        dstUV += i_extra_pitch_uv;
        srcU  += i_extra_pitch_u;
        srcV  += i_extra_pitch_v;
    }
    asm volatile ("emms");
}
#undef COPY64
#endif /* CAN_COMPILE_SSE2 */

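/* Plain C fallback: copy a plane line by line, or with a single memcpy when
 * the source and destination pitches match. */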
static void CopyPlane(uint8_t *dst, size_t dst_pitch,
                      const uint8_t *src, size_t src_pitch,
                      unsigned height)
{
    if (src_pitch == dst_pitch)
        memcpy(dst, src, src_pitch * height);
    else
    for (unsigned y = 0; y < height; y++) {
        memcpy(dst, src, src_pitch);
        src += src_pitch;
        dst += dst_pitch;
    }
}

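/* Plain C fallback: split a packed UV plane into separate U and V planes. */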
static void SplitPlanes(uint8_t *dstu, size_t dstu_pitch,
                        uint8_t *dstv, size_t dstv_pitch,
                        const uint8_t *src, size_t src_pitch,
                        unsigned height)
{
    for (unsigned y = 0; y < height; y++) {
        for (unsigned x = 0; x < src_pitch / 2; x++) {
            dstu[x] = src[2*x+0];
            dstv[x] = src[2*x+1];
        }
        src  += src_pitch;
        dstu += dstu_pitch;
        dstv += dstv_pitch;
    }
}

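/* Exported copy helpers.  Where an SSE2 implementation exists, it is
 * selected at run time; otherwise the plain C paths above are used. */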
void CopyFromNv12ToYv12(picture_t *dst, uint8_t *src[2], size_t src_pitch[2],
                        unsigned height, copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    unsigned cpu = vlc_CPU();
    if (vlc_CPU_SSE2())
        return SSE_CopyFromNv12ToYv12(dst, src, src_pitch, height, cache, cpu);
#else
    (void) cache;
#endif

    CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
              src[0], src_pitch[0], height);
    SplitPlanes(dst->p[2].p_pixels, dst->p[2].i_pitch,
                dst->p[1].p_pixels, dst->p[1].i_pitch,
                src[1], src_pitch[1], height/2);
}

void CopyFromNv12ToNv12(picture_t *dst, uint8_t *src[2], size_t src_pitch[2],
                        unsigned height, copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    unsigned cpu = vlc_CPU();
    if (vlc_CPU_SSE2())
        return SSE_CopyFromNv12ToNv12(dst, src, src_pitch, height,
                                      cache, cpu);
#else
    (void) cache;
#endif

    CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
              src[0], src_pitch[0], height);
    CopyPlane(dst->p[1].p_pixels, dst->p[1].i_pitch,
              src[1], src_pitch[1], height/2);
}

void CopyFromNv12ToI420(picture_t *dst, uint8_t *src[2], size_t src_pitch[2],
                        unsigned height)
{
    CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
              src[0], src_pitch[0], height);
    SplitPlanes(dst->p[1].p_pixels, dst->p[1].i_pitch,
                dst->p[2].p_pixels, dst->p[2].i_pitch,
                src[1], src_pitch[1], height/2);
}

void CopyFromI420ToNv12(picture_t *dst, uint8_t *src[3], size_t src_pitch[3],
                        unsigned height, copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    unsigned cpu = vlc_CPU();
    if (vlc_CPU_SSE2())
        return SSE_CopyFromI420ToNv12(dst, src, src_pitch, height,
                                      cache, cpu);
#else
    (void) cache;
#endif

    CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
              src[0], src_pitch[0], height);

    const unsigned copy_lines = height / 2;
    const unsigned copy_pitch = src_pitch[1];

    const int i_extra_pitch_uv = dst->p[1].i_pitch - 2 * copy_pitch;
    const int i_extra_pitch_u  = src_pitch[U_PLANE] - copy_pitch;
    const int i_extra_pitch_v  = src_pitch[V_PLANE] - copy_pitch;

    uint8_t *dstUV = dst->p[1].p_pixels;
    uint8_t *srcU  = src[U_PLANE];
    uint8_t *srcV  = src[V_PLANE];
    for ( unsigned int line = 0; line < copy_lines; line++ )
    {
        for ( unsigned int col = 0; col < copy_pitch; col++ )
        {
            *dstUV++ = *srcU++;
            *dstUV++ = *srcV++;
        }
        dstUV += i_extra_pitch_uv;
        srcU  += i_extra_pitch_u;
        srcV  += i_extra_pitch_v;
    }
}

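/* 10-bit I420 -> P010: P010 keeps each 10-bit sample in the most significant
 * bits of a 16-bit word, hence the << 6 shift; the U and V planes are
 * interleaved into the packed UV plane. */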
void CopyFromI420_10ToP010(picture_t *dst, uint8_t *src[3], size_t src_pitch[3],
                           unsigned height, copy_cache_t *cache)
{
    (void) cache;

    const int i_extra_pitch_dst_y = (dst->p[0].i_pitch  - src_pitch[0]) / 2;
    const int i_extra_pitch_src_y = (src_pitch[Y_PLANE] - src_pitch[0]) / 2;
    uint16_t *dstY = (uint16_t *) dst->p[0].p_pixels;
    uint16_t *srcY = (uint16_t *) src[Y_PLANE];
    for (unsigned y = 0; y < height; y++) {
        for (unsigned x = 0; x < (src_pitch[0] / 2); x++) {
            *dstY++ = *srcY++ << 6;
        }
        dstY += i_extra_pitch_dst_y;
        srcY += i_extra_pitch_src_y;
    }

    const unsigned copy_lines = height / 2;
    const unsigned copy_pitch = src_pitch[1] / 2;

    const int i_extra_pitch_uv = dst->p[1].i_pitch / 2 - 2 * copy_pitch;
    const int i_extra_pitch_u  = src_pitch[U_PLANE] / 2 - copy_pitch;
    const int i_extra_pitch_v  = src_pitch[V_PLANE] / 2 - copy_pitch;

    uint16_t *dstUV = (uint16_t *) dst->p[1].p_pixels;
    uint16_t *srcU  = (uint16_t *) src[U_PLANE];
    uint16_t *srcV  = (uint16_t *) src[V_PLANE];
    for ( unsigned int line = 0; line < copy_lines; line++ )
    {
        for ( unsigned int col = 0; col < copy_pitch; col++ )
        {
            *dstUV++ = *srcU++ << 6;
            *dstUV++ = *srcV++ << 6;
        }
        dstUV += i_extra_pitch_uv;
        srcU  += i_extra_pitch_u;
        srcV  += i_extra_pitch_v;
    }
}

void CopyFromYv12ToYv12(picture_t *dst, uint8_t *src[3], size_t src_pitch[3],
                        unsigned height, copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    unsigned cpu = vlc_CPU();
    if (vlc_CPU_SSE2())
        return SSE_CopyFromYv12ToYv12(dst, src, src_pitch, height, cache, cpu);
#else
    (void) cache;
#endif

    CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
              src[0], src_pitch[0], height);
    CopyPlane(dst->p[1].p_pixels, dst->p[1].i_pitch,
              src[1], src_pitch[1], height / 2);
    CopyPlane(dst->p[2].p_pixels, dst->p[2].i_pitch,
              src[2], src_pitch[2], height / 2);
}