/*
 * AltiVec acceleration for colorspace conversion
 *
 * copyright (C) 2004 Marc Hoffman <marc.hoffman@analog.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * Convert I420/YV12 to RGB in various formats.
 * It rejects images that are not in a 420 format,
 * images whose width is not a multiple of 16,
 * and images whose height is not a multiple of 2.
 * Rejected cases fall back to the C code.
 *
 * Lots of optimizations to be done here.
 *
 * 1. Need to fix the saturation code. I just couldn't get it to fly with packs
 * and adds, so we currently use max/min to clip.
 *
 * 2. The inefficient use of chroma loading needs a bit of brushing up.
 *
 * 3. Analysis of pipeline stalls needs to be done. Use Shark to identify
 * pipeline stalls.
 *
 *
 * MODIFIED to calculate coefficients from the currently selected color space.
 * MODIFIED core to be a macro where you specify the output format.
 * ADDED UYVY conversion, which is never called due to something in swscale.
 * CORRECTED algorithm selection to be strict on input formats.
 * ADDED runtime detection of AltiVec.
 *
 * ADDED altivec_yuv2packedX vertical scaler + RGB converter
 *
 * March 27, 2004
 * PERFORMANCE ANALYSIS
 *
 * The C version uses 25% of the processor, or ~250 MIPS, for the D1 rawvideo
 * used as a test.
 * The AltiVec version uses 10% of the processor, or ~100 MIPS, for the same
 * D1 sequence.
 *
 * 720 * 480 * 30 is roughly 10 Mpixel/s,
 *
 * so we have roughly 10 clocks per pixel. This is too high, something has
 * to be wrong.
 *
 * OPTIMIZED the clip code to utilize vec_max and vec_packs, removing the
 * need for vec_min.
 *
 * OPTIMIZED DST OUTPUT cache/DMA controls. We are pretty much guaranteed to
 * have the input video frame; it was just decompressed, so it probably resides
 * in the L1 cache. However, we are creating the output video stream. This needs
 * to use the DSTST instruction to optimize for the cache. We couple this with
 * the fact that we are not going to be visiting the input buffer again, so we
 * mark it Least Recently Used. This shaves 25% of the processor cycles off.
 *
 * Now memcpy is the largest MIPS consumer in the system, probably due
 * to the inefficient X11 stuff.
 *
 * GL libraries seem to be very slow on this machine, a 1.33 GHz PB running
 * Jaguar; this is not the case for my 1 GHz PB. I thought it might be
 * a versioning issue, however I have libGL.1.2.dylib for both
 * machines. (We need to figure this out now.)
 *
 * GL2 libraries work now with the patch for RGB32.
 *
 * NOTE: the quartz vo driver ARGB32_to_RGB24 consumes 30% of the processor.
 *
 * Integrated luma prescaling for saturation/contrast/brightness
 * adjustment.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <assert.h>

#include "config.h"
#include "libswscale/rgb2rgb.h"
#include "libswscale/swscale.h"
#include "libswscale/swscale_internal.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "yuv2rgb_altivec.h"

#undef PROFILE_THE_BEAST
#undef INC_SCALING

typedef unsigned char ubyte;
typedef signed char   sbyte;

/* RGB interleaver: 16 planar pels with 8-bit samples per channel in
 * homogeneous vector registers x0, x1, x2 are interleaved using the
 * following technique:
 *
 *    o0 = vec_mergeh(x0, x1);
 *    o1 = vec_perm(o0, x2, perm_rgb_0);
 *    o2 = vec_perm(o0, x2, perm_rgb_1);
 *    o3 = vec_mergel(x0, x1);
 *    o4 = vec_perm(o3, o2, perm_rgb_2);
 *    o5 = vec_perm(o3, o2, perm_rgb_3);
 *
 * perm_rgb_0:   o0(RG).h v1(B) --> o1*
 *            0   1  2   3   4
 *           rgbr|gbrg|brgb|rgbr
 *           0010 0100 1001 0010
 *           0102 3145 2673 894A
 *
 * perm_rgb_1:   o0(RG).h v1(B) --> o2
 *            0   1  2   3   4
 *           gbrg|brgb|bbbb|bbbb
 *           0100 1001 1111 1111
 *           B5CD 6EF7 89AB CDEF
 *
 * perm_rgb_2:   o3(RG).l o2(rgbB.l) --> o4*
 *            0   1  2   3   4
 *           gbrg|brgb|rgbr|gbrg
 *           1111 1111 0010 0100
 *           89AB CDEF 0182 3945
 *
 * perm_rgb_3:   o3(RG).l o2(rgbB.l) --> o5*
 *            0   1  2   3   4
 *           brgb|rgbr|gbrg|brgb
 *           1001 0010 0100 1001
 *           a67b 89cA BdCD eEFf
 *
 */
static const vector unsigned char
    perm_rgb_0 = { 0x00, 0x01, 0x10, 0x02, 0x03, 0x11, 0x04, 0x05,
                   0x12, 0x06, 0x07, 0x13, 0x08, 0x09, 0x14, 0x0a },
    perm_rgb_1 = { 0x0b, 0x15, 0x0c, 0x0d, 0x16, 0x0e, 0x0f, 0x17,
                   0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
    perm_rgb_2 = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
                   0x00, 0x01, 0x18, 0x02, 0x03, 0x19, 0x04, 0x05 },
    perm_rgb_3 = { 0x1a, 0x06, 0x07, 0x1b, 0x08, 0x09, 0x1c, 0x0a,
                   0x0b, 0x1d, 0x0c, 0x0d, 0x1e, 0x0e, 0x0f, 0x1f };
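
/* For reference, the net effect of vec_merge3() below on three 16-byte
 * planes is a plain byte-wise interleave into 48 packed bytes (three
 * 16-byte stores). A rough scalar sketch, for illustration only and not
 * part of the build:
 *
 *     // in: p0[16], p1[16], p2[16]   out: dst[48]
 *     for (int i = 0; i < 16; i++) {
 *         dst[3 * i + 0] = p0[i];
 *         dst[3 * i + 1] = p1[i];
 *         dst[3 * i + 2] = p2[i];
 *     }
 *
 * The perm_rgb_* tables above encode exactly this byte shuffle for the
 * vec_perm()-based implementation; vec_mstrgb24/vec_mstbgr24 just pick the
 * plane order that yields RGB or BGR triplets.
 */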

#define vec_merge3(x2, x1, x0, y0, y1, y2)     \
    do {                                       \
        __typeof__(x0) o0, o2, o3;             \
        o0 = vec_mergeh(x0, x1);               \
        y0 = vec_perm(o0, x2, perm_rgb_0);     \
        o2 = vec_perm(o0, x2, perm_rgb_1);     \
        o3 = vec_mergel(x0, x1);               \
        y1 = vec_perm(o3, o2, perm_rgb_2);     \
        y2 = vec_perm(o3, o2, perm_rgb_3);     \
    } while (0)

#define vec_mstbgr24(x0, x1, x2, ptr)          \
    do {                                       \
        __typeof__(x0) _0, _1, _2;             \
        vec_merge3(x0, x1, x2, _0, _1, _2);    \
        vec_st(_0, 0, ptr++);                  \
        vec_st(_1, 0, ptr++);                  \
        vec_st(_2, 0, ptr++);                  \
    } while (0)

#define vec_mstrgb24(x0, x1, x2, ptr)          \
    do {                                       \
        __typeof__(x0) _0, _1, _2;             \
        vec_merge3(x2, x1, x0, _0, _1, _2);    \
        vec_st(_0, 0, ptr++);                  \
        vec_st(_1, 0, ptr++);                  \
        vec_st(_2, 0, ptr++);                  \
    } while (0)

/* pack the pixels in rgb0 format
 * msb R
 * lsb 0
 */
#define vec_mstrgb32(T, x0, x1, x2, x3, ptr)                            \
    do {                                                                \
        T _0, _1, _2, _3;                                               \
        _0 = vec_mergeh(x0, x1);                                        \
        _1 = vec_mergeh(x2, x3);                                        \
        _2 = (T) vec_mergeh((vector unsigned short) _0,                 \
                            (vector unsigned short) _1);                \
        _3 = (T) vec_mergel((vector unsigned short) _0,                 \
                            (vector unsigned short) _1);                \
        vec_st(_2, 0 * 16, (T *) ptr);                                  \
        vec_st(_3, 1 * 16, (T *) ptr);                                  \
        _0 = vec_mergel(x0, x1);                                        \
        _1 = vec_mergel(x2, x3);                                        \
        _2 = (T) vec_mergeh((vector unsigned short) _0,                 \
                            (vector unsigned short) _1);                \
        _3 = (T) vec_mergel((vector unsigned short) _0,                 \
                            (vector unsigned short) _1);                \
        vec_st(_2, 2 * 16, (T *) ptr);                                  \
        vec_st(_3, 3 * 16, (T *) ptr);                                  \
        ptr += 4;                                                       \
    } while (0)
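
/* For reference: the two-level merge in vec_mstrgb32() stores, for each of
 * the 16 input pels, the bytes x0[i], x1[i], x2[i], x3[i] in that order.
 * A rough scalar sketch, for illustration only:
 *
 *     // in: x0[16], x1[16], x2[16], x3[16]   out: dst[64]
 *     for (int i = 0; i < 16; i++) {
 *         dst[4 * i + 0] = x0[i];
 *         dst[4 * i + 1] = x1[i];
 *         dst[4 * i + 2] = x2[i];
 *         dst[4 * i + 3] = x3[i];
 *     }
 *
 * The out_abgr/out_bgra/out_rgba/out_argb macros further down simply pass
 * R, G, B and a constant 255 alpha vector in whatever order yields the
 * desired pixel format.
 */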

/*
 * 1     0       1.4021   | | Y |
 * 1    -0.3441 -0.7142   |x| Cb|
 * 1     1.7718  0        | | Cr|
 *
 *
 * Y:      [-128 127]
 * Cb/Cr : [-128 127]
 *
 * Typical YUV conversion works on Y: 0-255; this version has been
 * optimized for JPEG decoding.
 */

#define vec_unh(x)                                                      \
    (vector signed short)                                               \
        vec_perm(x, (__typeof__(x)) { 0 },                              \
                 ((vector unsigned char) {                              \
                     0x10, 0x00, 0x10, 0x01, 0x10, 0x02, 0x10, 0x03,    \
                     0x10, 0x04, 0x10, 0x05, 0x10, 0x06, 0x10, 0x07 }))

#define vec_unl(x)                                                      \
    (vector signed short)                                               \
        vec_perm(x, (__typeof__(x)) { 0 },                              \
                 ((vector unsigned char) {                              \
                     0x10, 0x08, 0x10, 0x09, 0x10, 0x0A, 0x10, 0x0B,    \
                     0x10, 0x0C, 0x10, 0x0D, 0x10, 0x0E, 0x10, 0x0F }))

#define vec_clip_s16(x)                                                 \
    vec_max(vec_min(x, ((vector signed short) {                         \
                    235, 235, 235, 235, 235, 235, 235, 235 })),         \
            ((vector signed short) { 16, 16, 16, 16, 16, 16, 16, 16 }))

#define vec_packclp(x, y)                                               \
    (vector unsigned char)                                              \
        vec_packs((vector unsigned short)                               \
                      vec_max(x, ((vector signed short) { 0 })),        \
                  (vector unsigned short)                               \
                      vec_max(y, ((vector signed short) { 0 })))
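
/* Scalar equivalent of vec_packclp(), for illustration only: each signed
 * 16-bit input is clamped to [0, 255] and narrowed to an unsigned byte.
 * The vec_max() against zero removes negative values so the cast to
 * unsigned short is safe, and vec_packs() then saturates anything > 255:
 *
 *     static inline unsigned char packclp_1(int v)
 *     {
 *         return v < 0 ? 0 : v > 255 ? 255 : v;
 *     }
 */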

static inline void cvtyuvtoRGB(SwsContext *c, vector signed short Y,
                               vector signed short U, vector signed short V,
                               vector signed short *R, vector signed short *G,
                               vector signed short *B)
{
    vector signed short vx, ux, uvx;

    Y = vec_mradds(Y, c->CY, c->OY);
    U = vec_sub(U, (vector signed short)
                       vec_splat((vector signed short) { 128 }, 0));
    V = vec_sub(V, (vector signed short)
                       vec_splat((vector signed short) { 128 }, 0));

    // ux  = (CBU * (u << c->CSHIFT) + 0x4000) >> 15;
    ux = vec_sl(U, c->CSHIFT);
    *B = vec_mradds(ux, c->CBU, Y);

    // vx  = (CRV * (v << c->CSHIFT) + 0x4000) >> 15;
    vx = vec_sl(V, c->CSHIFT);
    *R = vec_mradds(vx, c->CRV, Y);

    // uvx = ((CGU * u) + (CGV * v)) >> 15;
    uvx = vec_mradds(U, c->CGU, Y);
    *G  = vec_mradds(V, c->CGV, uvx);
}
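
/* Rough scalar model of cvtyuvtoRGB(), for illustration only.
 * vec_mradds(a, b, c) computes ((a * b + 0x4000) >> 15) + c with signed
 * saturation, so per sample the function amounts to:
 *
 *     y = ((Y * CY + 0x4000) >> 15) + OY;
 *     b = ((((U - 128) << CSHIFT) * CBU + 0x4000) >> 15) + y;
 *     r = ((((V - 128) << CSHIFT) * CRV + 0x4000) >> 15) + y;
 *     g = (((U - 128) * CGU + 0x4000) >> 15)
 *       + (((V - 128) * CGV + 0x4000) >> 15) + y;
 *
 * where CY, OY, CRV, CBU, CGU, CGV and CSHIFT are the per-context
 * coefficients set up in ff_yuv2rgb_init_tables_altivec() below.
 */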

/*
 * ------------------------------------------------------------------------------
 * CS converters
 * ------------------------------------------------------------------------------
 */

#define DEFCSP420_CVT(name, out_pixels)                                       \
static int altivec_ ## name(SwsContext *c, const unsigned char **in,          \
                            int *instrides, int srcSliceY, int srcSliceH,     \
                            unsigned char **oplanes, int *outstrides)         \
{                                                                             \
    int w = c->srcW;                                                          \
    int h = srcSliceH;                                                        \
    int i, j;                                                                 \
    int instrides_scl[3];                                                     \
    vector unsigned char y0, y1;                                              \
                                                                              \
    vector signed char u, v;                                                  \
                                                                              \
    vector signed short Y0, Y1, Y2, Y3;                                       \
    vector signed short U, V;                                                 \
    vector signed short vx, ux, uvx;                                          \
    vector signed short vx0, ux0, uvx0;                                       \
    vector signed short vx1, ux1, uvx1;                                       \
    vector signed short R0, G0, B0;                                           \
    vector signed short R1, G1, B1;                                           \
    vector unsigned char R, G, B;                                             \
                                                                              \
    const vector unsigned char *y1ivP, *y2ivP, *uivP, *vivP;                  \
    vector unsigned char align_perm;                                          \
                                                                              \
    vector signed short lCY       = c->CY;                                    \
    vector signed short lOY       = c->OY;                                    \
    vector signed short lCRV      = c->CRV;                                   \
    vector signed short lCBU      = c->CBU;                                   \
    vector signed short lCGU      = c->CGU;                                   \
    vector signed short lCGV      = c->CGV;                                   \
    vector unsigned short lCSHIFT = c->CSHIFT;                                \
                                                                              \
    const ubyte *y1i = in[0];                                                 \
    const ubyte *y2i = in[0] + instrides[0];                                  \
    const ubyte *ui  = in[1];                                                 \
    const ubyte *vi  = in[2];                                                 \
                                                                              \
    vector unsigned char *oute =                                              \
        (vector unsigned char *)                                              \
            (oplanes[0] + srcSliceY * outstrides[0]);                         \
    vector unsigned char *outo =                                              \
        (vector unsigned char *)                                              \
            (oplanes[0] + srcSliceY * outstrides[0] + outstrides[0]);         \
                                                                              \
    /* loop moves y{1, 2}i by w */                                            \
    instrides_scl[0] = instrides[0] * 2 - w;                                  \
    /* loop moves ui by w / 2 */                                              \
    instrides_scl[1] = instrides[1] - w / 2;                                  \
    /* loop moves vi by w / 2 */                                              \
    instrides_scl[2] = instrides[2] - w / 2;                                  \
                                                                              \
    for (i = 0; i < h / 2; i++) {                                             \
        vec_dstst(outo, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 0);       \
        vec_dstst(oute, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 1);       \
                                                                              \
        for (j = 0; j < w / 16; j++) {                                        \
            y1ivP = (const vector unsigned char *) y1i;                       \
            y2ivP = (const vector unsigned char *) y2i;                       \
            uivP  = (const vector unsigned char *) ui;                        \
            vivP  = (const vector unsigned char *) vi;                        \
                                                                              \
            align_perm = vec_lvsl(0, y1i);                                    \
            y0 = (vector unsigned char)                                       \
                     vec_perm(y1ivP[0], y1ivP[1], align_perm);                \
                                                                              \
            align_perm = vec_lvsl(0, y2i);                                    \
            y1 = (vector unsigned char)                                       \
                     vec_perm(y2ivP[0], y2ivP[1], align_perm);                \
                                                                              \
            align_perm = vec_lvsl(0, ui);                                     \
            u = (vector signed char)                                          \
                    vec_perm(uivP[0], uivP[1], align_perm);                   \
                                                                              \
            align_perm = vec_lvsl(0, vi);                                     \
            v = (vector signed char)                                          \
                    vec_perm(vivP[0], vivP[1], align_perm);                   \
                                                                              \
            u = (vector signed char)                                          \
                    vec_sub(u,                                                \
                            (vector signed char)                              \
                                vec_splat((vector signed char) { 128 }, 0));  \
            v = (vector signed char)                                          \
                    vec_sub(v,                                                \
                            (vector signed char)                              \
                                vec_splat((vector signed char) { 128 }, 0));  \
                                                                              \
            U = vec_unpackh(u);                                               \
            V = vec_unpackh(v);                                               \
                                                                              \
            Y0 = vec_unh(y0);                                                 \
            Y1 = vec_unl(y0);                                                 \
            Y2 = vec_unh(y1);                                                 \
            Y3 = vec_unl(y1);                                                 \
                                                                              \
            Y0 = vec_mradds(Y0, lCY, lOY);                                    \
            Y1 = vec_mradds(Y1, lCY, lOY);                                    \
            Y2 = vec_mradds(Y2, lCY, lOY);                                    \
            Y3 = vec_mradds(Y3, lCY, lOY);                                    \
                                                                              \
            /* ux  = (CBU * (u << CSHIFT) + 0x4000) >> 15 */                  \
            ux  = vec_sl(U, lCSHIFT);                                         \
            ux  = vec_mradds(ux, lCBU, (vector signed short) { 0 });          \
            ux0 = vec_mergeh(ux, ux);                                         \
            ux1 = vec_mergel(ux, ux);                                         \
                                                                              \
            /* vx  = (CRV * (v << CSHIFT) + 0x4000) >> 15; */                 \
            vx  = vec_sl(V, lCSHIFT);                                         \
            vx  = vec_mradds(vx, lCRV, (vector signed short) { 0 });          \
            vx0 = vec_mergeh(vx, vx);                                         \
            vx1 = vec_mergel(vx, vx);                                         \
                                                                              \
            /* uvx = ((CGU * u) + (CGV * v)) >> 15 */                         \
            uvx  = vec_mradds(U, lCGU, (vector signed short) { 0 });          \
            uvx  = vec_mradds(V, lCGV, uvx);                                  \
            uvx0 = vec_mergeh(uvx, uvx);                                      \
            uvx1 = vec_mergel(uvx, uvx);                                      \
                                                                              \
            R0 = vec_add(Y0, vx0);                                            \
            G0 = vec_add(Y0, uvx0);                                           \
            B0 = vec_add(Y0, ux0);                                            \
            R1 = vec_add(Y1, vx1);                                            \
            G1 = vec_add(Y1, uvx1);                                           \
            B1 = vec_add(Y1, ux1);                                            \
                                                                              \
            R = vec_packclp(R0, R1);                                          \
            G = vec_packclp(G0, G1);                                          \
            B = vec_packclp(B0, B1);                                          \
                                                                              \
            out_pixels(R, G, B, oute);                                        \
                                                                              \
            R0 = vec_add(Y2, vx0);                                            \
            G0 = vec_add(Y2, uvx0);                                           \
            B0 = vec_add(Y2, ux0);                                            \
            R1 = vec_add(Y3, vx1);                                            \
            G1 = vec_add(Y3, uvx1);                                           \
            B1 = vec_add(Y3, ux1);                                            \
            R  = vec_packclp(R0, R1);                                         \
            G  = vec_packclp(G0, G1);                                         \
            B  = vec_packclp(B0, B1);                                         \
                                                                              \
                                                                              \
            out_pixels(R, G, B, outo);                                        \
                                                                              \
            y1i += 16;                                                        \
            y2i += 16;                                                        \
            ui  += 8;                                                         \
            vi  += 8;                                                         \
        }                                                                     \
                                                                              \
        outo += (outstrides[0]) >> 4;                                         \
        oute += (outstrides[0]) >> 4;                                         \
                                                                              \
        ui  += instrides_scl[1];                                              \
        vi  += instrides_scl[2];                                              \
        y1i += instrides_scl[0];                                              \
        y2i += instrides_scl[0];                                              \
    }                                                                         \
    return srcSliceH;                                                         \
}
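
/* What one inner iteration of the macro above does, in rough scalar terms
 * (illustration only): it consumes 16 luma samples from each of two adjacent
 * rows plus 8 chroma samples per plane, and emits two rows of 16 RGB pixels,
 * each chroma pair being shared by the 2x2 luma block above it:
 *
 *     for (int x = 0; x < 16; x++) {
 *         int u = ui[x / 2] - 128, v = vi[x / 2] - 128;
 *         top[x]    = yuv2rgb(y1i[x], u, v);   // yuv2rgb: hypothetical scalar helper
 *         bottom[x] = yuv2rgb(y2i[x], u, v);
 *     }
 *
 * The vector code gets the same sharing effect from the vec_mergeh/vec_mergel
 * duplication of ux, vx and uvx.
 */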

#define out_abgr(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), ((__typeof__(a)) { 255 }), c, b, a, ptr)
#define out_bgra(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), c, b, a, ((__typeof__(a)) { 255 }), ptr)
#define out_rgba(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), a, b, c, ((__typeof__(a)) { 255 }), ptr)
#define out_argb(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), ((__typeof__(a)) { 255 }), a, b, c, ptr)
#define out_rgb24(a, b, c, ptr) vec_mstrgb24(a, b, c, ptr)
#define out_bgr24(a, b, c, ptr) vec_mstbgr24(a, b, c, ptr)

DEFCSP420_CVT(yuv2_abgr,  out_abgr)
DEFCSP420_CVT(yuv2_bgra,  out_bgra)
DEFCSP420_CVT(yuv2_rgba,  out_rgba)
DEFCSP420_CVT(yuv2_argb,  out_argb)
DEFCSP420_CVT(yuv2_rgb24, out_rgb24)
DEFCSP420_CVT(yuv2_bgr24, out_bgr24)

// uyvy|uyvy|uyvy|uyvy
// 0123 4567 89ab cdef
static const vector unsigned char
    demux_u = { 0x10, 0x00, 0x10, 0x00,
                0x10, 0x04, 0x10, 0x04,
                0x10, 0x08, 0x10, 0x08,
                0x10, 0x0c, 0x10, 0x0c },
    demux_v = { 0x10, 0x02, 0x10, 0x02,
                0x10, 0x06, 0x10, 0x06,
                0x10, 0x0A, 0x10, 0x0A,
                0x10, 0x0E, 0x10, 0x0E },
    demux_y = { 0x10, 0x01, 0x10, 0x03,
                0x10, 0x05, 0x10, 0x07,
                0x10, 0x09, 0x10, 0x0B,
                0x10, 0x0D, 0x10, 0x0F };
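
/* How the demux tables are used in altivec_uyvy_rgb32() below, for
 * illustration only: one vec_ld() of 16 UYVY bytes
 * u0 y0 v0 y1 u1 y2 v1 y3 ... is expanded into 16-bit lanes as
 *
 *     Y = { y0, y1, y2, y3, y4, y5, y6, y7 }
 *     U = { u0, u0, u1, u1, u2, u2, u3, u3 }
 *     V = { v0, v0, v1, v1, v2, v2, v3, v3 }
 *
 * Each permute pairs a zero byte (index 0x10, i.e. element 0 of the zero
 * vector) with the selected sample byte, so every lane holds the raw 8-bit
 * value zero-extended to 16 bits, ready for cvtyuvtoRGB().
 */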

/*
 * this is so I can play live CCIR raw video
 */
static int altivec_uyvy_rgb32(SwsContext *c, const unsigned char **in,
                              int *instrides, int srcSliceY, int srcSliceH,
                              unsigned char **oplanes, int *outstrides)
{
    int w = c->srcW;
    int h = srcSliceH;
    int i, j;
    vector unsigned char uyvy;
    vector signed short Y, U, V;
    vector signed short R0, G0, B0, R1, G1, B1;
    vector unsigned char R, G, B;
    vector unsigned char *out;
    const ubyte *img;

    img = in[0];
    out = (vector unsigned char *) (oplanes[0] + srcSliceY * outstrides[0]);

    for (i = 0; i < h; i++)
        for (j = 0; j < w / 16; j++) {
            uyvy = vec_ld(0, img);

            U = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_u);
            V = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_v);
            Y = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_y);

            cvtyuvtoRGB(c, Y, U, V, &R0, &G0, &B0);

            uyvy = vec_ld(16, img);

            U = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_u);
            V = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_v);
            Y = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_y);

            cvtyuvtoRGB(c, Y, U, V, &R1, &G1, &B1);

            R = vec_packclp(R0, R1);
            G = vec_packclp(G0, G1);
            B = vec_packclp(B0, B1);

            // vec_mstbgr24 (R,G,B, out);
            out_rgba(R, G, B, out);

            img += 32;
        }
    return srcSliceH;
}

/* OK, currently the acceleration routine only supports
 * inputs whose width is a multiple of 16
 * and whose height is a multiple of 2.
 *
 * So we just fall back to the C code for those.
 */
av_cold SwsFunc ff_yuv2rgb_init_altivec(SwsContext *c)
{
    if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
        return NULL;

    /*
     * This seems not to matter too much; I tried a bunch of
     * videos with abnormal widths and MPlayer crashed elsewhere.
     * mplayer -vo x11 -rawvideo on:w=350:h=240 raw-350x240.eyuv
     * went boom with an X11 bad match.
     */
    if ((c->srcW & 0xf) != 0)
        return NULL;

    switch (c->srcFormat) {
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV420P:
    /*case IMGFMT_CLPL:        ??? */
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_NV12:
    case AV_PIX_FMT_NV21:
        if ((c->srcH & 0x1) != 0)
            return NULL;

        switch (c->dstFormat) {
        case AV_PIX_FMT_RGB24:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space RGB24\n");
            return altivec_yuv2_rgb24;
        case AV_PIX_FMT_BGR24:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space BGR24\n");
            return altivec_yuv2_bgr24;
        case AV_PIX_FMT_ARGB:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space ARGB\n");
            return altivec_yuv2_argb;
        case AV_PIX_FMT_ABGR:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space ABGR\n");
            return altivec_yuv2_abgr;
        case AV_PIX_FMT_RGBA:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space RGBA\n");
            return altivec_yuv2_rgba;
        case AV_PIX_FMT_BGRA:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space BGRA\n");
            return altivec_yuv2_bgra;
        default: return NULL;
        }
        break;

    case AV_PIX_FMT_UYVY422:
        switch (c->dstFormat) {
        case AV_PIX_FMT_BGR32:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space UYVY -> RGB32\n");
            return altivec_uyvy_rgb32;
        default: return NULL;
        }
        break;
    }
    return NULL;
}

av_cold void ff_yuv2rgb_init_tables_altivec(SwsContext *c,
                                            const int inv_table[4],
                                            int brightness,
                                            int contrast,
                                            int saturation)
{
    union {
        DECLARE_ALIGNED(16, signed short, tmp)[8];
        vector signed short vec;
    } buf;

    buf.tmp[0] = ((0xffffLL) * contrast >> 8) >> 9;                               // cy
    buf.tmp[1] = -256 * brightness;                                               // oy
    buf.tmp[2] =   (inv_table[0] >> 3) * (contrast >> 16) * (saturation >> 16);   // crv
    buf.tmp[3] =   (inv_table[1] >> 3) * (contrast >> 16) * (saturation >> 16);   // cbu
    buf.tmp[4] = -((inv_table[2] >> 1) * (contrast >> 16) * (saturation >> 16));  // cgu
    buf.tmp[5] = -((inv_table[3] >> 1) * (contrast >> 16) * (saturation >> 16));  // cgv

    c->CSHIFT = (vector unsigned short) vec_splat_u16(2);
    c->CY     = vec_splat((vector signed short) buf.vec, 0);
    c->OY     = vec_splat((vector signed short) buf.vec, 1);
    c->CRV    = vec_splat((vector signed short) buf.vec, 2);
    c->CBU    = vec_splat((vector signed short) buf.vec, 3);
    c->CGU    = vec_splat((vector signed short) buf.vec, 4);
    c->CGV    = vec_splat((vector signed short) buf.vec, 5);
    return;
}
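
/* A note on the packing above, for illustration only: the six scalar
 * coefficients are written into one aligned buffer and each is then
 * broadcast across a full vector with vec_splat(). Shown with array
 * indexing for clarity (element-wise sketch, not compilable as written):
 *
 *     for (int i = 0; i < 8; i++) {
 *         c->CY[i]  = buf.tmp[0];
 *         c->OY[i]  = buf.tmp[1];
 *         c->CRV[i] = buf.tmp[2];   // and likewise for CBU, CGU, CGV
 *     }
 *
 * contrast and saturation appear to arrive as 16.16 fixed-point factors,
 * hence the ">> 16" in the crv/cbu/cgu/cgv terms.
 */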

static av_always_inline void yuv2packedX_altivec(SwsContext *c,
                                                 const int16_t *lumFilter,
                                                 const int16_t **lumSrc,
                                                 int lumFilterSize,
                                                 const int16_t *chrFilter,
                                                 const int16_t **chrUSrc,
                                                 const int16_t **chrVSrc,
                                                 int chrFilterSize,
                                                 const int16_t **alpSrc,
                                                 uint8_t *dest,
                                                 int dstW, int dstY,
                                                 enum AVPixelFormat target)
{
    int i, j;
    vector signed short X, X0, X1, Y0, U0, V0, Y1, U1, V1, U, V;
    vector signed short R0, G0, B0, R1, G1, B1;

    vector unsigned char R, G, B;
    vector unsigned char *out, *nout;

    vector signed short RND   = vec_splat_s16(1 << 3);
    vector unsigned short SCL = vec_splat_u16(4);
    DECLARE_ALIGNED(16, unsigned int, scratch)[16];

    vector signed short *YCoeffs, *CCoeffs;

    YCoeffs = c->vYCoeffsBank + dstY * lumFilterSize;
    CCoeffs = c->vCCoeffsBank + dstY * chrFilterSize;

    out = (vector unsigned char *) dest;

    for (i = 0; i < dstW; i += 16) {
        Y0 = RND;
        Y1 = RND;
        /* extract 16 coeffs from lumSrc */
        for (j = 0; j < lumFilterSize; j++) {
            X0 = vec_ld(0, &lumSrc[j][i]);
            X1 = vec_ld(16, &lumSrc[j][i]);
            Y0 = vec_mradds(X0, YCoeffs[j], Y0);
            Y1 = vec_mradds(X1, YCoeffs[j], Y1);
        }

        U = RND;
        V = RND;
        /* extract 8 coeffs from U,V */
        for (j = 0; j < chrFilterSize; j++) {
            X = vec_ld(0, &chrUSrc[j][i / 2]);
            U = vec_mradds(X, CCoeffs[j], U);
            X = vec_ld(0, &chrVSrc[j][i / 2]);
            V = vec_mradds(X, CCoeffs[j], V);
        }

        /* scale and clip signals */
        Y0 = vec_sra(Y0, SCL);
        Y1 = vec_sra(Y1, SCL);
        U  = vec_sra(U, SCL);
        V  = vec_sra(V, SCL);

        Y0 = vec_clip_s16(Y0);
        Y1 = vec_clip_s16(Y1);
        U  = vec_clip_s16(U);
        V  = vec_clip_s16(V);

        /* now we have
         * Y0 = y0 y1 y2 y3 y4 y5 y6 y7    Y1 = y8 y9 y10 y11 y12 y13 y14 y15
         * U  = u0 u1 u2 u3 u4 u5 u6 u7    V  = v0 v1 v2 v3 v4 v5 v6 v7
         *
         * Y0 = y0 y1 y2 y3 y4 y5 y6 y7    Y1 = y8 y9 y10 y11 y12 y13 y14 y15
         * U0 = u0 u0 u1 u1 u2 u2 u3 u3    U1 = u4 u4 u5 u5 u6 u6 u7 u7
         * V0 = v0 v0 v1 v1 v2 v2 v3 v3    V1 = v4 v4 v5 v5 v6 v6 v7 v7
         */

        U0 = vec_mergeh(U, U);
        V0 = vec_mergeh(V, V);

        U1 = vec_mergel(U, U);
        V1 = vec_mergel(V, V);

        cvtyuvtoRGB(c, Y0, U0, V0, &R0, &G0, &B0);
        cvtyuvtoRGB(c, Y1, U1, V1, &R1, &G1, &B1);

        R = vec_packclp(R0, R1);
        G = vec_packclp(G0, G1);
        B = vec_packclp(B0, B1);

        switch (target) {
        case AV_PIX_FMT_ABGR:
            out_abgr(R, G, B, out);
            break;
        case AV_PIX_FMT_BGRA:
            out_bgra(R, G, B, out);
            break;
        case AV_PIX_FMT_RGBA:
            out_rgba(R, G, B, out);
            break;
        case AV_PIX_FMT_ARGB:
            out_argb(R, G, B, out);
            break;
        case AV_PIX_FMT_RGB24:
            out_rgb24(R, G, B, out);
            break;
        case AV_PIX_FMT_BGR24:
            out_bgr24(R, G, B, out);
            break;
        default:
        {
            /* If this is reached, the caller should have called yuv2packedXinC
             * instead. */
            static int printed_error_message;
            if (!printed_error_message) {
                av_log(c, AV_LOG_ERROR,
                       "altivec_yuv2packedX doesn't support %s output\n",
                       sws_format_name(c->dstFormat));
                printed_error_message = 1;
            }
            return;
        }
        }
    }

    if (i < dstW) {
        i -= 16;

        Y0 = RND;
        Y1 = RND;
        /* extract 16 coeffs from lumSrc */
        for (j = 0; j < lumFilterSize; j++) {
            X0 = vec_ld(0, &lumSrc[j][i]);
            X1 = vec_ld(16, &lumSrc[j][i]);
            Y0 = vec_mradds(X0, YCoeffs[j], Y0);
            Y1 = vec_mradds(X1, YCoeffs[j], Y1);
        }

        U = RND;
        V = RND;
        /* extract 8 coeffs from U,V */
        for (j = 0; j < chrFilterSize; j++) {
            X = vec_ld(0, &chrUSrc[j][i / 2]);
            U = vec_mradds(X, CCoeffs[j], U);
            X = vec_ld(0, &chrVSrc[j][i / 2]);
            V = vec_mradds(X, CCoeffs[j], V);
        }

        /* scale and clip signals */
        Y0 = vec_sra(Y0, SCL);
        Y1 = vec_sra(Y1, SCL);
        U  = vec_sra(U, SCL);
        V  = vec_sra(V, SCL);

        Y0 = vec_clip_s16(Y0);
        Y1 = vec_clip_s16(Y1);
        U  = vec_clip_s16(U);
        V  = vec_clip_s16(V);

        /* now we have
         * Y0 = y0 y1 y2 y3 y4 y5 y6 y7    Y1 = y8 y9 y10 y11 y12 y13 y14 y15
         * U  = u0 u1 u2 u3 u4 u5 u6 u7    V  = v0 v1 v2 v3 v4 v5 v6 v7
         *
         * Y0 = y0 y1 y2 y3 y4 y5 y6 y7    Y1 = y8 y9 y10 y11 y12 y13 y14 y15
         * U0 = u0 u0 u1 u1 u2 u2 u3 u3    U1 = u4 u4 u5 u5 u6 u6 u7 u7
         * V0 = v0 v0 v1 v1 v2 v2 v3 v3    V1 = v4 v4 v5 v5 v6 v6 v7 v7
         */

        U0 = vec_mergeh(U, U);
        V0 = vec_mergeh(V, V);

        U1 = vec_mergel(U, U);
        V1 = vec_mergel(V, V);

        cvtyuvtoRGB(c, Y0, U0, V0, &R0, &G0, &B0);
        cvtyuvtoRGB(c, Y1, U1, V1, &R1, &G1, &B1);

        R = vec_packclp(R0, R1);
        G = vec_packclp(G0, G1);
        B = vec_packclp(B0, B1);

        nout = (vector unsigned char *) scratch;
        switch (target) {
        case AV_PIX_FMT_ABGR:
            out_abgr(R, G, B, nout);
            break;
        case AV_PIX_FMT_BGRA:
            out_bgra(R, G, B, nout);
            break;
        case AV_PIX_FMT_RGBA:
            out_rgba(R, G, B, nout);
            break;
        case AV_PIX_FMT_ARGB:
            out_argb(R, G, B, nout);
            break;
        case AV_PIX_FMT_RGB24:
            out_rgb24(R, G, B, nout);
            break;
        case AV_PIX_FMT_BGR24:
            out_bgr24(R, G, B, nout);
            break;
        default:
            /* Unreachable, I think. */
            av_log(c, AV_LOG_ERROR,
                   "altivec_yuv2packedX doesn't support %s output\n",
                   sws_format_name(c->dstFormat));
            return;
        }

        memcpy(&((uint32_t *) dest)[i], scratch, (dstW - i) / 4);
    }
}

#define YUV2PACKEDX_WRAPPER(suffix, pixfmt)                             \
void ff_yuv2 ## suffix ## _X_altivec(SwsContext *c,                     \
                                     const int16_t *lumFilter,          \
                                     const int16_t **lumSrc,            \
                                     int lumFilterSize,                 \
                                     const int16_t *chrFilter,          \
                                     const int16_t **chrUSrc,           \
                                     const int16_t **chrVSrc,           \
                                     int chrFilterSize,                 \
                                     const int16_t **alpSrc,            \
                                     uint8_t *dest, int dstW, int dstY) \
{                                                                       \
    yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize,            \
                        chrFilter, chrUSrc, chrVSrc,                    \
                        chrFilterSize, alpSrc,                          \
                        dest, dstW, dstY, pixfmt);                      \
}

YUV2PACKEDX_WRAPPER(abgr,  AV_PIX_FMT_ABGR);
YUV2PACKEDX_WRAPPER(bgra,  AV_PIX_FMT_BGRA);
YUV2PACKEDX_WRAPPER(argb,  AV_PIX_FMT_ARGB);
YUV2PACKEDX_WRAPPER(rgba,  AV_PIX_FMT_RGBA);
YUV2PACKEDX_WRAPPER(rgb24, AV_PIX_FMT_RGB24);
YUV2PACKEDX_WRAPPER(bgr24, AV_PIX_FMT_BGR24);