libswscale/ppc/yuv2rgb_altivec.c
/*
 * AltiVec acceleration for colorspace conversion
 *
 * copyright (C) 2004 Marc Hoffman <marc.hoffman@analog.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * Convert I420/YV12 to RGB in various formats.
 * It rejects images that are not in a 420 format,
 * it rejects images whose width is not a multiple of 16,
 * it rejects images whose height is not a multiple of 2.
 * Rejected images fall back to the C code.
 *
 * Lots of optimizations to be done here.
 *
 * 1. Need to fix the saturation code. I just couldn't get it to fly with packs
 * and adds, so we currently use max/min to clip.
 *
 * 2. The inefficient use of chroma loading needs a bit of brushing up.
 *
 * 3. Analysis of pipeline stalls needs to be done. Use Shark to identify
 * pipeline stalls.
 *
 *
 * MODIFIED to calculate coeffs from the currently selected color space.
 * MODIFIED core to be a macro where you specify the output format.
 * ADDED UYVY conversion which is never called due to something in swscale.
 * CORRECTED algorithm selection to be strict on input formats.
 * ADDED runtime detection of AltiVec.
 *
 * ADDED altivec_yuv2packedX vertical scale + RGB converter
 *
 * March 27, 2004
 * PERFORMANCE ANALYSIS
 *
 * The C version uses 25% of the processor, or ~250 MIPS, for D1 raw video
 * used as the test sequence.
 * The AltiVec version uses 10% of the processor, or ~100 MIPS, for the
 * same sequence.
 *
 * 720 * 480 * 30  ~= 10 Mpixels/s
 *
 * so we have roughly 10 clocks per pixel. This is too high; something has
 * to be wrong.
 *
 * OPTIMIZED the clip code to use vec_max and vec_packs, removing the
 * need for vec_min.
 *
 * OPTIMIZED DST OUTPUT cache/DMA controls. We are pretty much guaranteed to
 * have the input video frame in cache: it was just decompressed, so it
 * probably resides in L1. However, we are creating the output video stream,
 * which needs to use the DSTST instruction to optimize for the cache. We
 * couple this with the fact that we are not going to visit the input buffer
 * again, so we mark it Least Recently Used. This shaves 25% of the processor
 * cycles off.
 *
 * Now memcpy is the largest MIPS consumer in the system, probably due
 * to the inefficient X11 stuff.
 *
 * GL libraries seem to be very slow on this machine (1.33 GHz PB running
 * Jaguar); this is not the case for my 1 GHz PB. I thought it might be
 * a versioning issue, however I have libGL.1.2.dylib on both
 * machines. (We need to figure this out now.)
 *
 * GL2 libraries work now with the patch for RGB32.
 *
 * NOTE: the quartz vo driver ARGB32_to_RGB24 consumes 30% of the processor.
 *
 * Integrated luma prescaling for saturation/contrast/brightness adjustment.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>

#include "config.h"
#include "libswscale/rgb2rgb.h"
#include "libswscale/swscale.h"
#include "libswscale/swscale_internal.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "yuv2rgb_altivec.h"

#if HAVE_ALTIVEC

#undef PROFILE_THE_BEAST
#undef INC_SCALING

typedef unsigned char ubyte;
typedef signed char   sbyte;
/* RGB interleaver: 16 planar pels, 8-bit samples per channel, held in
 * homogeneous vector registers x0, x1, x2, are interleaved with the
 * following technique:
 *
 *    o0 = vec_mergeh(x0, x1);
 *    o1 = vec_perm(o0, x2, perm_rgb_0);
 *    o2 = vec_perm(o0, x2, perm_rgb_1);
 *    o3 = vec_mergel(x0, x1);
 *    o4 = vec_perm(o3, o2, perm_rgb_2);
 *    o5 = vec_perm(o3, o2, perm_rgb_3);
 *
 * perm_rgb_0:   o0(RG).h v1(B) --> o1*
 *            0   1  2   3   4
 *           rgbr|gbrg|brgb|rgbr
 *           0010 0100 1001 0010
 *           0102 3145 2673 894A
 *
 * perm_rgb_1:   o0(RG).h v1(B) --> o2
 *            0   1  2   3   4
 *           gbrg|brgb|bbbb|bbbb
 *           0100 1001 1111 1111
 *           B5CD 6EF7 89AB CDEF
 *
 * perm_rgb_2:   o3(RG).l o2(rgbB.l) --> o4*
 *            0   1  2   3   4
 *           gbrg|brgb|rgbr|gbrg
 *           1111 1111 0010 0100
 *           89AB CDEF 0182 3945
 *
 * perm_rgb_3:   o3(RG).l o2(rgbB.l) --> o5*
 *            0   1  2   3   4
 *           brgb|rgbr|gbrg|brgb
 *           1001 0010 0100 1001
 *           a67b 89cA BdCD eEFf
 */
static const vector unsigned char
    perm_rgb_0 = { 0x00, 0x01, 0x10, 0x02, 0x03, 0x11, 0x04, 0x05,
                   0x12, 0x06, 0x07, 0x13, 0x08, 0x09, 0x14, 0x0a },
    perm_rgb_1 = { 0x0b, 0x15, 0x0c, 0x0d, 0x16, 0x0e, 0x0f, 0x17,
                   0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
    perm_rgb_2 = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
                   0x00, 0x01, 0x18, 0x02, 0x03, 0x19, 0x04, 0x05 },
    perm_rgb_3 = { 0x1a, 0x06, 0x07, 0x1b, 0x08, 0x09, 0x1c, 0x0a,
                   0x0b, 0x1d, 0x0c, 0x0d, 0x1e, 0x0e, 0x0f, 0x1f };

#define vec_merge3(x2, x1, x0, y0, y1, y2)     \
    do {                                       \
        __typeof__(x0) o0, o2, o3;             \
        o0 = vec_mergeh(x0, x1);               \
        y0 = vec_perm(o0, x2, perm_rgb_0);     \
        o2 = vec_perm(o0, x2, perm_rgb_1);     \
        o3 = vec_mergel(x0, x1);               \
        y1 = vec_perm(o3, o2, perm_rgb_2);     \
        y2 = vec_perm(o3, o2, perm_rgb_3);     \
    } while (0)
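
/* For reference only: a scalar sketch of what vec_merge3() produces.  The
 * three 16-byte planes end up interleaved with the first macro argument
 * landing in the last byte slot, i.e. vec_merge3(z, y, x, o0, o1, o2)
 * leaves x0 y0 z0 x1 y1 z1 ... (48 bytes for 16 pels) in o0..o2.  The
 * helper below is hypothetical and not used by the real code:
 *
 *    static void merge3_scalar(const uint8_t x[16], const uint8_t y[16],
 *                              const uint8_t z[16], uint8_t out[48])
 *    {
 *        for (int i = 0; i < 16; i++) {
 *            out[3 * i + 0] = x[i];
 *            out[3 * i + 1] = y[i];
 *            out[3 * i + 2] = z[i];
 *        }
 *    }
 */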

#define vec_mstbgr24(x0, x1, x2, ptr)          \
    do {                                       \
        __typeof__(x0) _0, _1, _2;             \
        vec_merge3(x0, x1, x2, _0, _1, _2);    \
        vec_st(_0, 0, ptr++);                  \
        vec_st(_1, 0, ptr++);                  \
        vec_st(_2, 0, ptr++);                  \
    } while (0)

#define vec_mstrgb24(x0, x1, x2, ptr)          \
    do {                                       \
        __typeof__(x0) _0, _1, _2;             \
        vec_merge3(x2, x1, x0, _0, _1, _2);    \
        vec_st(_0, 0, ptr++);                  \
        vec_st(_1, 0, ptr++);                  \
        vec_st(_2, 0, ptr++);                  \
    } while (0)

/* pack the pixels in rgb0 format
 * msb R
 * lsb 0
 */
#define vec_mstrgb32(T, x0, x1, x2, x3, ptr)                            \
    do {                                                                \
        T _0, _1, _2, _3;                                               \
        _0 = vec_mergeh(x0, x1);                                        \
        _1 = vec_mergeh(x2, x3);                                        \
        _2 = (T) vec_mergeh((vector unsigned short) _0,                 \
                            (vector unsigned short) _1);                \
        _3 = (T) vec_mergel((vector unsigned short) _0,                 \
                            (vector unsigned short) _1);                \
        vec_st(_2, 0 * 16, (T *) ptr);                                  \
        vec_st(_3, 1 * 16, (T *) ptr);                                  \
        _0 = vec_mergel(x0, x1);                                        \
        _1 = vec_mergel(x2, x3);                                        \
        _2 = (T) vec_mergeh((vector unsigned short) _0,                 \
                            (vector unsigned short) _1);                \
        _3 = (T) vec_mergel((vector unsigned short) _0,                 \
                            (vector unsigned short) _1);                \
        vec_st(_2, 2 * 16, (T *) ptr);                                  \
        vec_st(_3, 3 * 16, (T *) ptr);                                  \
        ptr += 4;                                                       \
    } while (0)
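
/* For reference only: a scalar sketch of what vec_mstrgb32() stores.  Byte i
 * of each input vector becomes one byte of output pixel i, in argument
 * order, so 16 pels give 64 bytes of x0 x1 x2 x3 | x0 x1 x2 x3 | ...
 * The helper below is hypothetical and not used by the real code:
 *
 *    static void mstrgb32_scalar(const uint8_t x0[16], const uint8_t x1[16],
 *                                const uint8_t x2[16], const uint8_t x3[16],
 *                                uint8_t out[64])
 *    {
 *        for (int i = 0; i < 16; i++) {
 *            out[4 * i + 0] = x0[i];
 *            out[4 * i + 1] = x1[i];
 *            out[4 * i + 2] = x2[i];
 *            out[4 * i + 3] = x3[i];
 *        }
 *    }
 */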

/*
 * 1     0       1.4021   | | Y |
 * 1    -0.3441 -0.7142   |x| Cb|
 * 1     1.7718  0        | | Cr|
 *
 *
 * Y:      [-128 127]
 * Cb/Cr : [-128 127]
 *
 * A typical YUV conversion works on Y in 0-255; this version has been
 * optimized for JPEG decoding.
 */
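
/* As a quick sanity check of the matrix above: for a neutral chroma sample
 * Cb = Cr = 0 every row reduces to its first column, so R = G = B = Y and
 * gray input stays gray.  For Cr = 100, Cb = 0 one gets approximately
 * R = Y + 140, G = Y - 71, B = Y before clipping. */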

#define vec_unh(x)                                                      \
    (vector signed short)                                               \
        vec_perm(x, (__typeof__(x)) { 0 },                              \
                 ((vector unsigned char) {                              \
                     0x10, 0x00, 0x10, 0x01, 0x10, 0x02, 0x10, 0x03,    \
                     0x10, 0x04, 0x10, 0x05, 0x10, 0x06, 0x10, 0x07 }))

#define vec_unl(x)                                                      \
    (vector signed short)                                               \
        vec_perm(x, (__typeof__(x)) { 0 },                              \
                 ((vector unsigned char) {                              \
                     0x10, 0x08, 0x10, 0x09, 0x10, 0x0A, 0x10, 0x0B,    \
                     0x10, 0x0C, 0x10, 0x0D, 0x10, 0x0E, 0x10, 0x0F }))

#define vec_clip_s16(x)                                                 \
    vec_max(vec_min(x, ((vector signed short) {                         \
                    235, 235, 235, 235, 235, 235, 235, 235 })),         \
            ((vector signed short) { 16, 16, 16, 16, 16, 16, 16, 16 }))

#define vec_packclp(x, y)                                               \
    (vector unsigned char)                                              \
        vec_packs((vector unsigned short)                               \
                      vec_max(x, ((vector signed short) { 0 })),        \
                  (vector unsigned short)                               \
                      vec_max(y, ((vector signed short) { 0 })))
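
/* For reference only: vec_unh()/vec_unl() zero-extend the high/low eight
 * bytes of a vector into eight signed 16-bit lanes (the 0x10 permute index
 * selects a byte of the zero vector, which supplies the high byte of each
 * big-endian element).  vec_packclp() then clamps two short vectors to
 * 0..255 and packs them into 16 unsigned bytes; a scalar sketch of one lane
 * (illustrative, not used below):
 *
 *    static uint8_t packclp_scalar(int16_t v)
 *    {
 *        if (v < 0)
 *            v = 0;                    // vec_max(x, 0)
 *        return v > 255 ? 255 : v;     // unsigned saturating pack
 *    }
 */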

static inline void cvtyuvtoRGB(SwsContext *c, vector signed short Y,
                               vector signed short U, vector signed short V,
                               vector signed short *R, vector signed short *G,
                               vector signed short *B)
{
    vector signed short vx, ux, uvx;

    Y = vec_mradds(Y, c->CY, c->OY);
    U = vec_sub(U, (vector signed short)
                       vec_splat((vector signed short) { 128 }, 0));
    V = vec_sub(V, (vector signed short)
                       vec_splat((vector signed short) { 128 }, 0));

    // ux  = (CBU * (u << c->CSHIFT) + 0x4000) >> 15;
    ux = vec_sl(U, c->CSHIFT);
    *B = vec_mradds(ux, c->CBU, Y);

    // vx  = (CRV * (v << c->CSHIFT) + 0x4000) >> 15;
    vx = vec_sl(V, c->CSHIFT);
    *R = vec_mradds(vx, c->CRV, Y);

    // uvx = ((CGU * u) + (CGV * v)) >> 15;
    uvx = vec_mradds(U, c->CGU, Y);
    *G  = vec_mradds(V, c->CGV, uvx);
}
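
/* For reference only: the scalar arithmetic behind cvtyuvtoRGB() for a
 * single pel, assuming mradds(a, b, c) = saturate(((a * b + 0x4000) >> 15) + c),
 * which is what vec_mradds() computes per lane.  CY, OY, CRV, CBU, CGU, CGV
 * and CSHIFT stand for the scalar values splatted into the context's
 * coefficient vectors; the helper itself is hypothetical:
 *
 *    static void cvt_scalar(int y, int u, int v, int *r, int *g, int *b)
 *    {
 *        int Y = mradds(y, CY, OY);        // luma scale + offset
 *        int U = u - 128, V = v - 128;     // remove chroma bias
 *        *b = mradds(U << CSHIFT, CBU, Y);
 *        *r = mradds(V << CSHIFT, CRV, Y);
 *        *g = mradds(V, CGV, mradds(U, CGU, Y));
 *    }
 */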

/*
 * ------------------------------------------------------------------------------
 * CS converters
 * ------------------------------------------------------------------------------
 */

#define DEFCSP420_CVT(name, out_pixels)                                       \
static int altivec_ ## name(SwsContext *c, const unsigned char **in,          \
                            int *instrides, int srcSliceY, int srcSliceH,     \
                            unsigned char **oplanes, int *outstrides)         \
{                                                                             \
    int w = c->srcW;                                                          \
    int h = srcSliceH;                                                        \
    int i, j;                                                                 \
    int instrides_scl[3];                                                     \
    vector unsigned char y0, y1;                                              \
                                                                              \
    vector signed char u, v;                                                  \
                                                                              \
    vector signed short Y0, Y1, Y2, Y3;                                       \
    vector signed short U, V;                                                 \
    vector signed short vx, ux, uvx;                                          \
    vector signed short vx0, ux0, uvx0;                                       \
    vector signed short vx1, ux1, uvx1;                                       \
    vector signed short R0, G0, B0;                                           \
    vector signed short R1, G1, B1;                                           \
    vector unsigned char R, G, B;                                             \
                                                                              \
    const vector unsigned char *y1ivP, *y2ivP, *uivP, *vivP;                  \
    vector unsigned char align_perm;                                          \
                                                                              \
    vector signed short lCY       = c->CY;                                    \
    vector signed short lOY       = c->OY;                                    \
    vector signed short lCRV      = c->CRV;                                   \
    vector signed short lCBU      = c->CBU;                                   \
    vector signed short lCGU      = c->CGU;                                   \
    vector signed short lCGV      = c->CGV;                                   \
    vector unsigned short lCSHIFT = c->CSHIFT;                                \
                                                                              \
    const ubyte *y1i = in[0];                                                 \
    const ubyte *y2i = in[0] + instrides[0];                                  \
    const ubyte *ui  = in[1];                                                 \
    const ubyte *vi  = in[2];                                                 \
                                                                              \
    vector unsigned char *oute =                                              \
        (vector unsigned char *)                                              \
            (oplanes[0] + srcSliceY * outstrides[0]);                         \
    vector unsigned char *outo =                                              \
        (vector unsigned char *)                                              \
            (oplanes[0] + srcSliceY * outstrides[0] + outstrides[0]);         \
                                                                              \
    /* loop moves y{1, 2}i by w */                                            \
    instrides_scl[0] = instrides[0] * 2 - w;                                  \
    /* loop moves ui by w / 2 */                                              \
    instrides_scl[1] = instrides[1] - w / 2;                                  \
    /* loop moves vi by w / 2 */                                              \
    instrides_scl[2] = instrides[2] - w / 2;                                  \
                                                                              \
    for (i = 0; i < h / 2; i++) {                                             \
        vec_dstst(outo, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 0);       \
        vec_dstst(oute, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 1);       \
                                                                              \
        for (j = 0; j < w / 16; j++) {                                        \
            y1ivP = (const vector unsigned char *) y1i;                       \
            y2ivP = (const vector unsigned char *) y2i;                       \
            uivP  = (const vector unsigned char *) ui;                        \
            vivP  = (const vector unsigned char *) vi;                        \
                                                                              \
            align_perm = vec_lvsl(0, y1i);                                    \
            y0 = (vector unsigned char)                                       \
                     vec_perm(y1ivP[0], y1ivP[1], align_perm);                \
                                                                              \
            align_perm = vec_lvsl(0, y2i);                                    \
            y1 = (vector unsigned char)                                       \
                     vec_perm(y2ivP[0], y2ivP[1], align_perm);                \
                                                                              \
            align_perm = vec_lvsl(0, ui);                                     \
            u = (vector signed char)                                          \
                    vec_perm(uivP[0], uivP[1], align_perm);                   \
                                                                              \
            align_perm = vec_lvsl(0, vi);                                     \
            v = (vector signed char)                                          \
                    vec_perm(vivP[0], vivP[1], align_perm);                   \
                                                                              \
            u = (vector signed char)                                          \
                    vec_sub(u,                                                \
                            (vector signed char)                              \
                                vec_splat((vector signed char) { 128 }, 0));  \
            v = (vector signed char)                                          \
                    vec_sub(v,                                                \
                            (vector signed char)                              \
                                vec_splat((vector signed char) { 128 }, 0));  \
                                                                              \
            U = vec_unpackh(u);                                               \
            V = vec_unpackh(v);                                               \
                                                                              \
            Y0 = vec_unh(y0);                                                 \
            Y1 = vec_unl(y0);                                                 \
            Y2 = vec_unh(y1);                                                 \
            Y3 = vec_unl(y1);                                                 \
                                                                              \
            Y0 = vec_mradds(Y0, lCY, lOY);                                    \
            Y1 = vec_mradds(Y1, lCY, lOY);                                    \
            Y2 = vec_mradds(Y2, lCY, lOY);                                    \
            Y3 = vec_mradds(Y3, lCY, lOY);                                    \
                                                                              \
            /* ux  = (CBU * (u << CSHIFT) + 0x4000) >> 15 */                  \
            ux  = vec_sl(U, lCSHIFT);                                         \
            ux  = vec_mradds(ux, lCBU, (vector signed short) { 0 });          \
            ux0 = vec_mergeh(ux, ux);                                         \
            ux1 = vec_mergel(ux, ux);                                         \
                                                                              \
            /* vx  = (CRV * (v << CSHIFT) + 0x4000) >> 15; */                 \
            vx  = vec_sl(V, lCSHIFT);                                         \
            vx  = vec_mradds(vx, lCRV, (vector signed short) { 0 });          \
            vx0 = vec_mergeh(vx, vx);                                         \
            vx1 = vec_mergel(vx, vx);                                         \
                                                                              \
            /* uvx = ((CGU * u) + (CGV * v)) >> 15 */                         \
            uvx  = vec_mradds(U, lCGU, (vector signed short) { 0 });          \
            uvx  = vec_mradds(V, lCGV, uvx);                                  \
            uvx0 = vec_mergeh(uvx, uvx);                                      \
            uvx1 = vec_mergel(uvx, uvx);                                      \
                                                                              \
            R0 = vec_add(Y0, vx0);                                            \
            G0 = vec_add(Y0, uvx0);                                           \
            B0 = vec_add(Y0, ux0);                                            \
            R1 = vec_add(Y1, vx1);                                            \
            G1 = vec_add(Y1, uvx1);                                           \
            B1 = vec_add(Y1, ux1);                                            \
                                                                              \
            R = vec_packclp(R0, R1);                                          \
            G = vec_packclp(G0, G1);                                          \
            B = vec_packclp(B0, B1);                                          \
                                                                              \
            out_pixels(R, G, B, oute);                                        \
                                                                              \
            R0 = vec_add(Y2, vx0);                                            \
            G0 = vec_add(Y2, uvx0);                                           \
            B0 = vec_add(Y2, ux0);                                            \
            R1 = vec_add(Y3, vx1);                                            \
            G1 = vec_add(Y3, uvx1);                                           \
            B1 = vec_add(Y3, ux1);                                            \
            R  = vec_packclp(R0, R1);                                         \
            G  = vec_packclp(G0, G1);                                         \
            B  = vec_packclp(B0, B1);                                         \
                                                                              \
                                                                              \
            out_pixels(R, G, B, outo);                                        \
                                                                              \
            y1i += 16;                                                        \
            y2i += 16;                                                        \
            ui  += 8;                                                         \
            vi  += 8;                                                         \
        }                                                                     \
                                                                              \
        outo += (outstrides[0]) >> 4;                                         \
        oute += (outstrides[0]) >> 4;                                         \
                                                                              \
        ui  += instrides_scl[1];                                              \
        vi  += instrides_scl[2];                                              \
        y1i += instrides_scl[0];                                              \
        y2i += instrides_scl[0];                                              \
    }                                                                         \
    return srcSliceH;                                                         \
}

#define out_abgr(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), ((__typeof__(a)) { 255 }), c, b, a, ptr)
#define out_bgra(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), c, b, a, ((__typeof__(a)) { 255 }), ptr)
#define out_rgba(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), a, b, c, ((__typeof__(a)) { 255 }), ptr)
#define out_argb(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), ((__typeof__(a)) { 255 }), a, b, c, ptr)
#define out_rgb24(a, b, c, ptr) vec_mstrgb24(a, b, c, ptr)
#define out_bgr24(a, b, c, ptr) vec_mstbgr24(a, b, c, ptr)

DEFCSP420_CVT(yuv2_abgr,  out_abgr)
DEFCSP420_CVT(yuv2_bgra,  out_bgra)
DEFCSP420_CVT(yuv2_rgba,  out_rgba)
DEFCSP420_CVT(yuv2_argb,  out_argb)
DEFCSP420_CVT(yuv2_rgb24, out_rgb24)
DEFCSP420_CVT(yuv2_bgr24, out_bgr24)

// uyvy|uyvy|uyvy|uyvy
// 0123 4567 89ab cdef
static const vector unsigned char
    demux_u = { 0x10, 0x00, 0x10, 0x00,
                0x10, 0x04, 0x10, 0x04,
                0x10, 0x08, 0x10, 0x08,
                0x10, 0x0c, 0x10, 0x0c },
    demux_v = { 0x10, 0x02, 0x10, 0x02,
                0x10, 0x06, 0x10, 0x06,
                0x10, 0x0A, 0x10, 0x0A,
                0x10, 0x0E, 0x10, 0x0E },
    demux_y = { 0x10, 0x01, 0x10, 0x03,
                0x10, 0x05, 0x10, 0x07,
                0x10, 0x09, 0x10, 0x0B,
                0x10, 0x0D, 0x10, 0x0F };
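
/* In the tables above each index pair (0x10, k) assembles one 16-bit lane:
 * 0x10 pulls a zero byte from the second vec_perm() operand (the { 0 }
 * vector) and k pulls byte k of the UYVY block, giving a zero-extended
 * big-endian short.  A scalar sketch of the demux for one 16-byte UYVY
 * block (illustrative only):
 *
 *    // src = U0 Y0 V0 Y1  U1 Y2 V1 Y3  U2 Y4 V2 Y5  U3 Y6 V3 Y7
 *    for (int i = 0; i < 8; i++) {
 *        Y[i] = src[2 * i + 1];        // demux_y
 *        U[i] = src[4 * (i / 2)];      // demux_u: each U repeated twice
 *        V[i] = src[4 * (i / 2) + 2];  // demux_v: each V repeated twice
 *    }
 */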

/*
 * this is so I can play live CCIR raw video
 */
static int altivec_uyvy_rgb32(SwsContext *c, const unsigned char **in,
                              int *instrides, int srcSliceY, int srcSliceH,
                              unsigned char **oplanes, int *outstrides)
{
    int w = c->srcW;
    int h = srcSliceH;
    int i, j;
    vector unsigned char uyvy;
    vector signed short Y, U, V;
    vector signed short R0, G0, B0, R1, G1, B1;
    vector unsigned char R, G, B;
    vector unsigned char *out;
    const ubyte *img;

    img = in[0];
    out = (vector unsigned char *) (oplanes[0] + srcSliceY * outstrides[0]);

    for (i = 0; i < h; i++)
        for (j = 0; j < w / 16; j++) {
            uyvy = vec_ld(0, img);

            U = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_u);
            V = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_v);
            Y = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_y);

            cvtyuvtoRGB(c, Y, U, V, &R0, &G0, &B0);

            uyvy = vec_ld(16, img);

            U = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_u);
            V = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_v);
            Y = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_y);

            cvtyuvtoRGB(c, Y, U, V, &R1, &G1, &B1);

            R = vec_packclp(R0, R1);
            G = vec_packclp(G0, G1);
            B = vec_packclp(B0, B1);

            // vec_mstbgr24 (R,G,B, out);
            out_rgba(R, G, B, out);

            img += 32;
        }
    return srcSliceH;
}

#endif /* HAVE_ALTIVEC */

/* OK, currently the accelerated routines only support
 * inputs whose width is a multiple of 16
 * and whose height is a multiple of 2.
 *
 * For everything else we just fall back to the C code.
 */
av_cold SwsFunc ff_yuv2rgb_init_ppc(SwsContext *c)
{
#if HAVE_ALTIVEC
    if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
        return NULL;

    /*
     * This restriction does not seem to matter too much; I tried a bunch of
     * videos with abnormal widths and MPlayer crashes elsewhere, e.g.
     * mplayer -vo x11 -rawvideo on:w=350:h=240 raw-350x240.eyuv
     * goes boom with an X11 bad match.
     *
     */
    if ((c->srcW & 0xf) != 0)
        return NULL;

    switch (c->srcFormat) {
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV420P:
    /*case IMGFMT_CLPL:        ??? */
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_NV12:
    case AV_PIX_FMT_NV21:
        if ((c->srcH & 0x1) != 0)
            return NULL;

        switch (c->dstFormat) {
        case AV_PIX_FMT_RGB24:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space RGB24\n");
            return altivec_yuv2_rgb24;
        case AV_PIX_FMT_BGR24:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space BGR24\n");
            return altivec_yuv2_bgr24;
        case AV_PIX_FMT_ARGB:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space ARGB\n");
            return altivec_yuv2_argb;
        case AV_PIX_FMT_ABGR:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space ABGR\n");
            return altivec_yuv2_abgr;
        case AV_PIX_FMT_RGBA:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space RGBA\n");
            return altivec_yuv2_rgba;
        case AV_PIX_FMT_BGRA:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space BGRA\n");
            return altivec_yuv2_bgra;
        default: return NULL;
        }
        break;

    case AV_PIX_FMT_UYVY422:
        switch (c->dstFormat) {
        case AV_PIX_FMT_BGR32:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space UYVY -> RGB32\n");
            return altivec_uyvy_rgb32;
        default: return NULL;
        }
        break;
    }
#endif /* HAVE_ALTIVEC */

    return NULL;
}

av_cold void ff_yuv2rgb_init_tables_ppc(SwsContext *c,
                                        const int inv_table[4],
                                        int brightness,
                                        int contrast,
                                        int saturation)
{
#if HAVE_ALTIVEC
    union {
        DECLARE_ALIGNED(16, signed short, tmp)[8];
        vector signed short vec;
    } buf;

    if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
        return;

    buf.tmp[0] = ((0xffffLL) * contrast >> 8) >> 9;                               // cy
    buf.tmp[1] = -256 * brightness;                                               // oy
    buf.tmp[2] =   (inv_table[0] >> 3) * (contrast >> 16) * (saturation >> 16);   // crv
    buf.tmp[3] =   (inv_table[1] >> 3) * (contrast >> 16) * (saturation >> 16);   // cbu
    buf.tmp[4] = -((inv_table[2] >> 1) * (contrast >> 16) * (saturation >> 16));  // cgu
    buf.tmp[5] = -((inv_table[3] >> 1) * (contrast >> 16) * (saturation >> 16));  // cgv

    c->CSHIFT = (vector unsigned short) vec_splat_u16(2);
    c->CY     = vec_splat((vector signed short) buf.vec, 0);
    c->OY     = vec_splat((vector signed short) buf.vec, 1);
    c->CRV    = vec_splat((vector signed short) buf.vec, 2);
    c->CBU    = vec_splat((vector signed short) buf.vec, 3);
    c->CGU    = vec_splat((vector signed short) buf.vec, 4);
    c->CGV    = vec_splat((vector signed short) buf.vec, 5);
    return;
#endif /* HAVE_ALTIVEC */
}
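
/* Worked example for the table above, assuming the neutral settings
 * brightness = 0 and contrast = saturation = 1 << 16 (1.0 in 16.16 fixed
 * point): cy = ((0xffff * 65536) >> 8) >> 9 = 32767, i.e. roughly 1.0 in the
 * Q15 scale used by vec_mradds(), and oy = 0, so the luma path
 * Y' = ((Y * cy + 0x4000) >> 15) + oy passes Y through essentially
 * unchanged, while crv/cbu collapse to the inv_table entries shifted down
 * by 3 and cgu/cgv to the entries shifted down by 1 and negated. */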

#if HAVE_ALTIVEC

static av_always_inline void yuv2packedX_altivec(SwsContext *c,
                                                 const int16_t *lumFilter,
                                                 const int16_t **lumSrc,
                                                 int lumFilterSize,
                                                 const int16_t *chrFilter,
                                                 const int16_t **chrUSrc,
                                                 const int16_t **chrVSrc,
                                                 int chrFilterSize,
                                                 const int16_t **alpSrc,
                                                 uint8_t *dest,
                                                 int dstW, int dstY,
                                                 enum AVPixelFormat target)
{
    int i, j;
    vector signed short X, X0, X1, Y0, U0, V0, Y1, U1, V1, U, V;
    vector signed short R0, G0, B0, R1, G1, B1;

    vector unsigned char R, G, B;
    vector unsigned char *out, *nout;

    vector signed short RND   = vec_splat_s16(1 << 3);
    vector unsigned short SCL = vec_splat_u16(4);
    DECLARE_ALIGNED(16, unsigned int, scratch)[16];

    vector signed short *YCoeffs, *CCoeffs;

    YCoeffs = c->vYCoeffsBank + dstY * lumFilterSize;
    CCoeffs = c->vCCoeffsBank + dstY * chrFilterSize;

    out = (vector unsigned char *) dest;

    for (i = 0; i < dstW; i += 16) {
        Y0 = RND;
        Y1 = RND;
        /* extract 16 coeffs from lumSrc */
        for (j = 0; j < lumFilterSize; j++) {
            X0 = vec_ld(0, &lumSrc[j][i]);
            X1 = vec_ld(16, &lumSrc[j][i]);
            Y0 = vec_mradds(X0, YCoeffs[j], Y0);
            Y1 = vec_mradds(X1, YCoeffs[j], Y1);
        }

        U = RND;
        V = RND;
        /* extract 8 coeffs from U,V */
        for (j = 0; j < chrFilterSize; j++) {
            X = vec_ld(0, &chrUSrc[j][i / 2]);
            U = vec_mradds(X, CCoeffs[j], U);
            X = vec_ld(0, &chrVSrc[j][i / 2]);
            V = vec_mradds(X, CCoeffs[j], V);
        }

        /* scale and clip signals */
        Y0 = vec_sra(Y0, SCL);
        Y1 = vec_sra(Y1, SCL);
        U  = vec_sra(U, SCL);
        V  = vec_sra(V, SCL);

        Y0 = vec_clip_s16(Y0);
        Y1 = vec_clip_s16(Y1);
        U  = vec_clip_s16(U);
        V  = vec_clip_s16(V);

        /* now we have
         * Y0 = y0 y1 y2 y3 y4 y5 y6 y7    Y1 = y8 y9 y10 y11 y12 y13 y14 y15
         * U  = u0 u1 u2 u3 u4 u5 u6 u7    V  = v0 v1 v2 v3 v4 v5 v6 v7
         *
         * Y0 = y0 y1 y2 y3 y4 y5 y6 y7    Y1 = y8 y9 y10 y11 y12 y13 y14 y15
         * U0 = u0 u0 u1 u1 u2 u2 u3 u3    U1 = u4 u4 u5 u5 u6 u6 u7 u7
         * V0 = v0 v0 v1 v1 v2 v2 v3 v3    V1 = v4 v4 v5 v5 v6 v6 v7 v7
         */

        U0 = vec_mergeh(U, U);
        V0 = vec_mergeh(V, V);

        U1 = vec_mergel(U, U);
        V1 = vec_mergel(V, V);

        cvtyuvtoRGB(c, Y0, U0, V0, &R0, &G0, &B0);
        cvtyuvtoRGB(c, Y1, U1, V1, &R1, &G1, &B1);

        R = vec_packclp(R0, R1);
        G = vec_packclp(G0, G1);
        B = vec_packclp(B0, B1);

        switch (target) {
        case AV_PIX_FMT_ABGR:
            out_abgr(R, G, B, out);
            break;
        case AV_PIX_FMT_BGRA:
            out_bgra(R, G, B, out);
            break;
        case AV_PIX_FMT_RGBA:
            out_rgba(R, G, B, out);
            break;
        case AV_PIX_FMT_ARGB:
            out_argb(R, G, B, out);
            break;
        case AV_PIX_FMT_RGB24:
            out_rgb24(R, G, B, out);
            break;
        case AV_PIX_FMT_BGR24:
            out_bgr24(R, G, B, out);
            break;
        default:
        {
            /* If this is reached, the caller should have called yuv2packedXinC
             * instead. */
            static int printed_error_message;
            if (!printed_error_message) {
                av_log(c, AV_LOG_ERROR,
                       "altivec_yuv2packedX doesn't support %s output\n",
                       sws_format_name(c->dstFormat));
                printed_error_message = 1;
            }
            return;
        }
        }
    }

    if (i < dstW) {
        i -= 16;

        Y0 = RND;
        Y1 = RND;
        /* extract 16 coeffs from lumSrc */
        for (j = 0; j < lumFilterSize; j++) {
            X0 = vec_ld(0, &lumSrc[j][i]);
            X1 = vec_ld(16, &lumSrc[j][i]);
            Y0 = vec_mradds(X0, YCoeffs[j], Y0);
            Y1 = vec_mradds(X1, YCoeffs[j], Y1);
        }

        U = RND;
        V = RND;
        /* extract 8 coeffs from U,V */
        for (j = 0; j < chrFilterSize; j++) {
            X = vec_ld(0, &chrUSrc[j][i / 2]);
            U = vec_mradds(X, CCoeffs[j], U);
            X = vec_ld(0, &chrVSrc[j][i / 2]);
            V = vec_mradds(X, CCoeffs[j], V);
        }

        /* scale and clip signals */
        Y0 = vec_sra(Y0, SCL);
        Y1 = vec_sra(Y1, SCL);
        U  = vec_sra(U, SCL);
        V  = vec_sra(V, SCL);

        Y0 = vec_clip_s16(Y0);
        Y1 = vec_clip_s16(Y1);
        U  = vec_clip_s16(U);
        V  = vec_clip_s16(V);

        /* now we have
         * Y0 = y0 y1 y2 y3 y4 y5 y6 y7    Y1 = y8 y9 y10 y11 y12 y13 y14 y15
         * U  = u0 u1 u2 u3 u4 u5 u6 u7    V  = v0 v1 v2 v3 v4 v5 v6 v7
         *
         * Y0 = y0 y1 y2 y3 y4 y5 y6 y7    Y1 = y8 y9 y10 y11 y12 y13 y14 y15
         * U0 = u0 u0 u1 u1 u2 u2 u3 u3    U1 = u4 u4 u5 u5 u6 u6 u7 u7
         * V0 = v0 v0 v1 v1 v2 v2 v3 v3    V1 = v4 v4 v5 v5 v6 v6 v7 v7
         */

        U0 = vec_mergeh(U, U);
        V0 = vec_mergeh(V, V);

        U1 = vec_mergel(U, U);
        V1 = vec_mergel(V, V);

        cvtyuvtoRGB(c, Y0, U0, V0, &R0, &G0, &B0);
        cvtyuvtoRGB(c, Y1, U1, V1, &R1, &G1, &B1);

        R = vec_packclp(R0, R1);
        G = vec_packclp(G0, G1);
        B = vec_packclp(B0, B1);

        nout = (vector unsigned char *) scratch;
        switch (target) {
        case AV_PIX_FMT_ABGR:
            out_abgr(R, G, B, nout);
            break;
        case AV_PIX_FMT_BGRA:
            out_bgra(R, G, B, nout);
            break;
        case AV_PIX_FMT_RGBA:
            out_rgba(R, G, B, nout);
            break;
        case AV_PIX_FMT_ARGB:
            out_argb(R, G, B, nout);
            break;
        case AV_PIX_FMT_RGB24:
            out_rgb24(R, G, B, nout);
            break;
        case AV_PIX_FMT_BGR24:
            out_bgr24(R, G, B, nout);
            break;
        default:
            /* Unreachable, I think. */
            av_log(c, AV_LOG_ERROR,
                   "altivec_yuv2packedX doesn't support %s output\n",
                   sws_format_name(c->dstFormat));
            return;
        }

        memcpy(&((uint32_t *) dest)[i], scratch, (dstW - i) / 4);
    }
}

#define YUV2PACKEDX_WRAPPER(suffix, pixfmt)                             \
void ff_yuv2 ## suffix ## _X_altivec(SwsContext *c,                     \
                                     const int16_t *lumFilter,          \
                                     const int16_t **lumSrc,            \
                                     int lumFilterSize,                 \
                                     const int16_t *chrFilter,          \
                                     const int16_t **chrUSrc,           \
                                     const int16_t **chrVSrc,           \
                                     int chrFilterSize,                 \
                                     const int16_t **alpSrc,            \
                                     uint8_t *dest, int dstW, int dstY) \
{                                                                       \
    yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize,            \
                        chrFilter, chrUSrc, chrVSrc,                    \
                        chrFilterSize, alpSrc,                          \
                        dest, dstW, dstY, pixfmt);                      \
}

YUV2PACKEDX_WRAPPER(abgr,  AV_PIX_FMT_ABGR);
YUV2PACKEDX_WRAPPER(bgra,  AV_PIX_FMT_BGRA);
YUV2PACKEDX_WRAPPER(argb,  AV_PIX_FMT_ARGB);
YUV2PACKEDX_WRAPPER(rgba,  AV_PIX_FMT_RGBA);
YUV2PACKEDX_WRAPPER(rgb24, AV_PIX_FMT_RGB24);
YUV2PACKEDX_WRAPPER(bgr24, AV_PIX_FMT_BGR24);

#endif /* HAVE_ALTIVEC */