/*
 * Copyright (c) 2001 Michel Lespinasse
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* NOTE: This code is based on GPL code from the libmpeg2 project.  The
 * author, Michel Lespinasse, has given explicit permission to release
 * under LGPL as part of Libav.
 *
 * Libav integration by Dieter Shirley
 *
 * This file is a direct copy of the AltiVec IDCT module from the libmpeg2
 * project.  I've deleted all of the libmpeg2-specific code, renamed the
 * functions and reordered the function parameters.  The only change to the
 * IDCT function itself was to factor out the partial transposition and to
 * perform a full transpose at the end of the function. */

#include <stdlib.h>
#include <string.h>
#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif

#include "libavutil/ppc/types_altivec.h"
#include "dsputil_altivec.h"

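/* One 1-D pass of the 8-point IDCT, applied to eight vectors at once
 * (one vector per row or column).  The four butterfly stages are built
 * from saturating adds/subtracts and vec_mradds(), a saturating
 * fixed-point multiply-add that rounds the product and shifts it right
 * by 15 before applying the addend.  Inputs are vx0-vx7, outputs are
 * vy0-vy7; t0-t8 are scratch. */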
#define IDCT_HALF                                       \
    /* 1st stage */                                     \
    t1 = vec_mradds(a1, vx7, vx1);                      \
    t8 = vec_mradds(a1, vx1, vec_subs(zero, vx7));      \
    t7 = vec_mradds(a2, vx5, vx3);                      \
    t3 = vec_mradds(ma2, vx3, vx5);                     \
                                                        \
    /* 2nd stage */                                     \
    t5 = vec_adds(vx0, vx4);                            \
    t0 = vec_subs(vx0, vx4);                            \
    t2 = vec_mradds(a0, vx6, vx2);                      \
    t4 = vec_mradds(a0, vx2, vec_subs(zero, vx6));      \
    t6 = vec_adds(t8, t3);                              \
    t3 = vec_subs(t8, t3);                              \
    t8 = vec_subs(t1, t7);                              \
    t1 = vec_adds(t1, t7);                              \
                                                        \
    /* 3rd stage */                                     \
    t7 = vec_adds(t5, t2);                              \
    t2 = vec_subs(t5, t2);                              \
    t5 = vec_adds(t0, t4);                              \
    t0 = vec_subs(t0, t4);                              \
    t4 = vec_subs(t8, t3);                              \
    t3 = vec_adds(t8, t3);                              \
                                                        \
    /* 4th stage */                                     \
    vy0 = vec_adds(t7, t1);                             \
    vy7 = vec_subs(t7, t1);                             \
    vy1 = vec_mradds(c4, t3, t5);                       \
    vy6 = vec_mradds(mc4, t3, t5);                      \
    vy2 = vec_mradds(c4, t4, t0);                       \
    vy5 = vec_mradds(mc4, t4, t0);                      \
    vy3 = vec_adds(t2, t6);                             \
    vy4 = vec_subs(t2, t6);

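/* The full 8x8 IDCT: pre-scale the input coefficients (shift left by 4,
 * then multiply by the factors in constants[1..4]), run one 1-D pass
 * over all eight vectors, transpose the matrix in three rounds of
 * vec_mergeh()/vec_mergel() while folding the rounding bias into vx0
 * (from where it propagates additively to every output of the second
 * pass), then run the second 1-D pass and arithmetic-shift the results
 * right by 6 into vx0-vx7. */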
#define IDCT                                                                \
    vec_s16 vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7;                         \
    vec_s16 vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7;                         \
    vec_s16 a0, a1, a2, ma2, c4, mc4, zero, bias;                           \
    vec_s16 t0, t1, t2, t3, t4, t5, t6, t7, t8;                             \
    vec_u16 shift;                                                          \
                                                                            \
    c4   = vec_splat(constants[0], 0);                                      \
    a0   = vec_splat(constants[0], 1);                                      \
    a1   = vec_splat(constants[0], 2);                                      \
    a2   = vec_splat(constants[0], 3);                                      \
    mc4  = vec_splat(constants[0], 4);                                      \
    ma2  = vec_splat(constants[0], 5);                                      \
    bias = (vec_s16) vec_splat((vec_s32) constants[0], 3);                  \
                                                                            \
    zero  = vec_splat_s16(0);                                               \
    shift = vec_splat_u16(4);                                               \
                                                                            \
    /* Pre-scale the input coefficients. */                                 \
    vx0 = vec_mradds(vec_sl(block[0], shift), constants[1], zero);          \
    vx1 = vec_mradds(vec_sl(block[1], shift), constants[2], zero);          \
    vx2 = vec_mradds(vec_sl(block[2], shift), constants[3], zero);          \
    vx3 = vec_mradds(vec_sl(block[3], shift), constants[4], zero);          \
    vx4 = vec_mradds(vec_sl(block[4], shift), constants[1], zero);          \
    vx5 = vec_mradds(vec_sl(block[5], shift), constants[4], zero);          \
    vx6 = vec_mradds(vec_sl(block[6], shift), constants[3], zero);          \
    vx7 = vec_mradds(vec_sl(block[7], shift), constants[2], zero);          \
                                                                            \
    IDCT_HALF                                                               \
                                                                            \
    /* Transpose the 8x8 matrix (three merge rounds; bias folded in). */    \
    vx0 = vec_mergeh(vy0, vy4);                                             \
    vx1 = vec_mergel(vy0, vy4);                                             \
    vx2 = vec_mergeh(vy1, vy5);                                             \
    vx3 = vec_mergel(vy1, vy5);                                             \
    vx4 = vec_mergeh(vy2, vy6);                                             \
    vx5 = vec_mergel(vy2, vy6);                                             \
    vx6 = vec_mergeh(vy3, vy7);                                             \
    vx7 = vec_mergel(vy3, vy7);                                             \
                                                                            \
    vy0 = vec_mergeh(vx0, vx4);                                             \
    vy1 = vec_mergel(vx0, vx4);                                             \
    vy2 = vec_mergeh(vx1, vx5);                                             \
    vy3 = vec_mergel(vx1, vx5);                                             \
    vy4 = vec_mergeh(vx2, vx6);                                             \
    vy5 = vec_mergel(vx2, vx6);                                             \
    vy6 = vec_mergeh(vx3, vx7);                                             \
    vy7 = vec_mergel(vx3, vx7);                                             \
                                                                            \
    vx0 = vec_adds(vec_mergeh(vy0, vy4), bias);                             \
    vx1 = vec_mergel(vy0, vy4);                                             \
    vx2 = vec_mergeh(vy1, vy5);                                             \
    vx3 = vec_mergel(vy1, vy5);                                             \
    vx4 = vec_mergeh(vy2, vy6);                                             \
    vx5 = vec_mergel(vy2, vy6);                                             \
    vx6 = vec_mergeh(vy3, vy7);                                             \
    vx7 = vec_mergel(vy3, vy7);                                             \
                                                                            \
    IDCT_HALF                                                               \
                                                                            \
    /* Scale down to the final sample range. */                             \
    shift = vec_splat_u16(6);                                               \
    vx0 = vec_sra(vy0, shift);                                              \
    vx1 = vec_sra(vy1, shift);                                              \
    vx2 = vec_sra(vy2, shift);                                              \
    vx3 = vec_sra(vy3, shift);                                              \
    vx4 = vec_sra(vy4, shift);                                              \
    vx5 = vec_sra(vy5, shift);                                              \
    vx6 = vec_sra(vy6, shift);                                              \
    vx7 = vec_sra(vy7, shift);

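/* Multiplier constants in signed Q15 fixed point.  constants[0] holds
 * the factors used inside IDCT_HALF (element 0 is cos(pi/4) scaled by
 * 2^15; the a0-a2 terms appear to be tangent values from the usual
 * fast-IDCT factorization) plus the {32, 31} halfword pair that IDCT
 * splats into the alternating rounding bias.  constants[1..4] hold the
 * pre-scale factors, one vector shared by each pair of input rows. */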
static const vec_s16 constants[5] = {
    { 23170, 13573,  6518, 21895, -23170, -21895,    32,    31 },
    { 16384, 22725, 21407, 19266,  16384,  19266, 21407, 22725 },
    { 22725, 31521, 29692, 26722,  22725,  26722, 29692, 31521 },
    { 21407, 29692, 27969, 25172,  21407,  25172, 27969, 29692 },
    { 19266, 26722, 25172, 22654,  19266,  22654, 25172, 26722 }
};

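/* Compute the IDCT of the 8x8 block of coefficients in blk and store
 * the result, clamped to the 0-255 range, into an 8x8 region of dest. */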
void ff_idct_put_altivec(uint8_t *dest, int stride, int16_t *blk)
{
    vec_s16 *block = (vec_s16 *) blk;
    vec_u8 tmp;

    IDCT

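/* Pack one row of results down to unsigned bytes with saturation, then
 * store the 8 bytes as two 4-byte element stores.  vec_packsu(src, src)
 * duplicates the row into both halves of the vector, so vec_ste()
 * selects a valid element for any 8-byte-aligned dest. */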
#define COPY(dest, src)                                     \
    tmp = vec_packsu(src, src);                             \
    vec_ste((vec_u32) tmp, 0, (unsigned int *) dest);       \
    vec_ste((vec_u32) tmp, 4, (unsigned int *) dest);

    COPY(dest, vx0)
    dest += stride;
    COPY(dest, vx1)
    dest += stride;
    COPY(dest, vx2)
    dest += stride;
    COPY(dest, vx3)
    dest += stride;
    COPY(dest, vx4)
    dest += stride;
    COPY(dest, vx5)
    dest += stride;
    COPY(dest, vx6)
    dest += stride;
    COPY(dest, vx7)
}

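/* Compute the IDCT of the 8x8 block of coefficients in blk and add the
 * result to an 8x8 region of dest, with the sums clamped to 0-255. */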
void ff_idct_add_altivec(uint8_t *dest, int stride, int16_t *blk)
{
    vec_s16 *block = (vec_s16 *) blk;
    vec_u8 tmp;
    vec_s16 tmp2, tmp3;
    vec_u8 perm0;
    vec_u8 perm1;
    vec_u8 p0, p1, p;

    IDCT

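    /* Build permute vectors that zero-extend 8 destination bytes to
     * 16-bit elements: vec_lvsl() supplies the alignment-dependent byte
     * indices, and the interleaved 0xFF bytes select zeros from the
     * second operand of vec_perm().  Rows at even and odd multiples of
     * stride may be aligned differently, hence perm0 and perm1. */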
    p0    = vec_lvsl(0, dest);
    p1    = vec_lvsl(stride, dest);
    p     = vec_splat_u8(-1);
    perm0 = vec_mergeh(p, p0);
    perm1 = vec_mergeh(p, p1);

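/* Load one row of dest, zero-extend its bytes to 16 bits using the
 * precomputed permute vector, add the IDCT output with saturation, then
 * pack and store the row just as in COPY above. */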
#define ADD(dest, src, perm)                                \
    /* *(uint64_t *) &tmp = *(uint64_t *) dest; */          \
    tmp  = vec_ld(0, dest);                                 \
    tmp2 = (vec_s16) vec_perm(tmp, (vec_u8) zero, perm);    \
    tmp3 = vec_adds(tmp2, src);                             \
    tmp  = vec_packsu(tmp3, tmp3);                          \
    vec_ste((vec_u32) tmp, 0, (unsigned int *) dest);       \
    vec_ste((vec_u32) tmp, 4, (unsigned int *) dest);

    ADD(dest, vx0, perm0)
    dest += stride;
    ADD(dest, vx1, perm1)
    dest += stride;
    ADD(dest, vx2, perm0)
    dest += stride;
    ADD(dest, vx3, perm1)
    dest += stride;
    ADD(dest, vx4, perm0)
    dest += stride;
    ADD(dest, vx5, perm1)
    dest += stride;
    ADD(dest, vx6, perm0)
    dest += stride;
    ADD(dest, vx7, perm1)
}