/*
 * Copyright (c) 2001 Michel Lespinasse
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* NOTE: This code is based on GPL code from the libmpeg2 project.  The
 * author, Michel Lespinasse, has given explicit permission to release it
 * under the LGPL as part of Libav.
 *
 * Libav integration by Dieter Shirley
 *
 * This file is a direct copy of the AltiVec IDCT module from the libmpeg2
 * project.  I've deleted all of the libmpeg2-specific code, renamed the
 * functions and reordered the function parameters.  The only change to the
 * IDCT function itself was to factor out the partial transposition and to
 * perform a full transpose at the end of the function. */

#include <stdlib.h>
#include <string.h>
#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#include "libavutil/ppc/types_altivec.h"
#include "dsputil_altivec.h"

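/* IDCT_HALF is one pass of the transform: a four-stage, eight-point 1-D
 * IDCT butterfly over the vectors vx0-vx7, with the results left in
 * vy0-vy7.  Since each vec_s16 holds one element from each of the eight
 * columns, a single expansion transforms all eight columns at once. */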
#define IDCT_HALF                                       \
    /* 1st stage */                                     \
    t1 = vec_mradds (a1, vx7, vx1);                     \
    t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7));    \
    t7 = vec_mradds (a2, vx5, vx3);                     \
    t3 = vec_mradds (ma2, vx3, vx5);                    \
                                                        \
    /* 2nd stage */                                     \
    t5 = vec_adds (vx0, vx4);                           \
    t0 = vec_subs (vx0, vx4);                           \
    t2 = vec_mradds (a0, vx6, vx2);                     \
    t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6));    \
    t6 = vec_adds (t8, t3);                             \
    t3 = vec_subs (t8, t3);                             \
    t8 = vec_subs (t1, t7);                             \
    t1 = vec_adds (t1, t7);                             \
                                                        \
    /* 3rd stage */                                     \
    t7 = vec_adds (t5, t2);                             \
    t2 = vec_subs (t5, t2);                             \
    t5 = vec_adds (t0, t4);                             \
    t0 = vec_subs (t0, t4);                             \
    t4 = vec_subs (t8, t3);                             \
    t3 = vec_adds (t8, t3);                             \
                                                        \
    /* 4th stage */                                     \
    vy0 = vec_adds (t7, t1);                            \
    vy7 = vec_subs (t7, t1);                            \
    vy1 = vec_mradds (c4, t3, t5);                      \
    vy6 = vec_mradds (mc4, t3, t5);                     \
    vy2 = vec_mradds (c4, t4, t0);                      \
    vy5 = vec_mradds (mc4, t4, t0);                     \
    vy3 = vec_adds (t2, t6);                            \
    vy4 = vec_subs (t2, t6);

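/* The butterflies above lean on vec_mradds() (vmhraddshs), a rounded Q15
 * fixed-point multiply fused with a saturating add.  The helper below is a
 * scalar sketch of what it computes per 16-bit lane; the name and function
 * are illustrative only and are not used by the code in this file. */
static inline int16_t mradds_scalar(int16_t a, int16_t b, int16_t c)
{
    int32_t t = ((int32_t) a * b + 0x4000) >> 15; /* rounded Q15 product */
    t += c;                                       /* accumulate */
    if (t >  32767) t =  32767;                   /* saturate to int16_t */
    if (t < -32768) t = -32768;
    return (int16_t) t;
}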

#define IDCT                                                            \
    vec_s16 vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7;                     \
    vec_s16 vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7;                     \
    vec_s16 a0, a1, a2, ma2, c4, mc4, zero, bias;                       \
    vec_s16 t0, t1, t2, t3, t4, t5, t6, t7, t8;                         \
    vec_u16 shift;                                                      \
                                                                        \
    c4 = vec_splat (constants[0], 0);                                   \
    a0 = vec_splat (constants[0], 1);                                   \
    a1 = vec_splat (constants[0], 2);                                   \
    a2 = vec_splat (constants[0], 3);                                   \
    mc4 = vec_splat (constants[0], 4);                                  \
    ma2 = vec_splat (constants[0], 5);                                  \
    bias = (vec_s16)vec_splat ((vec_s32)constants[0], 3);               \
                                                                        \
    zero = vec_splat_s16 (0);                                           \
    shift = vec_splat_u16 (4);                                          \
                                                                        \
    /* prescale: shift each input row up by 4 bits and fold the         \
     * per-row Q15 constants into the first multiply */                 \
    vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero);    \
    vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero);    \
    vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero);    \
    vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero);    \
    vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero);    \
    vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero);    \
    vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero);    \
    vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero);    \
                                                                        \
    IDCT_HALF                                                           \
                                                                        \
    /* full 8x8 transpose, done as three rounds of merges */            \
    vx0 = vec_mergeh (vy0, vy4);                                        \
    vx1 = vec_mergel (vy0, vy4);                                        \
    vx2 = vec_mergeh (vy1, vy5);                                        \
    vx3 = vec_mergel (vy1, vy5);                                        \
    vx4 = vec_mergeh (vy2, vy6);                                        \
    vx5 = vec_mergel (vy2, vy6);                                        \
    vx6 = vec_mergeh (vy3, vy7);                                        \
    vx7 = vec_mergel (vy3, vy7);                                        \
                                                                        \
    vy0 = vec_mergeh (vx0, vx4);                                        \
    vy1 = vec_mergel (vx0, vx4);                                        \
    vy2 = vec_mergeh (vx1, vx5);                                        \
    vy3 = vec_mergel (vx1, vx5);                                        \
    vy4 = vec_mergeh (vx2, vx6);                                        \
    vy5 = vec_mergel (vx2, vx6);                                        \
    vy6 = vec_mergeh (vx3, vx7);                                        \
    vy7 = vec_mergel (vx3, vx7);                                        \
                                                                        \
    /* last transpose round; the rounding bias for the final >> 6 is    \
     * folded into vx0, from which it propagates to every output of     \
     * the second pass */                                               \
    vx0 = vec_adds (vec_mergeh (vy0, vy4), bias);                       \
    vx1 = vec_mergel (vy0, vy4);                                        \
    vx2 = vec_mergeh (vy1, vy5);                                        \
    vx3 = vec_mergel (vy1, vy5);                                        \
    vx4 = vec_mergeh (vy2, vy6);                                        \
    vx5 = vec_mergel (vy2, vy6);                                        \
    vx6 = vec_mergeh (vy3, vy7);                                        \
    vx7 = vec_mergel (vy3, vy7);                                        \
                                                                        \
    IDCT_HALF                                                           \
                                                                        \
    /* scale the results back down into the sample range */             \
    shift = vec_splat_u16 (6);                                          \
    vx0 = vec_sra (vy0, shift);                                         \
    vx1 = vec_sra (vy1, shift);                                         \
    vx2 = vec_sra (vy2, shift);                                         \
    vx3 = vec_sra (vy3, shift);                                         \
    vx4 = vec_sra (vy4, shift);                                         \
    vx5 = vec_sra (vy5, shift);                                         \
    vx6 = vec_sra (vy6, shift);                                         \
    vx7 = vec_sra (vy7, shift);


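/* All values are Q15 fixed point.  In constants[0], 23170 is
 * cos(pi/4) * 2^15 (with -23170 its negation), while 13573, 6518 and 21895
 * appear to be tan(pi/8), tan(pi/16) and tan(3*pi/16) scaled by 2^15.  The
 * trailing {32, 31} pair is splatted as one 32-bit word to build the
 * alternating rounding bias.  constants[1..4] hold the per-row prescale
 * factors that the IDCT macro folds into its first multiply. */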
static const vec_s16 constants[5] = {
    {23170, 13573,  6518, 21895, -23170, -21895,    32,    31},
    {16384, 22725, 21407, 19266,  16384,  19266, 21407, 22725},
    {22725, 31521, 29692, 26722,  22725,  26722, 29692, 31521},
    {21407, 29692, 27969, 25172,  21407,  25172, 27969, 29692},
    {19266, 26722, 25172, 22654,  19266,  22654, 25172, 26722}
};

void ff_idct_put_altivec(uint8_t *dest, int stride, int16_t *blk)
{
    vec_s16 *block = (vec_s16 *) blk;
    vec_u8 tmp;

    IDCT

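/* COPY saturates one row of results to unsigned bytes and stores it with
 * two 4-byte vec_ste() writes.  vec_ste() stores to the effective address
 * rounded down to word alignment, and packing the row twice means the same
 * two stores work at either 8-byte offset within a 16-byte line, so dest
 * is assumed to be at least 8-byte aligned. */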
#define COPY(dest, src)                                 \
    tmp = vec_packsu (src, src);                        \
    vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest);    \
    vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);

    COPY (dest, vx0)    dest += stride;
    COPY (dest, vx1)    dest += stride;
    COPY (dest, vx2)    dest += stride;
    COPY (dest, vx3)    dest += stride;
    COPY (dest, vx4)    dest += stride;
    COPY (dest, vx5)    dest += stride;
    COPY (dest, vx6)    dest += stride;
    COPY (dest, vx7)
}

void ff_idct_add_altivec(uint8_t *dest, int stride, int16_t *blk)
{
    vec_s16 *block = (vec_s16 *) blk;
    vec_u8 tmp;
    vec_s16 tmp2, tmp3;
    vec_u8 perm0;
    vec_u8 perm1;
    vec_u8 p0, p1, p;

    IDCT

    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);

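/* ADD loads the 16 aligned bytes covering the eight dest pixels and uses
 * vec_perm with the masks built above to zero-extend them to shorts: each
 * 0xFF selector picks byte 31, a zero from the second operand, while the
 * lvsl-derived selectors pick out the unaligned pixel bytes.  The single
 * vec_ld suffices as long as the row does not straddle a 16-byte boundary,
 * i.e. dest is assumed to be 8-byte aligned. */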
#define ADD(dest, src, perm)                                \
    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */            \
    tmp = vec_ld (0, dest);                                 \
    tmp2 = (vec_s16)vec_perm (tmp, (vec_u8)zero, perm);     \
    tmp3 = vec_adds (tmp2, src);                            \
    tmp = vec_packsu (tmp3, tmp3);                          \
    vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest);        \
    vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);

    ADD (dest, vx0, perm0)      dest += stride;
    ADD (dest, vx1, perm1)      dest += stride;
    ADD (dest, vx2, perm0)      dest += stride;
    ADD (dest, vx3, perm1)      dest += stride;
    ADD (dest, vx4, perm0)      dest += stride;
    ADD (dest, vx5, perm1)      dest += stride;
    ADD (dest, vx6, perm0)      dest += stride;
    ADD (dest, vx7, perm1)
}