libavcodec/aarch64/vp9itxfm_16bpp_neon.S
1 /*
2  * Copyright (c) 2017 Google Inc.
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 #include "libavutil/aarch64/asm.S"
22 #include "neon.S"
23
24 const itxfm4_coeffs, align=4
25         .short  11585, 0, 6270, 15137
26 iadst4_coeffs:
27         .short  5283, 15212, 9929, 13377
28 endconst
29
30 const iadst8_coeffs, align=4
31         .short  16305, 1606, 14449, 7723, 10394, 12665, 4756, 15679
32 idct_coeffs:
33         .short  11585, 0, 6270, 15137, 3196, 16069, 13623, 9102
34         .short  1606, 16305, 12665, 10394, 7723, 14449, 15679, 4756
35         .short  804, 16364, 12140, 11003, 7005, 14811, 15426, 5520
36         .short  3981, 15893, 14053, 8423, 9760, 13160, 16207, 2404
37 endconst
38
39 const iadst16_coeffs, align=4
40         .short  16364, 804, 15893, 3981, 11003, 12140, 8423, 14053
41         .short  14811, 7005, 13160, 9760, 5520, 15426, 2404, 16207
42 endconst
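// For reference, these tables hold VP9's Q14 fixed-point trig factors,
// roughly round(2^14 * cos(k*pi/64)) for the idct plus the matching sine
// terms for the iadst. For example 11585 ~= 2^14 * cos(pi/4) = 2^14/sqrt(2),
// 15137 ~= 2^14 * cos(pi/8) and 6270 ~= 2^14 * cos(3*pi/8). Products with
// these factors are brought back into range by adding the rounding constant
// (1 << 13) and shifting right by 14, which is what the srshr/rshrn #14
// instructions in the macros below do.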
43
44 .macro transpose_4x4s r0, r1, r2, r3, r4, r5, r6, r7
45         trn1            \r4\().4s,  \r0\().4s,  \r1\().4s
46         trn2            \r5\().4s,  \r0\().4s,  \r1\().4s
47         trn1            \r6\().4s,  \r2\().4s,  \r3\().4s
48         trn2            \r7\().4s,  \r2\().4s,  \r3\().4s
49         trn1            \r0\().2d,  \r4\().2d,  \r6\().2d
50         trn2            \r2\().2d,  \r4\().2d,  \r6\().2d
51         trn1            \r1\().2d,  \r5\().2d,  \r7\().2d
52         trn2            \r3\().2d,  \r5\().2d,  \r7\().2d
53 .endm
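// For reference, the two trn stages above implement a plain 4x4 transpose.
// With input rows r0..r3 = {a0 a1 a2 a3}, {b0 b1 b2 b3}, {c0 c1 c2 c3},
// {d0 d1 d2 d3}, the .4s trn1/trn2 pairs interleave 32 bit lanes:
//   r4 = {a0 b0 a2 b2}, r5 = {a1 b1 a3 b3}, r6 = {c0 d0 c2 d2}, r7 = {c1 d1 c3 d3}
// and the .2d trn1/trn2 pairs then exchange 64 bit halves, leaving the columns
//   r0 = {a0 b0 c0 d0}, r1 = {a1 b1 c1 d1}, r2 = {a2 b2 c2 d2}, r3 = {a3 b3 c3 d3}.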
54
55 // Transpose an 8x8 matrix of 32 bit elements, where each row is spread out
56 // over two registers.
57 .macro transpose_8x8s r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15, t0, t1, t2, t3
58         transpose_4x4s  \r0,  \r2,  \r4,  \r6,  \t0, \t1, \t2, \t3
59         transpose_4x4s  \r9,  \r11, \r13, \r15, \t0, \t1, \t2, \t3
60
61         // Do 4x4 transposes of r1,r3,r5,r7 and r8,r10,r12,r14
62         // while swapping the two 4x4 matrices between each other
63
64         // First step of the 4x4 transpose of r1-r7, into t0-t3
65         trn1            \t0\().4s,  \r1\().4s,  \r3\().4s
66         trn2            \t1\().4s,  \r1\().4s,  \r3\().4s
67         trn1            \t2\().4s,  \r5\().4s,  \r7\().4s
68         trn2            \t3\().4s,  \r5\().4s,  \r7\().4s
69
70         // First step of the 4x4 transpose of r8-r14, into r1-r7
71         trn1            \r1\().4s,  \r8\().4s,  \r10\().4s
72         trn2            \r3\().4s,  \r8\().4s,  \r10\().4s
73         trn1            \r5\().4s,  \r12\().4s, \r14\().4s
74         trn2            \r7\().4s,  \r12\().4s, \r14\().4s
75
76         // Second step of the 4x4 transpose of r1-r7 (now in t0-t3), into r8-r14
77         trn1            \r8\().2d,  \t0\().2d,  \t2\().2d
78         trn2            \r12\().2d, \t0\().2d,  \t2\().2d
79         trn1            \r10\().2d, \t1\().2d,  \t3\().2d
80         trn2            \r14\().2d, \t1\().2d,  \t3\().2d
81
82         // Second step of the 4x4 transpose of r8-r14 (now in r1-r7), in place as far as possible
83         trn1            \t0\().2d,  \r1\().2d,  \r5\().2d
84         trn2            \r5\().2d,  \r1\().2d,  \r5\().2d
85         trn1            \t1\().2d,  \r3\().2d,  \r7\().2d
86         trn2            \r7\().2d,  \r3\().2d,  \r7\().2d
87
88         // Move the outputs of trn1 back in place
89         mov             \r1\().16b,  \t0\().16b
90         mov             \r3\().16b,  \t1\().16b
91 .endm
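// Viewing each 8x8 input as a 2x2 grid of 4x4 blocks (each row spread over an
// even/odd register pair), the macro above relies on the block transpose
// identity
//   [ A B ]^T = [ A^T C^T ]
//   [ C D ]     [ B^T D^T ]
// so the diagonal blocks (r0,r2,r4,r6 and r9,r11,r13,r15) are transposed in
// place, while the off-diagonal blocks are transposed and swapped with each
// other, which is why the second half shuffles data through t0-t3.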
92
93 // out1 = ((in1 + in2) * v0.s[0] + (1 << 13)) >> 14
94 // out2 = ((in1 - in2) * v0.s[0] + (1 << 13)) >> 14
95 // in/out are .4s registers; this can make do with 4 temp registers, but is
96 // more efficient if 6 temp registers are available.
97 .macro dmbutterfly0 out1, out2, in1, in2, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, neg=0
98 .if \neg > 0
99         neg             \tmp4\().4s, v0.4s
100 .endif
101         add             \tmp1\().4s, \in1\().4s,  \in2\().4s
102         sub             \tmp2\().4s, \in1\().4s,  \in2\().4s
103 .if \neg > 0
104         smull           \tmp3\().2d, \tmp1\().2s, \tmp4\().s[0]
105         smull2          \tmp4\().2d, \tmp1\().4s, \tmp4\().s[0]
106 .else
107         smull           \tmp3\().2d, \tmp1\().2s, v0.s[0]
108         smull2          \tmp4\().2d, \tmp1\().4s, v0.s[0]
109 .endif
110 .ifb \tmp5
111         rshrn           \out1\().2s, \tmp3\().2d, #14
112         rshrn2          \out1\().4s, \tmp4\().2d, #14
113         smull           \tmp3\().2d, \tmp2\().2s, v0.s[0]
114         smull2          \tmp4\().2d, \tmp2\().4s, v0.s[0]
115         rshrn           \out2\().2s, \tmp3\().2d, #14
116         rshrn2          \out2\().4s, \tmp4\().2d, #14
117 .else
118         smull           \tmp5\().2d, \tmp2\().2s, v0.s[0]
119         smull2          \tmp6\().2d, \tmp2\().4s, v0.s[0]
120         rshrn           \out1\().2s, \tmp3\().2d, #14
121         rshrn2          \out1\().4s, \tmp4\().2d, #14
122         rshrn           \out2\().2s, \tmp5\().2d, #14
123         rshrn2          \out2\().4s, \tmp6\().2d, #14
124 .endif
125 .endm
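// A rough C model of dmbutterfly0 (ad hoc names, for illustration only):
//
//     static void dmbutterfly0_c(int32_t *out1, int32_t *out2,
//                                const int32_t *in1, const int32_t *in2)
//     {
//         for (int i = 0; i < 4; i++) {
//             int64_t s = (int64_t)(in1[i] + in2[i]) * 11585;     // v0.s[0]
//             int64_t d = (int64_t)(in1[i] - in2[i]) * 11585;
//             out1[i] = (int32_t)((s + (1 << 13)) >> 14);         // rshrn #14
//             out2[i] = (int32_t)((d + (1 << 13)) >> 14);
//         }
//     }
//
// The neg=1 variant negates the multiplier for out1 (it multiplies the sum
// by -v0.s[0]) but otherwise behaves the same.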
126
127 // out1,out2 = in1 * coef1 - in2 * coef2
128 // out3,out4 = in1 * coef2 + in2 * coef1
129 // out are 4 x .2d registers, in are 2 x .4s registers
130 .macro dmbutterfly_l out1, out2, out3, out4, in1, in2, coef1, coef2
131         smull           \out1\().2d, \in1\().2s, \coef1
132         smull2          \out2\().2d, \in1\().4s, \coef1
133         smull           \out3\().2d, \in1\().2s, \coef2
134         smull2          \out4\().2d, \in1\().4s, \coef2
135         smlsl           \out1\().2d, \in2\().2s, \coef2
136         smlsl2          \out2\().2d, \in2\().4s, \coef2
137         smlal           \out3\().2d, \in2\().2s, \coef1
138         smlal2          \out4\().2d, \in2\().4s, \coef1
139 .endm
140
141 // inout1 = (inout1 * coef1 - inout2 * coef2 + (1 << 13)) >> 14
142 // inout2 = (inout1 * coef2 + inout2 * coef1 + (1 << 13)) >> 14
143 // inout are 2 x .4s registers
144 .macro dmbutterfly inout1, inout2, coef1, coef2, tmp1, tmp2, tmp3, tmp4, neg=0
145         dmbutterfly_l   \tmp1, \tmp2, \tmp3, \tmp4, \inout1, \inout2, \coef1, \coef2
146 .if \neg > 0
147         neg             \tmp3\().2d, \tmp3\().2d
148         neg             \tmp4\().2d, \tmp4\().2d
149 .endif
150         rshrn           \inout1\().2s, \tmp1\().2d,  #14
151         rshrn2          \inout1\().4s, \tmp2\().2d,  #14
152         rshrn           \inout2\().2s, \tmp3\().2d,  #14
153         rshrn2          \inout2\().4s, \tmp4\().2d,  #14
154 .endm
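// A rough C model of dmbutterfly, which is a Q14 rotation of the two inputs
// (ad hoc names, for illustration only):
//
//     for (int i = 0; i < 4; i++) {
//         int64_t a = (int64_t)io1[i] * coef1 - (int64_t)io2[i] * coef2;
//         int64_t b = (int64_t)io1[i] * coef2 + (int64_t)io2[i] * coef1;
//         io1[i] = (int32_t)((a + (1 << 13)) >> 14);
//         io2[i] = (int32_t)((b + (1 << 13)) >> 14);
//     }
//
// dmbutterfly_l computes the same products but keeps them as unrounded 64 bit
// values, so that dbutterfly_n below can add/subtract several of them before
// performing a single rounding shift.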
155
156 // out1 = in1 + in2
157 // out2 = in1 - in2
158 .macro butterfly_4s out1, out2, in1, in2
159         add             \out1\().4s, \in1\().4s, \in2\().4s
160         sub             \out2\().4s, \in1\().4s, \in2\().4s
161 .endm
162
163 // out1 = in1 - in2
164 // out2 = in1 + in2
165 .macro butterfly_4s_r out1, out2, in1, in2
166         sub             \out1\().4s, \in1\().4s, \in2\().4s
167         add             \out2\().4s, \in1\().4s, \in2\().4s
168 .endm
169
170 // out1 = (in1,in2 + in3,in4 + (1 << 13)) >> 14
171 // out2 = (in1,in2 - in3,in4 + (1 << 13)) >> 14
172 // out are 2 x .4s registers, in are 4 x .2d registers
173 .macro dbutterfly_n out1, out2, in1, in2, in3, in4, tmp1, tmp2, tmp3, tmp4
174         add             \tmp1\().2d, \in1\().2d, \in3\().2d
175         add             \tmp2\().2d, \in2\().2d, \in4\().2d
176         sub             \tmp3\().2d, \in1\().2d, \in3\().2d
177         sub             \tmp4\().2d, \in2\().2d, \in4\().2d
178         rshrn           \out1\().2s, \tmp1\().2d,  #14
179         rshrn2          \out1\().4s, \tmp2\().2d,  #14
180         rshrn           \out2\().2s, \tmp3\().2d,  #14
181         rshrn2          \out2\().4s, \tmp4\().2d,  #14
182 .endm
183
184 .macro iwht4_10 c0, c1, c2, c3
185         add             \c0\().4s, \c0\().4s, \c1\().4s
186         sub             v17.4s,    \c2\().4s, \c3\().4s
187         sub             v16.4s,    \c0\().4s, v17.4s
188         sshr            v16.4s,    v16.4s,    #1
189         sub             \c2\().4s, v16.4s,    \c1\().4s
190         sub             \c1\().4s, v16.4s,    \c3\().4s
191         add             \c3\().4s, v17.4s,    \c2\().4s
192         sub             \c0\().4s, \c0\().4s, \c1\().4s
193 .endm
194
195 .macro iwht4_12 c0, c1, c2, c3
196         iwht4_10        \c0, \c1, \c2, \c3
197 .endm
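// The inverse Walsh-Hadamard transform above (used for lossless coding) needs
// no multiplies at all, only adds, subtracts and one shift, so the 10 and
// 12 bit variants are identical. Correspondingly, the 4x4 add function below
// pre-shifts the iwht input right by 2 and skips the final srshr #4 that the
// idct/iadst paths apply.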
198
199 .macro idct4_10 c0, c1, c2, c3
200         mul             v22.4s,    \c1\().4s, v0.s[3]
201         mul             v20.4s,    \c1\().4s, v0.s[2]
202         add             v16.4s,    \c0\().4s, \c2\().4s
203         sub             v17.4s,    \c0\().4s, \c2\().4s
204         mla             v22.4s,    \c3\().4s, v0.s[2]
205         mul             v18.4s,    v16.4s,    v0.s[0]
206         mul             v24.4s,    v17.4s,    v0.s[0]
207         mls             v20.4s,    \c3\().4s, v0.s[3]
208         srshr           v22.4s,    v22.4s,    #14
209         srshr           v18.4s,    v18.4s,    #14
210         srshr           v24.4s,    v24.4s,    #14
211         srshr           v20.4s,    v20.4s,    #14
212         add             \c0\().4s, v18.4s,    v22.4s
213         sub             \c3\().4s, v18.4s,    v22.4s
214         add             \c1\().4s, v24.4s,    v20.4s
215         sub             \c2\().4s, v24.4s,    v20.4s
216 .endm
217
218 .macro idct4_12 c0, c1, c2, c3
219         smull           v22.2d,    \c1\().2s, v0.s[3]
220         smull2          v23.2d,    \c1\().4s, v0.s[3]
221         smull           v20.2d,    \c1\().2s, v0.s[2]
222         smull2          v21.2d,    \c1\().4s, v0.s[2]
223         add             v16.4s,    \c0\().4s, \c2\().4s
224         sub             v17.4s,    \c0\().4s, \c2\().4s
225         smlal           v22.2d,    \c3\().2s, v0.s[2]
226         smlal2          v23.2d,    \c3\().4s, v0.s[2]
227         smull           v18.2d,    v16.2s,    v0.s[0]
228         smull2          v19.2d,    v16.4s,    v0.s[0]
229         smull           v24.2d,    v17.2s,    v0.s[0]
230         smull2          v25.2d,    v17.4s,    v0.s[0]
231         smlsl           v20.2d,    \c3\().2s, v0.s[3]
232         smlsl2          v21.2d,    \c3\().4s, v0.s[3]
233         rshrn           v22.2s,    v22.2d,    #14
234         rshrn2          v22.4s,    v23.2d,    #14
235         rshrn           v18.2s,    v18.2d,    #14
236         rshrn2          v18.4s,    v19.2d,    #14
237         rshrn           v24.2s,    v24.2d,    #14
238         rshrn2          v24.4s,    v25.2d,    #14
239         rshrn           v20.2s,    v20.2d,    #14
240         rshrn2          v20.4s,    v21.2d,    #14
241         add             \c0\().4s, v18.4s,    v22.4s
242         sub             \c3\().4s, v18.4s,    v22.4s
243         add             \c1\().4s, v24.4s,    v20.4s
244         sub             \c2\().4s, v24.4s,    v20.4s
245 .endm
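// The _10 and _12 variants of idct4 compute the same 4-point IDCT; they only
// differ in intermediate width. At 10 bit the inputs are small enough that
// the plain 32 bit mul/mla/mls with the Q14 constants is sufficient, while at
// 12 bit the products need the widening smull/smlal path and are narrowed
// back with rshrn. In both cases the result is (rounding terms omitted):
//   t0 = ((c0 + c2) * 11585) >> 14       t2 = (c1 *  6270 - c3 * 15137) >> 14
//   t1 = ((c0 - c2) * 11585) >> 14       t3 = (c1 * 15137 + c3 *  6270) >> 14
//   out = { t0 + t3, t1 + t2, t1 - t2, t0 - t3 }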
246
247 .macro iadst4_10 c0, c1, c2, c3
248         mul             v16.4s,    \c0\().4s, v1.s[0]
249         mla             v16.4s,    \c2\().4s, v1.s[1]
250         mla             v16.4s,    \c3\().4s, v1.s[2]
251         mul             v18.4s,    \c0\().4s, v1.s[2]
252         mls             v18.4s,    \c2\().4s, v1.s[0]
253         sub             \c0\().4s, \c0\().4s, \c2\().4s
254         mls             v18.4s,    \c3\().4s, v1.s[1]
255         add             \c0\().4s, \c0\().4s, \c3\().4s
256         mul             v22.4s,    \c1\().4s, v1.s[3]
257         mul             v20.4s,    \c0\().4s, v1.s[3]
258         add             v24.4s,    v16.4s,    v22.4s
259         add             v26.4s,    v18.4s,    v22.4s
260         srshr           \c0\().4s, v24.4s,    #14
261         add             v16.4s,    v16.4s,    v18.4s
262         srshr           \c1\().4s, v26.4s,    #14
263         sub             v16.4s,    v16.4s,    v22.4s
264         srshr           \c2\().4s, v20.4s,    #14
265         srshr           \c3\().4s, v16.4s,    #14
266 .endm
267
268 .macro iadst4_12 c0, c1, c2, c3
269         smull           v16.2d,    \c0\().2s, v1.s[0]
270         smull2          v17.2d,    \c0\().4s, v1.s[0]
271         smlal           v16.2d,    \c2\().2s, v1.s[1]
272         smlal2          v17.2d,    \c2\().4s, v1.s[1]
273         smlal           v16.2d,    \c3\().2s, v1.s[2]
274         smlal2          v17.2d,    \c3\().4s, v1.s[2]
275         smull           v18.2d,    \c0\().2s, v1.s[2]
276         smull2          v19.2d,    \c0\().4s, v1.s[2]
277         smlsl           v18.2d,    \c2\().2s, v1.s[0]
278         smlsl2          v19.2d,    \c2\().4s, v1.s[0]
279         sub             \c0\().4s, \c0\().4s, \c2\().4s
280         smlsl           v18.2d,    \c3\().2s, v1.s[1]
281         smlsl2          v19.2d,    \c3\().4s, v1.s[1]
282         add             \c0\().4s, \c0\().4s, \c3\().4s
283         smull           v22.2d,    \c1\().2s, v1.s[3]
284         smull2          v23.2d,    \c1\().4s, v1.s[3]
285         smull           v20.2d,    \c0\().2s, v1.s[3]
286         smull2          v21.2d,    \c0\().4s, v1.s[3]
287         add             v24.2d,    v16.2d,    v22.2d
288         add             v25.2d,    v17.2d,    v23.2d
289         add             v26.2d,    v18.2d,    v22.2d
290         add             v27.2d,    v19.2d,    v23.2d
291         rshrn           \c0\().2s, v24.2d,    #14
292         rshrn2          \c0\().4s, v25.2d,    #14
293         add             v16.2d,    v16.2d,    v18.2d
294         add             v17.2d,    v17.2d,    v19.2d
295         rshrn           \c1\().2s, v26.2d,    #14
296         rshrn2          \c1\().4s, v27.2d,    #14
297         sub             v16.2d,    v16.2d,    v22.2d
298         sub             v17.2d,    v17.2d,    v23.2d
299         rshrn           \c2\().2s, v20.2d,    #14
300         rshrn2          \c2\().4s, v21.2d,    #14
301         rshrn           \c3\().2s, v16.2d,    #14
302         rshrn2          \c3\().4s, v17.2d,    #14
303 .endm
304
305 // The public functions in this file have the following signature:
306 // void itxfm_add(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
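// For these high bitdepth versions, dst actually points to uint16_t pixels
// and block to int32_t coefficients; the prototype above is just the shared
// form used for the dsp function pointers, and the buffers are reinterpreted
// here (16 bit loads/stores on x0, 32 bit loads on x2).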
307
308 .macro itxfm_func4x4 txfm1, txfm2, bpp
309 function ff_vp9_\txfm1\()_\txfm2\()_4x4_add_\bpp\()_neon, export=1
310 .ifc \txfm1,\txfm2
311 .ifc \txfm1,idct
312         movrel          x4,  itxfm4_coeffs
313         ld1             {v0.4h}, [x4]
314         sxtl            v0.4s,  v0.4h
315 .endif
316 .ifc \txfm1,iadst
317         movrel          x4,  iadst4_coeffs
318         ld1             {v0.d}[1], [x4]
319         sxtl2           v1.4s,  v0.8h
320 .endif
321 .else
322         movrel          x4,  itxfm4_coeffs
323         ld1             {v0.8h}, [x4]
324         sxtl2           v1.4s,  v0.8h
325         sxtl            v0.4s,  v0.4h
326 .endif
327
328         movi            v30.4s, #0
329         movi            v31.4s, #0
330 .ifc \txfm1\()_\txfm2,idct_idct
331         cmp             w3,  #1
332         b.ne            1f
333         // DC-only for idct/idct
334         ld1             {v2.s}[0],  [x2]
335         smull           v2.2d,  v2.2s, v0.s[0]
336         rshrn           v2.2s,  v2.2d, #14
337         smull           v2.2d,  v2.2s, v0.s[0]
338         rshrn           v2.2s,  v2.2d, #14
339         st1             {v31.s}[0], [x2]
340         dup             v4.4s,  v2.s[0]
341         mov             v5.16b, v4.16b
342         mov             v6.16b, v4.16b
343         mov             v7.16b, v4.16b
344         b               2f
345 .endif
346
347 1:
348         ld1             {v4.4s,v5.4s,v6.4s,v7.4s},  [x2]
349         st1             {v30.4s,v31.4s}, [x2], #32
350
351 .ifc \txfm1,iwht
352         sshr            v4.4s,  v4.4s,  #2
353         sshr            v5.4s,  v5.4s,  #2
354         sshr            v6.4s,  v6.4s,  #2
355         sshr            v7.4s,  v7.4s,  #2
356 .endif
357
358         \txfm1\()4_\bpp v4,  v5,  v6,  v7
359
360         st1             {v30.4s,v31.4s}, [x2], #32
361         // Transpose 4x4 with 32 bit elements
362         transpose_4x4s  v4,  v5,  v6,  v7,  v16, v17, v18, v19
363
364         \txfm2\()4_\bpp v4,  v5,  v6,  v7
365 2:
366         mvni            v31.8h, #((0xff << (\bpp - 8)) & 0xff), lsl #8
367         ld1             {v0.4h},   [x0], x1
368         ld1             {v1.4h},   [x0], x1
369 .ifnc \txfm1,iwht
370         srshr           v4.4s,  v4.4s,  #4
371         srshr           v5.4s,  v5.4s,  #4
372         srshr           v6.4s,  v6.4s,  #4
373         srshr           v7.4s,  v7.4s,  #4
374 .endif
375         uaddw           v4.4s,  v4.4s,  v0.4h
376         uaddw           v5.4s,  v5.4s,  v1.4h
377         ld1             {v2.4h},   [x0], x1
378         ld1             {v3.4h},   [x0], x1
379         sqxtun          v0.4h,  v4.4s
380         sqxtun2         v0.8h,  v5.4s
381         sub             x0,  x0,  x1, lsl #2
382
383         uaddw           v6.4s,  v6.4s,  v2.4h
384         umin            v0.8h,  v0.8h,  v31.8h
385         uaddw           v7.4s,  v7.4s,  v3.4h
386         st1             {v0.4h},   [x0], x1
387         sqxtun          v2.4h,  v6.4s
388         sqxtun2         v2.8h,  v7.4s
389         umin            v2.8h,  v2.8h,  v31.8h
390
391         st1             {v0.d}[1], [x0], x1
392         st1             {v2.4h},   [x0], x1
393         st1             {v2.d}[1], [x0], x1
394
395         ret
396 endfunc
397 .endm
398
399 .macro itxfm_funcs4x4 bpp
400 itxfm_func4x4 idct,  idct,  \bpp
401 itxfm_func4x4 iadst, idct,  \bpp
402 itxfm_func4x4 idct,  iadst, \bpp
403 itxfm_func4x4 iadst, iadst, \bpp
404 itxfm_func4x4 iwht,  iwht,  \bpp
405 .endm
406
407 itxfm_funcs4x4 10
408 itxfm_funcs4x4 12
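// The two expansions above emit the exported entry points
//   ff_vp9_idct_idct_4x4_add_10_neon,  ff_vp9_iadst_idct_4x4_add_10_neon,
//   ff_vp9_idct_iadst_4x4_add_10_neon, ff_vp9_iadst_iadst_4x4_add_10_neon,
//   ff_vp9_iwht_iwht_4x4_add_10_neon
// and the corresponding _12_neon variants, which the bitdepth-specific dsp
// init code on the C side is expected to pick up.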
409
410 function idct8x8_dc_add_neon
411         movrel          x4,  idct_coeffs
412         ld1             {v0.4h}, [x4]
413
414         movi            v1.4h,  #0
415         sxtl            v0.4s,  v0.4h
416
417         ld1             {v2.s}[0],  [x2]
418         smull           v2.2d,  v2.2s,  v0.s[0]
419         rshrn           v2.2s,  v2.2d,  #14
420         smull           v2.2d,  v2.2s,  v0.s[0]
421         rshrn           v2.2s,  v2.2d,  #14
422         st1             {v1.s}[0],  [x2]
423         dup             v2.4s,  v2.s[0]
424
425         srshr           v2.4s,  v2.4s,  #5
426
427         mov             x4,  #8
428         mov             x3,  x0
429         dup             v31.8h, w5
430 1:
431         // Loop to add the constant from v2 into all 8x8 outputs
432         subs            x4,  x4,  #2
433         ld1             {v3.8h},  [x0], x1
434         ld1             {v4.8h},  [x0], x1
435         uaddw           v16.4s, v2.4s,  v3.4h
436         uaddw2          v17.4s, v2.4s,  v3.8h
437         uaddw           v18.4s, v2.4s,  v4.4h
438         uaddw2          v19.4s, v2.4s,  v4.8h
439         sqxtun          v3.4h,  v16.4s
440         sqxtun2         v3.8h,  v17.4s
441         sqxtun          v4.4h,  v18.4s
442         sqxtun2         v4.8h,  v19.4s
443         umin            v3.8h,  v3.8h,  v31.8h
444         umin            v4.8h,  v4.8h,  v31.8h
445         st1             {v3.8h},  [x3], x1
446         st1             {v4.8h},  [x3], x1
447         b.ne            1b
448
449         ret
450 endfunc
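// Roughly, the DC-only path above corresponds to this C outline (ad hoc
// names; clip() and max_pixel stand for the umin clamp against w5, dst is
// uint16_t and stride is counted in pixels here):
//
//     int dc = (block[0] * 11585 + (1 << 13)) >> 14;   // 1D idct of the DC
//     dc     = (dc       * 11585 + (1 << 13)) >> 14;   // second pass
//     dc     = (dc + 16) >> 5;                         // final 8x8 rounding
//     block[0] = 0;
//     for (int y = 0; y < 8; y++)
//         for (int x = 0; x < 8; x++)
//             dst[y * stride + x] = clip(dst[y * stride + x] + dc, 0, max_pixel);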
451
452 .macro idct8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1, t2, t3, t4, t5
453         dmbutterfly0    \r0, \r4, \r0, \r4, \t0, \t1, \t2, \t3, \t4, \t5 // r0 = t0a, r4 = t1a
454         dmbutterfly     \r2, \r6, v0.s[2], v0.s[3], \t0, \t1, \t2, \t3   // r2 = t2a, r6 = t3a
455         dmbutterfly     \r1, \r7, v1.s[0], v1.s[1], \t0, \t1, \t2, \t3   // r1 = t4a, r7 = t7a
456         dmbutterfly     \r5, \r3, v1.s[2], v1.s[3], \t0, \t1, \t2, \t3   // r5 = t5a, r3 = t6a
457
458         butterfly_4s    \t0, \t1, \r0, \r6 // t0 = t0, t1 = t3
459         butterfly_4s    \t2, \r5, \r1, \r5 // t2 = t4, r5 = t5a
460         butterfly_4s    \t3, \r6, \r7, \r3 // t3 = t7, r6 = t6a
461         butterfly_4s    \r7, \r4, \r4, \r2 // r7 = t1, r4 = t2
462
463         dmbutterfly0    \r6, \r5, \r6, \r5, \r0, \r1, \r2, \r3, \t4, \t5 // r6 = t6, r5 = t5
464
465         butterfly_4s    \r1, \r6, \r7, \r6 // r1 = out[1], r6 = out[6]
466         butterfly_4s    \r0, \r7, \t0, \t3 // r0 = out[0], r7 = out[7]
467         butterfly_4s    \r2, \r5, \r4, \r5 // r2 = out[2], r5 = out[5]
468         butterfly_4s    \r3, \r4, \t1, \t2 // r3 = out[3], r4 = out[4]
469 .endm
470
471 .macro iadst8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1, t2, t3, t4, t5
472         dmbutterfly_l   \t2, \t3, \t0, \t1, \r7, \r0, v2.s[1], v2.s[0]   // t2,t3 = t1a, t0,t1 = t0a
473         dmbutterfly_l   \r0, \r7, \t4, \t5, \r3, \r4, v3.s[1], v3.s[0]   // r0,r7 = t5a, t4,t5 = t4a
474
475         dbutterfly_n    \r3, \t0, \t0, \t1, \t4, \t5, \r3, \r4, \t0, \t1 // r3 = t0, t0 = t4
476         dbutterfly_n    \r4, \t1, \t2, \t3, \r0, \r7, \r4, \t1, \t4, \t5 // r4 = t1, t1 = t5
477
478         dmbutterfly_l   \t4, \t5, \t2, \t3, \r5, \r2, v2.s[3], v2.s[2]   // t4,t5 = t3a, t2,t3 = t2a
479         dmbutterfly_l   \r2, \r5, \r0, \r7, \r1, \r6, v3.s[3], v3.s[2]   // r2,r5 = t7a, r0,r7 = t6a
480
481         dbutterfly_n    \r1, \t2, \t2, \t3, \r0, \r7, \r1, \r6, \t2, \t3 // r1 = t2, t2 = t6
482         dbutterfly_n    \r0, \t4, \t4, \t5, \r2, \r5, \r0, \r7, \t4, \t5 // r0 = t3, t4 = t7
483
484         butterfly_4s    \r7, \r4, \r4, \r0   // r7 = -out[7], r4 = t3
485         neg             \r7\().4s, \r7\().4s // r7 = out[7]
486         butterfly_4s    \r0, \r1, \r3, \r1   // r0 = out[0],  r1 = t2
487
488         dmbutterfly_l   \r2, \r3, \t3, \t5, \t0, \t1, v0.s[2], v0.s[3]   // r2,r3 = t5a, t3,t5 = t4a
489         dmbutterfly_l   \t0, \t1, \r5, \r6, \t4, \t2, v0.s[3], v0.s[2]   // t0,t1 = t6a, r5,r6 = t7a
490
491         dbutterfly_n    \r6, \t2, \r2, \r3, \r5, \r6, \t2, \t4, \r2, \r3 // r6 = out[6],  t2 = t7
492
493         dmbutterfly0    \r3, \r4, \r1, \r4, \t4, \r5, \r1, \r2           // r3 = -out[3], r4 = out[4]
494         neg             \r3\().4s, \r3\().4s  // r3 = out[3]
495
496         dbutterfly_n    \r1, \t0, \t3, \t5, \t0, \t1, \r1, \r2, \t0, \t1 // r1 = -out[1], t0 = t6
497         neg             \r1\().4s, \r1\().4s  // r1 = out[1]
498
499         dmbutterfly0    \r2, \r5, \t0, \t2, \t1, \t3, \t4, \t5           // r2 = out[2],  r5 = -out[5]
500         neg             \r5\().4s, \r5\().4s  // r5 = out[5]
501 .endm
502
503
504 .macro itxfm_func8x8 txfm1, txfm2
505 function vp9_\txfm1\()_\txfm2\()_8x8_add_16_neon
506 .ifc \txfm1\()_\txfm2,idct_idct
507         cmp             w3,  #1
508         b.eq            idct8x8_dc_add_neon
509 .endif
510         // The iadst also uses a few coefficients from
511         // idct, so those always need to be loaded.
512 .ifc \txfm1\()_\txfm2,idct_idct
513         movrel          x4,  idct_coeffs
514 .else
515         movrel          x4,  iadst8_coeffs
516         ld1             {v1.8h}, [x4], #16
517         stp             d8,  d9,  [sp, #-0x10]!
518         sxtl2           v3.4s,  v1.8h
519         sxtl            v2.4s,  v1.4h
520 .endif
521         ld1             {v0.8h}, [x4]
522         sxtl2           v1.4s,  v0.8h
523         sxtl            v0.4s,  v0.4h
524
525         movi            v4.4s, #0
526         movi            v5.4s, #0
527         movi            v6.4s, #0
528         movi            v7.4s, #0
529
530 1:
531         ld1             {v16.4s,v17.4s,v18.4s,v19.4s},  [x2], #64
532         ld1             {v20.4s,v21.4s,v22.4s,v23.4s},  [x2], #64
533         ld1             {v24.4s,v25.4s,v26.4s,v27.4s},  [x2], #64
534         ld1             {v28.4s,v29.4s,v30.4s,v31.4s},  [x2], #64
535         sub             x2,  x2,  #256
536         st1             {v4.4s,v5.4s,v6.4s,v7.4s},      [x2], #64
537         st1             {v4.4s,v5.4s,v6.4s,v7.4s},      [x2], #64
538         st1             {v4.4s,v5.4s,v6.4s,v7.4s},      [x2], #64
539         st1             {v4.4s,v5.4s,v6.4s,v7.4s},      [x2], #64
540
541 .ifc \txfm1\()_\txfm2,idct_idct
542         idct8           v16, v18, v20, v22, v24, v26, v28, v30, v2,  v3,  v4,  v5,  v6,  v7
543         idct8           v17, v19, v21, v23, v25, v27, v29, v31, v2,  v3,  v4,  v5,  v6,  v7
544 .else
545         \txfm1\()8      v16, v18, v20, v22, v24, v26, v28, v30, v4,  v5,  v6,  v7,  v8,  v9
546         \txfm1\()8      v17, v19, v21, v23, v25, v27, v29, v31, v4,  v5,  v6,  v7,  v8,  v9
547 .endif
548
549         // Transpose 8x8 with 32 bit elements
550         transpose_8x8s  v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v4, v5, v6, v7
551
552 .ifc \txfm1\()_\txfm2,idct_idct
553         idct8           v16, v18, v20, v22, v24, v26, v28, v30, v2,  v3,  v4,  v5,  v6,  v7
554         idct8           v17, v19, v21, v23, v25, v27, v29, v31, v2,  v3,  v4,  v5,  v6,  v7
555 .else
556         \txfm2\()8      v16, v18, v20, v22, v24, v26, v28, v30, v4,  v5,  v6,  v7,  v8,  v9
557         \txfm2\()8      v17, v19, v21, v23, v25, v27, v29, v31, v4,  v5,  v6,  v7,  v8,  v9
558 .endif
559 2:
560         mov             x3,  x0
561         // Add into the destination
562         ld1             {v0.8h},  [x0], x1
563         srshr           v16.4s, v16.4s, #5
564         srshr           v17.4s, v17.4s, #5
565         ld1             {v1.8h},  [x0], x1
566         srshr           v18.4s, v18.4s, #5
567         srshr           v19.4s, v19.4s, #5
568         ld1             {v2.8h},  [x0], x1
569         srshr           v20.4s, v20.4s, #5
570         srshr           v21.4s, v21.4s, #5
571         uaddw           v16.4s, v16.4s, v0.4h
572         uaddw2          v17.4s, v17.4s, v0.8h
573         ld1             {v3.8h},  [x0], x1
574         srshr           v22.4s, v22.4s, #5
575         srshr           v23.4s, v23.4s, #5
576         uaddw           v18.4s, v18.4s, v1.4h
577         uaddw2          v19.4s, v19.4s, v1.8h
578         ld1             {v4.8h},  [x0], x1
579         srshr           v24.4s, v24.4s, #5
580         srshr           v25.4s, v25.4s, #5
581         uaddw           v20.4s, v20.4s, v2.4h
582         uaddw2          v21.4s, v21.4s, v2.8h
583         sqxtun          v0.4h,  v16.4s
584         sqxtun2         v0.8h,  v17.4s
585         dup             v16.8h, w5
586         ld1             {v5.8h},  [x0], x1
587         srshr           v26.4s, v26.4s, #5
588         srshr           v27.4s, v27.4s, #5
589         uaddw           v22.4s, v22.4s, v3.4h
590         uaddw2          v23.4s, v23.4s, v3.8h
591         sqxtun          v1.4h,  v18.4s
592         sqxtun2         v1.8h,  v19.4s
593         umin            v0.8h,  v0.8h,  v16.8h
594         ld1             {v6.8h},  [x0], x1
595         srshr           v28.4s, v28.4s, #5
596         srshr           v29.4s, v29.4s, #5
597         uaddw           v24.4s, v24.4s, v4.4h
598         uaddw2          v25.4s, v25.4s, v4.8h
599         sqxtun          v2.4h,  v20.4s
600         sqxtun2         v2.8h,  v21.4s
601         umin            v1.8h,  v1.8h,  v16.8h
602         ld1             {v7.8h},  [x0], x1
603         srshr           v30.4s, v30.4s, #5
604         srshr           v31.4s, v31.4s, #5
605         uaddw           v26.4s, v26.4s, v5.4h
606         uaddw2          v27.4s, v27.4s, v5.8h
607         sqxtun          v3.4h,  v22.4s
608         sqxtun2         v3.8h,  v23.4s
609         umin            v2.8h,  v2.8h,  v16.8h
610
611         st1             {v0.8h},  [x3], x1
612         uaddw           v28.4s, v28.4s, v6.4h
613         uaddw2          v29.4s, v29.4s, v6.8h
614         st1             {v1.8h},  [x3], x1
615         sqxtun          v4.4h,  v24.4s
616         sqxtun2         v4.8h,  v25.4s
617         umin            v3.8h,  v3.8h,  v16.8h
618         st1             {v2.8h},  [x3], x1
619         uaddw           v30.4s, v30.4s, v7.4h
620         uaddw2          v31.4s, v31.4s, v7.8h
621         st1             {v3.8h},  [x3], x1
622         sqxtun          v5.4h,  v26.4s
623         sqxtun2         v5.8h,  v27.4s
624         umin            v4.8h,  v4.8h,  v16.8h
625         st1             {v4.8h},  [x3], x1
626         sqxtun          v6.4h,  v28.4s
627         sqxtun2         v6.8h,  v29.4s
628         umin            v5.8h,  v5.8h,  v16.8h
629         st1             {v5.8h},  [x3], x1
630         sqxtun          v7.4h,  v30.4s
631         sqxtun2         v7.8h,  v31.4s
632         umin            v6.8h,  v6.8h,  v16.8h
633
634         st1             {v6.8h},  [x3], x1
635         umin            v7.8h,  v7.8h,  v16.8h
636         st1             {v7.8h},  [x3], x1
637
638 .ifnc \txfm1\()_\txfm2,idct_idct
639         ldp             d8,  d9,  [sp], 0x10
640 .endif
641         ret
642 endfunc
643
644 function ff_vp9_\txfm1\()_\txfm2\()_8x8_add_10_neon, export=1
645         mov             x5,  #0x03ff
646         b               vp9_\txfm1\()_\txfm2\()_8x8_add_16_neon
647 endfunc
648
649 function ff_vp9_\txfm1\()_\txfm2\()_8x8_add_12_neon, export=1
650         mov             x5,  #0x0fff
651         b               vp9_\txfm1\()_\txfm2\()_8x8_add_16_neon
652 endfunc
653 .endm
654
655 itxfm_func8x8 idct,  idct
656 itxfm_func8x8 iadst, idct
657 itxfm_func8x8 idct,  iadst
658 itxfm_func8x8 iadst, iadst
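// The per-bitdepth entry points above only differ in the pixel maximum passed
// in x5 (0x3ff for 10 bit, 0xfff for 12 bit); the shared _16 implementation
// broadcasts that value with dup and clamps every reconstructed pixel with
// umin.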
659
660
661 function idct16x16_dc_add_neon
662         movrel          x4,  idct_coeffs
663         ld1             {v0.4h}, [x4]
664         sxtl            v0.4s,  v0.4h
665
666         movi            v1.4h,  #0
667
668         ld1             {v2.s}[0],  [x2]
669         smull           v2.2d,  v2.2s,  v0.s[0]
670         rshrn           v2.2s,  v2.2d,  #14
671         smull           v2.2d,  v2.2s,  v0.s[0]
672         rshrn           v2.2s,  v2.2d,  #14
673         st1             {v1.s}[0],  [x2]
674         dup             v2.4s,  v2.s[0]
675
676         srshr           v0.4s,  v2.4s,  #6
677
678         mov             x3, x0
679         mov             x4, #16
680         dup             v31.8h, w13
681 1:
682         // Loop to add the constant in v0 into all 16x16 outputs
683         subs            x4,  x4,  #2
684         ld1             {v1.8h,v2.8h},  [x0], x1
685         uaddw           v16.4s, v0.4s,  v1.4h
686         uaddw2          v17.4s, v0.4s,  v1.8h
687         ld1             {v3.8h,v4.8h},  [x0], x1
688         uaddw           v18.4s, v0.4s,  v2.4h
689         uaddw2          v19.4s, v0.4s,  v2.8h
690         uaddw           v20.4s, v0.4s,  v3.4h
691         uaddw2          v21.4s, v0.4s,  v3.8h
692         uaddw           v22.4s, v0.4s,  v4.4h
693         uaddw2          v23.4s, v0.4s,  v4.8h
694         sqxtun          v1.4h,  v16.4s
695         sqxtun2         v1.8h,  v17.4s
696         sqxtun          v2.4h,  v18.4s
697         sqxtun2         v2.8h,  v19.4s
698         sqxtun          v3.4h,  v20.4s
699         sqxtun2         v3.8h,  v21.4s
700         sqxtun          v4.4h,  v22.4s
701         sqxtun2         v4.8h,  v23.4s
702         umin            v1.8h,  v1.8h,  v31.8h
703         umin            v2.8h,  v2.8h,  v31.8h
704         st1             {v1.8h,v2.8h},  [x3], x1
705         umin            v3.8h,  v3.8h,  v31.8h
706         umin            v4.8h,  v4.8h,  v31.8h
707         st1             {v3.8h,v4.8h},  [x3], x1
708         b.ne            1b
709
710         ret
711 endfunc
712
713 function idct16
714         dmbutterfly0    v16, v24, v16, v24, v4, v5, v6, v7, v8, v9 // v16 = t0a,  v24 = t1a
715         dmbutterfly     v20, v28, v0.s[2], v0.s[3], v4, v5, v6, v7 // v20 = t2a,  v28 = t3a
716         dmbutterfly     v18, v30, v1.s[0], v1.s[1], v4, v5, v6, v7 // v18 = t4a,  v30 = t7a
717         dmbutterfly     v26, v22, v1.s[2], v1.s[3], v4, v5, v6, v7 // v26 = t5a,  v22 = t6a
718         dmbutterfly     v17, v31, v2.s[0], v2.s[1], v4, v5, v6, v7 // v17 = t8a,  v31 = t15a
719         dmbutterfly     v25, v23, v2.s[2], v2.s[3], v4, v5, v6, v7 // v25 = t9a,  v23 = t14a
720         dmbutterfly     v21, v27, v3.s[0], v3.s[1], v4, v5, v6, v7 // v21 = t10a, v27 = t13a
721         dmbutterfly     v29, v19, v3.s[2], v3.s[3], v4, v5, v6, v7 // v29 = t11a, v19 = t12a
722
723         butterfly_4s    v4,  v28, v16, v28               // v4  = t0,   v28 = t3
724         butterfly_4s    v5,  v20, v24, v20               // v5  = t1,   v20 = t2
725         butterfly_4s    v6,  v26, v18, v26               // v6  = t4,   v26 = t5
726         butterfly_4s    v7,  v22, v30, v22               // v7  = t7,   v22 = t6
727         butterfly_4s    v16, v25, v17, v25               // v16 = t8,   v25 = t9
728         butterfly_4s    v24, v21, v29, v21               // v24 = t11,  v21 = t10
729         butterfly_4s    v17, v27, v19, v27               // v17 = t12,  v27 = t13
730         butterfly_4s    v29, v23, v31, v23               // v29 = t15,  v23 = t14
731
732         dmbutterfly0    v22, v26, v22, v26, v8, v9, v18, v19, v30, v31        // v22 = t6a,  v26 = t5a
733         dmbutterfly     v23, v25, v0.s[2], v0.s[3], v18, v19, v30, v31        // v23 = t9a,  v25 = t14a
734         dmbutterfly     v27, v21, v0.s[2], v0.s[3], v18, v19, v30, v31, neg=1 // v27 = t13a, v21 = t10a
735
736         butterfly_4s    v18, v7,  v4,  v7                // v18 = t0a,  v7  = t7a
737         butterfly_4s    v19, v22, v5,  v22               // v19 = t1a,  v22 = t6
738         butterfly_4s    v4,  v26, v20, v26               // v4  = t2a,  v26 = t5
739         butterfly_4s    v5,  v6,  v28, v6                // v5  = t3a,  v6  = t4
740         butterfly_4s    v20, v28, v16, v24               // v20 = t8a,  v28 = t11a
741         butterfly_4s    v24, v21, v23, v21               // v24 = t9,   v21 = t10
742         butterfly_4s    v23, v27, v25, v27               // v23 = t14,  v27 = t13
743         butterfly_4s    v25, v29, v29, v17               // v25 = t15a, v29 = t12a
744
745         dmbutterfly0    v8,  v9,  v27, v21, v8,  v9,  v16, v17, v30, v31 // v8  = t13a, v9  = t10a
746         dmbutterfly0    v28, v27, v29, v28, v21, v29, v16, v17, v30, v31 // v28 = t12,  v27 = t11
747
748         butterfly_4s    v16, v31, v18, v25               // v16 = out[0], v31 = out[15]
749         butterfly_4s    v17, v30, v19, v23               // v17 = out[1], v30 = out[14]
750         butterfly_4s_r  v25, v22, v22, v24               // v25 = out[9], v22 = out[6]
751         butterfly_4s    v23, v24, v7,  v20               // v23 = out[7], v24 = out[8]
752         butterfly_4s    v18, v29, v4,  v8                // v18 = out[2], v29 = out[13]
753         butterfly_4s    v19, v28, v5,  v28               // v19 = out[3], v28 = out[12]
754         butterfly_4s    v20, v27, v6,  v27               // v20 = out[4], v27 = out[11]
755         butterfly_4s    v21, v26, v26, v9                // v21 = out[5], v26 = out[10]
756         ret
757 endfunc
758
759 function iadst16
760         ld1             {v0.8h,v1.8h}, [x11]
761         sxtl            v2.4s,  v1.4h
762         sxtl2           v3.4s,  v1.8h
763         sxtl2           v1.4s,  v0.8h
764         sxtl            v0.4s,  v0.4h
765
766         dmbutterfly_l   v6,  v7,  v4,  v5,  v31, v16, v0.s[1], v0.s[0]   // v6,v7   = t1,   v4,v5   = t0
767         dmbutterfly_l   v10, v11, v8,  v9,  v23, v24, v1.s[1], v1.s[0]   // v10,v11 = t9,   v8,v9   = t8
768         dbutterfly_n    v31, v24, v6,  v7,  v10, v11, v12, v13, v10, v11 // v31     = t1a,  v24     = t9a
769         dmbutterfly_l   v14, v15, v12, v13, v29, v18, v0.s[3], v0.s[2]   // v14,v15 = t3,   v12,v13 = t2
770         dbutterfly_n    v16, v23, v4,  v5,  v8,  v9,  v6,  v7,  v8,  v9  // v16     = t0a,  v23     = t8a
771
772         dmbutterfly_l   v6,  v7,  v4,  v5,  v21, v26, v1.s[3], v1.s[2]   // v6,v7   = t11,  v4,v5   = t10
773         dbutterfly_n    v29, v26, v14, v15, v6,  v7,  v8,  v9,  v6,  v7  // v29     = t3a,  v26     = t11a
774         dmbutterfly_l   v10, v11, v8,  v9,  v27, v20, v2.s[1], v2.s[0]   // v10,v11 = t5,   v8,v9   = t4
775         dbutterfly_n    v18, v21, v12, v13, v4,  v5,  v6,  v7,  v4,  v5  // v18     = t2a,  v21     = t10a
776
777         dmbutterfly_l   v14, v15, v12, v13, v19, v28, v3.s[1], v3.s[0]   // v14,v15 = t13,  v12,v13 = t12
778         dbutterfly_n    v20, v28, v10, v11, v14, v15, v4,  v5,  v14, v15 // v20     = t5a,  v28     = t13a
779         dmbutterfly_l   v6,  v7,  v4,  v5,  v25, v22, v2.s[3], v2.s[2]   // v6,v7   = t7,   v4,v5   = t6
780         dbutterfly_n    v27, v19, v8,  v9,  v12, v13, v10, v11, v12, v13 // v27     = t4a,  v19     = t12a
781
782         dmbutterfly_l   v10, v11, v8,  v9,  v17, v30, v3.s[3], v3.s[2]   // v10,v11 = t15,  v8,v9   = t14
783         ld1             {v0.8h}, [x10]
784         dbutterfly_n    v22, v30, v6,  v7,  v10, v11, v12, v13, v10, v11 // v22     = t7a,  v30     = t15a
785         sxtl2           v1.4s,  v0.8h
786         sxtl            v0.4s,  v0.4h
787         dmbutterfly_l   v14, v15, v12, v13, v23, v24, v1.s[0], v1.s[1]   // v14,v15 = t9,   v12,v13 = t8
788         dbutterfly_n    v25, v17, v4,  v5,  v8,  v9,  v6,  v7,  v8,  v9  // v25     = t6a,  v17     = t14a
789
790         dmbutterfly_l   v4,  v5,  v6,  v7,  v28, v19, v1.s[1], v1.s[0]   // v4,v5   = t12,  v6,v7   = t13
791         dbutterfly_n    v23, v19, v12, v13, v4,  v5,  v8,  v9,  v4,  v5  // v23     = t8a,  v19     = t12a
792         dmbutterfly_l   v10, v11, v8,  v9,  v21, v26, v1.s[2], v1.s[3]   // v10,v11 = t11,  v8,v9   = t10
793         butterfly_4s_r  v4,  v27, v16, v27               // v4  = t4,   v27 = t0
794         dbutterfly_n    v24, v28, v14, v15, v6,  v7,  v12, v13, v6,  v7  // v24     = t9a,  v28     = t13a
795
796         dmbutterfly_l   v12, v13, v14, v15, v30, v17, v1.s[3], v1.s[2]   // v12,v13 = t14,  v14,v15 = t15
797         butterfly_4s_r  v5,  v20, v31, v20               // v5  = t5, v20 = t1
798         dbutterfly_n    v21, v17, v8,  v9,  v12, v13, v6,  v7,  v12, v13 // v21     = t10a, v17     = t14a
799         dbutterfly_n    v26, v30, v10, v11, v14, v15, v8,  v9,  v14, v15 // v26     = t11a, v30     = t15a
800
801         butterfly_4s_r  v6,  v25, v18, v25               // v6  = t6, v25 = t2
802         butterfly_4s_r  v7,  v22, v29, v22               // v7  = t7, v22 = t3
803
804         dmbutterfly_l   v10, v11, v8,  v9,  v19, v28, v0.s[2], v0.s[3]   // v10,v11 = t13,  v8,v9   = t12
805         dmbutterfly_l   v12, v13, v14, v15, v30, v17, v0.s[3], v0.s[2]   // v12,v13 = t14,  v14,v15 = t15
806
807         dbutterfly_n    v18, v30, v8,  v9,  v12, v13, v16, v17, v12, v13 // v18   = out[2], v30     = t14a
808         dbutterfly_n    v29, v17, v10, v11, v14, v15, v12, v13, v14, v15 // v29 = -out[13], v17     = t15a
809         neg             v29.4s, v29.4s                   // v29 = out[13]
810
811         dmbutterfly_l   v10, v11, v8,  v9,  v4,  v5,  v0.s[2], v0.s[3]   // v10,v11 = t5a,  v8,v9   = t4a
812         dmbutterfly_l   v12, v13, v14, v15, v7,  v6,  v0.s[3], v0.s[2]   // v12,v13 = t6a,  v14,v15 = t7a
813
814         butterfly_4s    v2,  v6,  v27, v25               // v2 = out[0], v6 = t2a
815         butterfly_4s    v3,  v7,  v23, v21               // v3 =-out[1], v7 = t10
816
817         dbutterfly_n    v19, v31, v8,  v9,  v12, v13, v4,  v5,  v8,  v9  // v19 = -out[3],  v31 = t6
818         neg             v19.4s, v19.4s                   // v19 = out[3]
819         dbutterfly_n    v28, v16, v10, v11, v14, v15, v4,  v5,  v10, v11 // v28 = out[12],  v16 = t7
820
821         butterfly_4s    v5,  v8,  v20, v22               // v5 =-out[15],v8 = t3a
822         butterfly_4s    v4,  v9,  v24, v26               // v4 = out[14],v9 = t11
823
824         dmbutterfly0    v23, v24, v6,  v8,  v10, v11, v12, v13, v14, v15, 1 // v23 = out[7], v24 = out[8]
825         dmbutterfly0    v21, v26, v30, v17, v10, v11, v12, v13, v14, v15, 1 // v21 = out[5], v26 = out[10]
826         dmbutterfly0    v20, v27, v16, v31, v10, v11, v12, v13, v14, v15    // v20 = out[4], v27 = out[11]
827         dmbutterfly0    v22, v25, v9,  v7,  v10, v11, v12, v13, v14, v15    // v22 = out[6], v25 = out[9]
828
829         neg             v31.4s,  v5.4s                    // v31 = out[15]
830         neg             v17.4s,  v3.4s                    // v17 = out[1]
831
832         mov             v16.16b, v2.16b
833         mov             v30.16b, v4.16b
834         ret
835 endfunc
836
837 // Helper macros; we can't use these expressions directly within
838 // e.g. .irp due to the extra concatenation \(). Therefore wrap
839 // them in macros to allow using .irp below.
840 .macro load i, src, inc
841         ld1             {v\i\().4s},  [\src], \inc
842 .endm
843 .macro store i, dst, inc
844         st1             {v\i\().4s},  [\dst], \inc
845 .endm
846 .macro movi_v i, size, imm
847         movi            v\i\()\size,  \imm
848 .endm
849 .macro load_clear i, src, inc
850         ld1             {v\i\().4s}, [\src]
851         st1             {v4.4s},  [\src], \inc
852 .endm
853
854 // Read a vertical 4x16 slice out of a 16x16 matrix, do a transform on it,
855 // transpose into a horizontal 16x4 slice and store.
856 // x0 = dst (temp buffer)
857 // x1 = slice offset
858 // x2 = src
859 // x9 = input stride
860 .macro itxfm16_1d_funcs txfm
861 function \txfm\()16_1d_4x16_pass1_neon
862         mov             x14, x30
863
864         movi            v4.4s, #0
865 .irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
866         load_clear      \i,  x2,  x9
867 .endr
868
869         bl              \txfm\()16
870
871         // Do four 4x4 transposes. Originally, v16-v31 contain the
872         // 16 rows. Afterwards, v16-v19, v20-v23, v24-v27 and v28-v31
873         // contain the four transposed 4x4 blocks.
874         transpose_4x4s  v16, v17, v18, v19, v4, v5, v6, v7
875         transpose_4x4s  v20, v21, v22, v23, v4, v5, v6, v7
876         transpose_4x4s  v24, v25, v26, v27, v4, v5, v6, v7
877         transpose_4x4s  v28, v29, v30, v31, v4, v5, v6, v7
878
879         // Store the transposed 4x4 blocks horizontally.
880         cmp             x1,  #12
881         b.eq            1f
882 .irp i, 16, 20, 24, 28, 17, 21, 25, 29, 18, 22, 26, 30, 19, 23, 27, 31
883         store           \i,  x0,  #16
884 .endr
885         br              x14
886 1:
887         // Special case: For the last input column (x1 == 12),
888         // which would be stored as the last row in the temp buffer,
889         // don't store the first 4x4 block, but keep it in registers
890         // for the first slice of the second pass (where it is the
891         // last 4x4 block).
892         add             x0,  x0,  #16
893         st1             {v20.4s},  [x0], #16
894         st1             {v24.4s},  [x0], #16
895         st1             {v28.4s},  [x0], #16
896         add             x0,  x0,  #16
897         st1             {v21.4s},  [x0], #16
898         st1             {v25.4s},  [x0], #16
899         st1             {v29.4s},  [x0], #16
900         add             x0,  x0,  #16
901         st1             {v22.4s},  [x0], #16
902         st1             {v26.4s},  [x0], #16
903         st1             {v30.4s},  [x0], #16
904         add             x0,  x0,  #16
905         st1             {v23.4s},  [x0], #16
906         st1             {v27.4s},  [x0], #16
907         st1             {v31.4s},  [x0], #16
908
909         mov             v28.16b, v16.16b
910         mov             v29.16b, v17.16b
911         mov             v30.16b, v18.16b
912         mov             v31.16b, v19.16b
913         br              x14
914 endfunc
915
916 // Read a vertical 4x16 slice out of a 16x16 matrix, do a transform on it,
917 // load the destination pixels (from a similar 4x16 slice), add and store back.
918 // x0 = dst
919 // x1 = dst stride
920 // x2 = src (temp buffer)
921 // x3 = slice offset
922 // x9 = temp buffer stride
923 function \txfm\()16_1d_4x16_pass2_neon
924         mov             x14, x30
925
926 .irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27
927         load            \i,  x2,  x9
928 .endr
929         cbz             x3,  1f
930 .irp i, 28, 29, 30, 31
931         load            \i,  x2,  x9
932 .endr
933 1:
934
935         add             x3,  x0,  x1
936         lsl             x1,  x1,  #1
937         bl              \txfm\()16
938
939         dup             v8.8h, w13
940 .macro load_add_store coef0, coef1, coef2, coef3, coef4, coef5, coef6, coef7
941         srshr           \coef0, \coef0, #6
942         ld1             {v4.4h},   [x0], x1
943         srshr           \coef1, \coef1, #6
944         ld1             {v4.d}[1], [x3], x1
945         srshr           \coef2, \coef2, #6
946         ld1             {v5.4h},   [x0], x1
947         srshr           \coef3, \coef3, #6
948         uaddw           \coef0, \coef0, v4.4h
949         ld1             {v5.d}[1], [x3], x1
950         srshr           \coef4, \coef4, #6
951         uaddw2          \coef1, \coef1, v4.8h
952         ld1             {v6.4h},   [x0], x1
953         srshr           \coef5, \coef5, #6
954         uaddw           \coef2, \coef2, v5.4h
955         ld1             {v6.d}[1], [x3], x1
956         sqxtun          v4.4h,  \coef0
957         srshr           \coef6, \coef6, #6
958         uaddw2          \coef3, \coef3, v5.8h
959         ld1             {v7.4h},   [x0], x1
960         sqxtun2         v4.8h,  \coef1
961         srshr           \coef7, \coef7, #6
962         uaddw           \coef4, \coef4, v6.4h
963         ld1             {v7.d}[1], [x3], x1
964         umin            v4.8h,  v4.8h,  v8.8h
965         sub             x0,  x0,  x1, lsl #2
966         sub             x3,  x3,  x1, lsl #2
967         sqxtun          v5.4h,  \coef2
968         uaddw2          \coef5, \coef5, v6.8h
969         st1             {v4.4h},   [x0], x1
970         sqxtun2         v5.8h,  \coef3
971         uaddw           \coef6, \coef6, v7.4h
972         st1             {v4.d}[1], [x3], x1
973         umin            v5.8h,  v5.8h,  v8.8h
974         sqxtun          v6.4h,  \coef4
975         uaddw2          \coef7, \coef7, v7.8h
976         st1             {v5.4h},   [x0], x1
977         sqxtun2         v6.8h,  \coef5
978         st1             {v5.d}[1], [x3], x1
979         umin            v6.8h,  v6.8h,  v8.8h
980         sqxtun          v7.4h,  \coef6
981         st1             {v6.4h},   [x0], x1
982         sqxtun2         v7.8h,  \coef7
983         st1             {v6.d}[1], [x3], x1
984         umin            v7.8h,  v7.8h,  v8.8h
985         st1             {v7.4h},   [x0], x1
986         st1             {v7.d}[1], [x3], x1
987 .endm
988         load_add_store  v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s
989         load_add_store  v24.4s, v25.4s, v26.4s, v27.4s, v28.4s, v29.4s, v30.4s, v31.4s
990 .purgem load_add_store
991
992         br              x14
993 endfunc
994 .endm
995
996 itxfm16_1d_funcs idct
997 itxfm16_1d_funcs iadst
998
999 // This is the minimum eob value for each subpartition, in increments of 4
1000 const min_eob_idct_idct_16, align=4
1001         .short  0, 10, 38, 89
1002 endconst
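// If the eob (the number of nonzero coefficients in scan order) is below the
// threshold for a given 4-column slice, all remaining input columns are known
// to be zero, so the 16x16 functions below skip the first pass for those
// slices and just clear the corresponding rows of the temp buffer (the
// ldrh/cmp on x12/w3 and the branch to 1f).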
1003
1004 .macro itxfm_func16x16 txfm1, txfm2
1005 function vp9_\txfm1\()_\txfm2\()_16x16_add_16_neon
1006 .ifc \txfm1\()_\txfm2,idct_idct
1007         cmp             w3,  #1
1008         b.eq            idct16x16_dc_add_neon
1009 .endif
1010         mov             x15, x30
1011         // iadst16 requires clobbering v8-v15, idct16 only clobbers v8-v9.
1012 .ifnc \txfm1\()_\txfm2,idct_idct
1013         stp             d14, d15, [sp, #-0x10]!
1014         stp             d12, d13, [sp, #-0x10]!
1015         stp             d10, d11, [sp, #-0x10]!
1016 .endif
1017         stp             d8,  d9,  [sp, #-0x10]!
1018
1019         sub             sp,  sp,  #1024
1020
1021         mov             x4,  x0
1022         mov             x5,  x1
1023         mov             x6,  x2
1024
1025         movrel          x10, idct_coeffs
1026 .ifnc \txfm1\()_\txfm2,idct_idct
1027         movrel          x11, iadst16_coeffs
1028 .endif
1029         movrel          x12, min_eob_idct_idct_16, 2
1030 .ifc \txfm1,idct
1031         ld1             {v0.8h,v1.8h}, [x10]
1032         sxtl            v2.4s,  v1.4h
1033         sxtl2           v3.4s,  v1.8h
1034         sxtl2           v1.4s,  v0.8h
1035         sxtl            v0.4s,  v0.4h
1036 .endif
1037         mov             x9,  #64
1038
1039 .irp i, 0, 4, 8, 12
1040         add             x0,  sp,  #(\i*64)
1041 .ifc \txfm1\()_\txfm2,idct_idct
1042 .if \i > 0
1043         ldrh            w1,  [x12], #2
1044         cmp             w3,  w1
1045         mov             x1,  #(16 - \i)/4
1046         b.le            1f
1047 .endif
1048 .endif
1049         mov             x1,  #\i
1050         add             x2,  x6,  #(\i*4)
1051         bl              \txfm1\()16_1d_4x16_pass1_neon
1052 .endr
1053 .ifc \txfm1\()_\txfm2,iadst_idct
1054         ld1             {v0.8h,v1.8h}, [x10]
1055         sxtl            v2.4s,  v1.4h
1056         sxtl2           v3.4s,  v1.8h
1057         sxtl2           v1.4s,  v0.8h
1058         sxtl            v0.4s,  v0.4h
1059 .endif
1060
1061 .ifc \txfm1\()_\txfm2,idct_idct
1062         b               3f
1063 1:
1064         // Set v28-v31 to zero, for the in-register passthrough of
1065         // coefficients to pass 2.
1066         movi            v28.4s,  #0
1067         movi            v29.4s,  #0
1068         movi            v30.4s,  #0
1069         movi            v31.4s,  #0
1070 2:
1071         subs            x1,  x1,  #1
1072 .rept 4
1073         st1             {v28.4s,v29.4s,v30.4s,v31.4s}, [x0], x9
1074 .endr
1075         b.ne            2b
1076 3:
1077 .endif
1078
1079 .irp i, 0, 4, 8, 12
1080         add             x0,  x4,  #(\i*2)
1081         mov             x1,  x5
1082         add             x2,  sp,  #(\i*4)
1083         mov             x3,  #\i
1084         bl              \txfm2\()16_1d_4x16_pass2_neon
1085 .endr
1086
1087         add             sp,  sp,  #1024
1088         ldp             d8,  d9,  [sp], 0x10
1089 .ifnc \txfm1\()_\txfm2,idct_idct
1090         ldp             d10, d11, [sp], 0x10
1091         ldp             d12, d13, [sp], 0x10
1092         ldp             d14, d15, [sp], 0x10
1093 .endif
1094         br              x15
1095 endfunc
1096
1097 function ff_vp9_\txfm1\()_\txfm2\()_16x16_add_10_neon, export=1
1098         mov             x13, #0x03ff
1099         b               vp9_\txfm1\()_\txfm2\()_16x16_add_16_neon
1100 endfunc
1101
1102 function ff_vp9_\txfm1\()_\txfm2\()_16x16_add_12_neon, export=1
1103         mov             x13, #0x0fff
1104         b               vp9_\txfm1\()_\txfm2\()_16x16_add_16_neon
1105 endfunc
1106 .endm
1107
1108 itxfm_func16x16 idct,  idct
1109 itxfm_func16x16 iadst, idct
1110 itxfm_func16x16 idct,  iadst
1111 itxfm_func16x16 iadst, iadst
1112
1113
1114 function idct32x32_dc_add_neon
1115         movrel          x4,  idct_coeffs
1116         ld1             {v0.4h}, [x4]
1117         sxtl            v0.4s,  v0.4h
1118
1119         movi            v1.4h,  #0
1120
1121         ld1             {v2.s}[0],  [x2]
1122         smull           v2.2d,  v2.2s,  v0.s[0]
1123         rshrn           v2.2s,  v2.2d,  #14
1124         smull           v2.2d,  v2.2s,  v0.s[0]
1125         rshrn           v2.2s,  v2.2d,  #14
1126         st1             {v1.s}[0],  [x2]
1127         dup             v2.4s,  v2.s[0]
1128
1129         srshr           v0.4s,  v2.4s,  #6
1130
1131         mov             x3,  x0
1132         mov             x4,  #32
1133         sub             x1,  x1,  #32
1134         dup             v31.8h, w13
1135 1:
1136         // Loop to add the constant v0 into all 32x32 outputs
1137         subs            x4,  x4,  #1
1138         ld1             {v1.8h,v2.8h},  [x0], #32
1139         uaddw           v16.4s, v0.4s,  v1.4h
1140         uaddw2          v17.4s, v0.4s,  v1.8h
1141         ld1             {v3.8h,v4.8h},  [x0], x1
1142         uaddw           v18.4s, v0.4s,  v2.4h
1143         uaddw2          v19.4s, v0.4s,  v2.8h
1144         uaddw           v20.4s, v0.4s,  v3.4h
1145         uaddw2          v21.4s, v0.4s,  v3.8h
1146         uaddw           v22.4s, v0.4s,  v4.4h
1147         uaddw2          v23.4s, v0.4s,  v4.8h
1148         sqxtun          v1.4h,  v16.4s
1149         sqxtun2         v1.8h,  v17.4s
1150         sqxtun          v2.4h,  v18.4s
1151         sqxtun2         v2.8h,  v19.4s
1152         sqxtun          v3.4h,  v20.4s
1153         sqxtun2         v3.8h,  v21.4s
1154         sqxtun          v4.4h,  v22.4s
1155         sqxtun2         v4.8h,  v23.4s
1156         umin            v1.8h,  v1.8h,  v31.8h
1157         umin            v2.8h,  v2.8h,  v31.8h
1158         st1             {v1.8h,v2.8h},  [x3], #32
1159         umin            v3.8h,  v3.8h,  v31.8h
1160         umin            v4.8h,  v4.8h,  v31.8h
1161         st1             {v3.8h,v4.8h},  [x3], x1
1162         b.ne            1b
1163
1164         ret
1165 endfunc
1166
1167 function idct32_odd
1168         dmbutterfly     v16, v31, v10.s[0], v10.s[1], v4, v5, v6, v7 // v16 = t16a, v31 = t31a
1169         dmbutterfly     v24, v23, v10.s[2], v10.s[3], v4, v5, v6, v7 // v24 = t17a, v23 = t30a
1170         dmbutterfly     v20, v27, v11.s[0], v11.s[1], v4, v5, v6, v7 // v20 = t18a, v27 = t29a
1171         dmbutterfly     v28, v19, v11.s[2], v11.s[3], v4, v5, v6, v7 // v28 = t19a, v19 = t28a
1172         dmbutterfly     v18, v29, v12.s[0], v12.s[1], v4, v5, v6, v7 // v18 = t20a, v29 = t27a
1173         dmbutterfly     v26, v21, v12.s[2], v12.s[3], v4, v5, v6, v7 // v26 = t21a, v21 = t26a
1174         dmbutterfly     v22, v25, v13.s[0], v13.s[1], v4, v5, v6, v7 // v22 = t22a, v25 = t25a
1175         dmbutterfly     v30, v17, v13.s[2], v13.s[3], v4, v5, v6, v7 // v30 = t23a, v17 = t24a
1176
1177         butterfly_4s    v4,  v24, v16, v24 // v4  = t16, v24 = t17
1178         butterfly_4s    v5,  v20, v28, v20 // v5  = t19, v20 = t18
1179         butterfly_4s    v6,  v26, v18, v26 // v6  = t20, v26 = t21
1180         butterfly_4s    v7,  v22, v30, v22 // v7  = t23, v22 = t22
1181         butterfly_4s    v28, v25, v17, v25 // v28 = t24, v25 = t25
1182         butterfly_4s    v30, v21, v29, v21 // v30 = t27, v21 = t26
1183         butterfly_4s    v29, v23, v31, v23 // v29 = t31, v23 = t30
1184         butterfly_4s    v31, v27, v19, v27 // v31 = t28, v27 = t29
1185
1186         dmbutterfly     v23, v24, v1.s[0], v1.s[1], v16, v17, v18, v19        // v23 = t17a, v24 = t30a
1187         dmbutterfly     v27, v20, v1.s[0], v1.s[1], v16, v17, v18, v19, neg=1 // v27 = t29a, v20 = t18a
1188         dmbutterfly     v21, v26, v1.s[2], v1.s[3], v16, v17, v18, v19        // v21 = t21a, v26 = t26a
1189         dmbutterfly     v25, v22, v1.s[2], v1.s[3], v16, v17, v18, v19, neg=1 // v25 = t25a, v22 = t22a
1190
1191         butterfly_4s    v16, v5,  v4,  v5  // v16 = t16a, v5  = t19a
1192         butterfly_4s    v17, v20, v23, v20 // v17 = t17,  v20 = t18
1193         butterfly_4s    v18, v6,  v7,  v6  // v18 = t23a, v6  = t20a
1194         butterfly_4s    v19, v21, v22, v21 // v19 = t22,  v21 = t21
1195         butterfly_4s    v4,  v28, v28, v30 // v4  = t24a, v28 = t27a
1196         butterfly_4s    v23, v26, v25, v26 // v23 = t25,  v26 = t26
1197         butterfly_4s    v7,  v8,  v29, v31 // v7  = t31a, v8  = t28a
1198         butterfly_4s    v22, v27, v24, v27 // v22 = t30,  v27 = t29
1199
1200         dmbutterfly     v27, v20, v0.s[2], v0.s[3], v24, v25, v30, v31        // v27 = t18a, v20 = t29a
1201         dmbutterfly     v8,  v5,  v0.s[2], v0.s[3], v24, v25, v30, v31        // v8  = t19,  v5  = t28
        dmbutterfly     v28, v6,  v0.s[2], v0.s[3], v24, v25, v30, v31, neg=1 // v28 = t27,  v6  = t20
        dmbutterfly     v26, v21, v0.s[2], v0.s[3], v24, v25, v30, v31, neg=1 // v26 = t26a, v21 = t21a

        butterfly_4s    v31, v24, v7,  v4  // v31 = t31,  v24 = t24
        butterfly_4s    v30, v25, v22, v23 // v30 = t30a, v25 = t25a
        butterfly_4s_r  v23, v16, v16, v18 // v23 = t23,  v16 = t16
        butterfly_4s_r  v22, v17, v17, v19 // v22 = t22a, v17 = t17a
        butterfly_4s    v18, v21, v27, v21 // v18 = t18,  v21 = t21
        butterfly_4s_r  v27, v28, v5,  v28 // v27 = t27a, v28 = t28a
        butterfly_4s    v29, v26, v20, v26 // v29 = t29,  v26 = t26
        butterfly_4s    v19, v20, v8,  v6  // v19 = t19a, v20 = t20

        dmbutterfly0    v27, v20, v27, v20, v4, v5, v6, v7, v8, v9 // v27 = t27,  v20 = t20
        dmbutterfly0    v26, v21, v26, v21, v4, v5, v6, v7, v8, v9 // v26 = t26a, v21 = t21a
        dmbutterfly0    v25, v22, v25, v22, v4, v5, v6, v7, v8, v9 // v25 = t25,  v22 = t22
        dmbutterfly0    v24, v23, v24, v23, v4, v5, v6, v7, v8, v9 // v24 = t24a, v23 = t23a
        ret
endfunc

// Do a 32-point IDCT of a 4x32 slice out of a 32x32 matrix.
// The 32-point IDCT can be decomposed into two 16-point IDCTs;
// a normal IDCT16 with every other input component (the even ones, with
// each output written twice), followed by a separate 16-point IDCT
// of the odd inputs, added/subtracted onto the outputs of the first idct16.
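//
// As a rough, illustrative C sketch of that combination (names and
// signatures are not taken from this file):
//   idct16(E, in_even);      // in_even = IN(0), IN(2), ..., IN(30)
//   idct32_odd(O, in_odd);   // in_odd  = IN(1), IN(3), ..., IN(31)
//   for (i = 0; i < 16; i++) {
//       out[i]      = E[i] + O[i];
//       out[31 - i] = E[i] - O[i];
//   }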
// x0 = dst (temp buffer)
// x1 = unused
// x2 = src
// x9 = double input stride
function idct32_1d_4x32_pass1_neon
        mov             x14, x30

        movi            v4.4s,  #0

        // v16 = IN(0), v17 = IN(2) ... v31 = IN(30)
.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
        ld1             {v\i\().4s}, [x2]
        st1             {v4.4s},  [x2], x9
.endr

        bl              idct16

        // Do four 4x4 transposes. Originally, v16-v31 contain the
        // 16 rows. Afterwards, v16-v19, v20-v23, v24-v27 and v28-v31
        // contain the four transposed 4x4 blocks.
        transpose_4x4s  v16, v17, v18, v19, v4, v5, v6, v7
        transpose_4x4s  v20, v21, v22, v23, v4, v5, v6, v7
        transpose_4x4s  v24, v25, v26, v27, v4, v5, v6, v7
        transpose_4x4s  v28, v29, v30, v31, v4, v5, v6, v7

        // Store the registers a, b, c, d horizontally, followed by the
        // same registers d, c, b, a mirrored.
.macro store_rev a, b, c, d
        // There's no rev128 instruction, but we reverse each 64 bit
        // half, and then flip them using an ext with 8 bytes offset.
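        // For example, lane order 0 1 2 3 becomes 1 0 3 2 after the rev64,
        // and the ext by 8 bytes then gives 3 2 1 0, a full 128 bit reverse.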
        rev64           v7.4s, \d
        st1             {\a},  [x0], #16
        ext             v7.16b, v7.16b, v7.16b, #8
        st1             {\b},  [x0], #16
        rev64           v6.4s, \c
        st1             {\c},  [x0], #16
        ext             v6.16b, v6.16b, v6.16b, #8
        st1             {\d},  [x0], #16
        rev64           v5.4s, \b
        st1             {v7.4s},  [x0], #16
        ext             v5.16b, v5.16b, v5.16b, #8
        st1             {v6.4s},  [x0], #16
        rev64           v4.4s, \a
        st1             {v5.4s},  [x0], #16
        ext             v4.16b, v4.16b, v4.16b, #8
        st1             {v4.4s},  [x0], #16
.endm
        store_rev       v16.4s, v20.4s, v24.4s, v28.4s
        store_rev       v17.4s, v21.4s, v25.4s, v29.4s
        store_rev       v18.4s, v22.4s, v26.4s, v30.4s
        store_rev       v19.4s, v23.4s, v27.4s, v31.4s
        sub             x0,  x0,  #512
.purgem store_rev

        // Move x2 back to the start of the input, and move
        // to the first odd row
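        // x9 is 256, i.e. two 128 byte input rows (32 coefficients of
        // 4 bytes each), so x9 << 4 undoes the 16 post-incremented loads
        // above, and the extra 128 lands on row 1, the first odd row.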
        sub             x2,  x2,  x9, lsl #4
        add             x2,  x2,  #128

        movi            v4.4s,  #0
        // v16 = IN(1), v17 = IN(3) ... v31 = IN(31)
.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
        ld1             {v\i\().4s}, [x2]
        st1             {v4.4s},  [x2], x9
.endr

        bl              idct32_odd

        transpose_4x4s  v31, v30, v29, v28, v4, v5, v6, v7
        transpose_4x4s  v27, v26, v25, v24, v4, v5, v6, v7
        transpose_4x4s  v23, v22, v21, v20, v4, v5, v6, v7
        transpose_4x4s  v19, v18, v17, v16, v4, v5, v6, v7

        // Store the registers a, b, c, d horizontally, adding into the
        // output first, and then the same registers d, c, b, a mirrored,
        // subtracted from the output.
.macro store_rev a, b, c, d, a16b, b16b
        ld1             {v4.4s},  [x0]
        rev64           v9.4s, \d
        add             v4.4s, v4.4s, \a
        st1             {v4.4s},  [x0], #16
        rev64           v8.4s, \c
        ld1             {v4.4s},  [x0]
        ext             v9.16b, v9.16b, v9.16b, #8
        add             v4.4s, v4.4s, \b
        st1             {v4.4s},  [x0], #16
        ext             v8.16b, v8.16b, v8.16b, #8
        ld1             {v4.4s},  [x0]
        rev64           \b, \b
        add             v4.4s, v4.4s, \c
        st1             {v4.4s},  [x0], #16
        rev64           \a, \a
        ld1             {v4.4s},  [x0]
        ext             \b16b, \b16b, \b16b, #8
        add             v4.4s, v4.4s, \d
        st1             {v4.4s},  [x0], #16
        ext             \a16b, \a16b, \a16b, #8
        ld1             {v4.4s},  [x0]
        sub             v4.4s, v4.4s, v9.4s
        st1             {v4.4s},  [x0], #16
        ld1             {v4.4s},  [x0]
        sub             v4.4s, v4.4s, v8.4s
        st1             {v4.4s},  [x0], #16
        ld1             {v4.4s},  [x0]
        sub             v4.4s, v4.4s, \b
        st1             {v4.4s},  [x0], #16
        ld1             {v4.4s},  [x0]
        sub             v4.4s, v4.4s, \a
        st1             {v4.4s},  [x0], #16
.endm

        store_rev       v31.4s, v27.4s, v23.4s, v19.4s, v31.16b, v27.16b
        store_rev       v30.4s, v26.4s, v22.4s, v18.4s, v30.16b, v26.16b
        store_rev       v29.4s, v25.4s, v21.4s, v17.4s, v29.16b, v25.16b
        store_rev       v28.4s, v24.4s, v20.4s, v16.4s, v28.16b, v24.16b
.purgem store_rev
        br              x14
endfunc

// This is mostly the same as 4x32_pass1, but without the transpose; it
// uses the source as a temp buffer between the two idct passes, and
// adds into the destination.
// x0 = dst
// x1 = dst stride
// x2 = src (temp buffer)
// x7 = negative double temp buffer stride
// x9 = double temp buffer stride
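//
// Rough outline of one 4-column slice (illustrative only):
//   1. load the even temp buffer rows, run idct16, store back in place
//   2. load the odd temp buffer rows, run idct32_odd (results stay in
//      v16-v31)
//   3. combine the stored even outputs with the odd outputs, round, add
//      into the destination and clamp (see load_acc_store below)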
function idct32_1d_4x32_pass2_neon
        mov             x14, x30

        // v16 = IN(0), v17 = IN(2) ... v31 = IN(30)
.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
        ld1             {v\i\().4s}, [x2], x9
.endr
        sub             x2,  x2,  x9, lsl #4

        bl              idct16

.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
        st1             {v\i\().4s}, [x2], x9
.endr

        sub             x2,  x2,  x9, lsl #4
        add             x2,  x2,  #128

        // v16 = IN(1), v17 = IN(3) ... v31 = IN(31)
.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
        ld1             {v\i\().4s}, [x2], x9
.endr
        sub             x2,  x2,  x9, lsl #4
        sub             x2,  x2,  #128

        bl              idct32_odd

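        // load_acc_store combines one even-pass row from the temp buffer
        // with one odd-pass row held in a register, then rounds, adds into
        // the destination and clamps. As a rough, illustrative C sketch
        // (max_pixel being the value broadcast into v15):
        //   t      = temp[i] + odd[i];   // temp[i] - odd[i] when neg=1
        //   dst[i] = av_clip(dst[i] + ((t + 32) >> 6), 0, max_pixel);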
.macro load_acc_store a, b, c, d, neg=0
.if \neg == 0
        ld1             {v4.4s},  [x2], x9
        ld1             {v5.4s},  [x2], x9
        add             v4.4s, v4.4s, \a
        ld1             {v6.4s},  [x2], x9
        add             v5.4s, v5.4s, \b
        ld1             {v7.4s},  [x2], x9
        add             v6.4s, v6.4s, \c
        add             v7.4s, v7.4s, \d
.else
        ld1             {v4.4s},  [x2], x7
        ld1             {v5.4s},  [x2], x7
        sub             v4.4s, v4.4s, \a
        ld1             {v6.4s},  [x2], x7
        sub             v5.4s, v5.4s, \b
        ld1             {v7.4s},  [x2], x7
        sub             v6.4s, v6.4s, \c
        sub             v7.4s, v7.4s, \d
.endif
        ld1             {v8.4h},   [x0], x1
        ld1             {v8.d}[1], [x0], x1
        srshr           v4.4s, v4.4s, #6
        ld1             {v9.4h},   [x0], x1
        srshr           v5.4s, v5.4s, #6
        uaddw           v4.4s, v4.4s, v8.4h
        ld1             {v9.d}[1], [x0], x1
        srshr           v6.4s, v6.4s, #6
        uaddw2          v5.4s, v5.4s, v8.8h
        srshr           v7.4s, v7.4s, #6
        sub             x0,  x0,  x1, lsl #2
        uaddw           v6.4s, v6.4s, v9.4h
        sqxtun          v4.4h, v4.4s
        uaddw2          v7.4s, v7.4s, v9.8h
        sqxtun2         v4.8h, v5.4s
        umin            v4.8h, v4.8h, v15.8h
        st1             {v4.4h},   [x0], x1
        sqxtun          v5.4h, v6.4s
        st1             {v4.d}[1], [x0], x1
        sqxtun2         v5.8h, v7.4s
        umin            v5.8h, v5.8h, v15.8h
        st1             {v5.4h},   [x0], x1
        st1             {v5.d}[1], [x0], x1
.endm
        load_acc_store  v31.4s, v30.4s, v29.4s, v28.4s
        load_acc_store  v27.4s, v26.4s, v25.4s, v24.4s
        load_acc_store  v23.4s, v22.4s, v21.4s, v20.4s
        load_acc_store  v19.4s, v18.4s, v17.4s, v16.4s
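        // Step back to the last temp buffer row read above, so that the
        // mirrored second half can be read with the negative stride in x7.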
        sub             x2,  x2,  x9
        load_acc_store  v16.4s, v17.4s, v18.4s, v19.4s, 1
        load_acc_store  v20.4s, v21.4s, v22.4s, v23.4s, 1
        load_acc_store  v24.4s, v25.4s, v26.4s, v27.4s, 1
        load_acc_store  v28.4s, v29.4s, v30.4s, v31.4s, 1
.purgem load_acc_store
        br              x14
endfunc

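// Per 4-column slice eob thresholds for the pass 1 loop below: when the
// block's eob is at or below the entry for a slice, that slice and all the
// following ones hold no nonzero coefficients (given the scan order), so
// the corresponding part of the temp buffer is zero filled instead of
// running the first pass on it.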
const min_eob_idct_idct_32, align=4
        .short  0, 9, 34, 70, 135, 240, 336, 448
endconst

function vp9_idct_idct_32x32_add_16_neon
        cmp             w3,  #1
        b.eq            idct32x32_dc_add_neon

        movrel          x10, idct_coeffs
        movrel          x12, min_eob_idct_idct_32, 2

        mov             x15, x30
        stp             d8,  d9,  [sp, #-0x10]!
        stp             d10, d11, [sp, #-0x10]!
        stp             d12, d13, [sp, #-0x10]!
        stp             d14, d15, [sp, #-0x10]!

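        // Temp buffer for the transposed intermediate:
        // 32 * 32 coefficients of 4 bytes each.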
        sub             sp,  sp,  #4096

        mov             x4,  x0
        mov             x5,  x1
        mov             x6,  x2

        // Double stride of the input, since we only read every other line
        mov             x9,  #256
        neg             x7,  x9

        ld1             {v0.8h,v1.8h},   [x10], #32
        sxtl            v2.4s,  v1.4h
        sxtl2           v3.4s,  v1.8h
        sxtl2           v1.4s,  v0.8h
        sxtl            v0.4s,  v0.4h
        ld1             {v10.8h,v11.8h}, [x10]
        sxtl            v12.4s, v11.4h
        sxtl2           v13.4s, v11.8h
        sxtl2           v11.4s, v10.8h
        sxtl            v10.4s, v10.4h
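        // v0-v3 now hold idct_coeffs[0..15] and v10-v13 hold
        // idct_coeffs[16..31], sign extended to 32 bit; idct16 uses v0-v3,
        // while idct32_odd uses v10-v13 together with v0-v1.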

        dup             v15.8h, w13

.irp i, 0, 4, 8, 12, 16, 20, 24, 28
        add             x0,  sp,  #(\i*128)
.if \i > 0
        ldrh            w1,  [x12], #2
        cmp             w3,  w1
        mov             x1,  #(32 - \i)/4
        b.le            1f
.endif
        add             x2,  x6,  #(\i*4)
        bl              idct32_1d_4x32_pass1_neon
.endr
        b               3f

1:
        // Write zeros to the temp buffer for pass 2
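        // Each iteration of the loop below clears four 128 byte rows
        // (4 * 2 * 64 bytes); x1 holds the number of 4-row groups left.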
        movi            v16.4s,  #0
        movi            v17.4s,  #0
        movi            v18.4s,  #0
        movi            v19.4s,  #0
2:
        subs            x1,  x1,  #1
.rept 4
        st1             {v16.4s-v19.4s},  [x0], #64
        st1             {v16.4s-v19.4s},  [x0], #64
.endr
        b.ne            2b
3:
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
        add             x0,  x4,  #(\i*2)
        mov             x1,  x5
        add             x2,  sp,  #(\i*4)
        bl              idct32_1d_4x32_pass2_neon
.endr

        add             sp,  sp,  #4096
        ldp             d14, d15, [sp], 0x10
        ldp             d12, d13, [sp], 0x10
        ldp             d10, d11, [sp], 0x10
        ldp             d8,  d9,  [sp], 0x10

        br              x15
endfunc

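// The bpp specific entry points only differ in the max pixel value loaded
// into w13 ((1 << bpp) - 1), which the shared code broadcasts into v15 for
// the final clamping.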
function ff_vp9_idct_idct_32x32_add_10_neon, export=1
        mov             x13, #0x03ff
        b               vp9_idct_idct_32x32_add_16_neon
endfunc

function ff_vp9_idct_idct_32x32_add_12_neon, export=1
        mov             x13, #0x0fff
        b               vp9_idct_idct_32x32_add_16_neon
endfunc