ARM: move NEON H264 chroma mc to a separate file
libavcodec/arm/h264cmc_neon.S
/*
 * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "asm.S"

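/*
 * All functions below compute the H.264 chroma interpolation: each
 * output pixel is a bilinear blend of a 2x2 source neighbourhood,
 * weighted by the fractional position (x, y), 0 <= x, y < 8.
 * Rough scalar reference of what one function computes (illustrative
 * sketch only; the variable names are not taken from the code below):
 *
 *     const int A = (8 - x) * (8 - y), B = x * (8 - y);
 *     const int C = (8 - x) * y,       D = x * y;
 *     for (int i = 0; i < h; i++) {
 *         for (int j = 0; j < width; j++)          // width = 8, 4 or 2
 *             dst[j] = (A * src[j]          + B * src[j + 1] +
 *                       C * src[j + stride] + D * src[j + stride + 1] +
 *                       32) >> 6;
 *         dst += stride;
 *         src += stride;
 *     }
 *
 * The avg variants additionally average the result with the bytes
 * already present in dst.  The mc8/mc4 versions switch to a two-tap
 * filter when x or y is zero; mc2 shortcuts only the x == y == 0 case
 * (plain copy or average).
 */
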
/* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
.macro  h264_chroma_mc8 type
function ff_\type\()_h264_chroma_mc8_neon, export=1
        push            {r4-r7, lr}
        ldrd            r4,  [sp, #20]
  .ifc \type,avg
        mov             lr,  r0
  .endif
        pld             [r1]
        pld             [r1, r2]

A       muls            r7,  r4,  r5
T       mul             r7,  r4,  r5
T       cmp             r7,  #0
        rsb             r6,  r7,  r5,  lsl #3
        rsb             r12, r7,  r4,  lsl #3
        sub             r4,  r7,  r4,  lsl #3
        sub             r4,  r4,  r5,  lsl #3
        add             r4,  r4,  #64
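        @ r4 = A = (8-x)*(8-y),  r12 = B = x*(8-y)
        @ r6 = C = (8-x)*y,      r7  = D = x*y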

        beq             2f

        add             r5,  r1,  r2

        vdup.8          d0,  r4
        lsl             r4,  r2,  #1
        vdup.8          d1,  r12
        vld1.8          {d4, d5}, [r1], r4
        vdup.8          d2,  r6
        vld1.8          {d6, d7}, [r5], r4
        vdup.8          d3,  r7

        vext.8          d5,  d4,  d5,  #1
        vext.8          d7,  d6,  d7,  #1

1:      pld             [r5]
        vmull.u8        q8,  d4,  d0
        vmlal.u8        q8,  d5,  d1
        vld1.8          {d4, d5}, [r1], r4
        vmlal.u8        q8,  d6,  d2
        vext.8          d5,  d4,  d5,  #1
        vmlal.u8        q8,  d7,  d3
        vmull.u8        q9,  d6,  d0
        subs            r3,  r3,  #2
        vmlal.u8        q9,  d7,  d1
        vmlal.u8        q9,  d4,  d2
        vmlal.u8        q9,  d5,  d3
        vrshrn.u16      d16, q8,  #6
        vld1.8          {d6, d7}, [r5], r4
        pld             [r1]
        vrshrn.u16      d17, q9,  #6
  .ifc \type,avg
        vld1.8          {d20}, [lr,:64], r2
        vld1.8          {d21}, [lr,:64], r2
        vrhadd.u8       q8,  q8,  q10
  .endif
        vext.8          d7,  d6,  d7,  #1
        vst1.8          {d16}, [r0,:64], r2
        vst1.8          {d17}, [r0,:64], r2
        bgt             1b

        pop             {r4-r7, pc}

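        @ x == 0 or y == 0: a two-tap 1D filter is enough;
        @ x == 0 falls through to the vertical loop at 3:,
        @ y == 0 branches to the horizontal loop at 4: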
2:      tst             r6,  r6
        add             r12, r12, r6
        vdup.8          d0,  r4
        vdup.8          d1,  r12

        beq             4f

        add             r5,  r1,  r2
        lsl             r4,  r2,  #1
        vld1.8          {d4}, [r1], r4
        vld1.8          {d6}, [r5], r4

3:      pld             [r5]
        vmull.u8        q8,  d4,  d0
        vmlal.u8        q8,  d6,  d1
        vld1.8          {d4}, [r1], r4
        vmull.u8        q9,  d6,  d0
        vmlal.u8        q9,  d4,  d1
        vld1.8          {d6}, [r5], r4
        vrshrn.u16      d16, q8,  #6
        vrshrn.u16      d17, q9,  #6
  .ifc \type,avg
        vld1.8          {d20}, [lr,:64], r2
        vld1.8          {d21}, [lr,:64], r2
        vrhadd.u8       q8,  q8,  q10
  .endif
        subs            r3,  r3,  #2
        pld             [r1]
        vst1.8          {d16}, [r0,:64], r2
        vst1.8          {d17}, [r0,:64], r2
        bgt             3b

        pop             {r4-r7, pc}

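        @ y == 0: horizontal-only filtering
        @ (degenerates to a copy when x == 0 too)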
4:      vld1.8          {d4, d5}, [r1], r2
        vld1.8          {d6, d7}, [r1], r2
        vext.8          d5,  d4,  d5,  #1
        vext.8          d7,  d6,  d7,  #1

5:      pld             [r1]
        subs            r3,  r3,  #2
        vmull.u8        q8,  d4,  d0
        vmlal.u8        q8,  d5,  d1
        vld1.8          {d4, d5}, [r1], r2
        vmull.u8        q9,  d6,  d0
        vmlal.u8        q9,  d7,  d1
        pld             [r1]
        vext.8          d5,  d4,  d5,  #1
        vrshrn.u16      d16, q8,  #6
        vrshrn.u16      d17, q9,  #6
  .ifc \type,avg
        vld1.8          {d20}, [lr,:64], r2
        vld1.8          {d21}, [lr,:64], r2
        vrhadd.u8       q8,  q8,  q10
  .endif
        vld1.8          {d6, d7}, [r1], r2
        vext.8          d7,  d6,  d7,  #1
        vst1.8          {d16}, [r0,:64], r2
        vst1.8          {d17}, [r0,:64], r2
        bgt             5b

        pop             {r4-r7, pc}
endfunc
.endm

/* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
.macro  h264_chroma_mc4 type
function ff_\type\()_h264_chroma_mc4_neon, export=1
        push            {r4-r7, lr}
        ldrd            r4,  [sp, #20]
  .ifc \type,avg
        mov             lr,  r0
  .endif
        pld             [r1]
        pld             [r1, r2]

A       muls            r7,  r4,  r5
T       mul             r7,  r4,  r5
T       cmp             r7,  #0
        rsb             r6,  r7,  r5,  lsl #3
        rsb             r12, r7,  r4,  lsl #3
        sub             r4,  r7,  r4,  lsl #3
        sub             r4,  r4,  r5,  lsl #3
        add             r4,  r4,  #64
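        @ same weights as in mc8: r4 = A, r12 = B, r6 = C, r7 = D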

        beq             2f

        add             r5,  r1,  r2

        vdup.8          d0,  r4
        lsl             r4,  r2,  #1
        vdup.8          d1,  r12
        vld1.8          {d4},     [r1], r4
        vdup.8          d2,  r6
        vld1.8          {d6},     [r5], r4
        vdup.8          d3,  r7

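        @ pack src[i]/src[i+1] pairs and the matching weight pairs into
        @ single d registers, so one vmull/vmlal covers two filter taps;
        @ the halves are folded with vadd.i16 before narrowing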
        vext.8          d5,  d4,  d5,  #1
        vext.8          d7,  d6,  d7,  #1
        vtrn.32         d4,  d5
        vtrn.32         d6,  d7

        vtrn.32         d0,  d1
        vtrn.32         d2,  d3

1:      pld             [r5]
        vmull.u8        q8,  d4,  d0
        vmlal.u8        q8,  d6,  d2
        vld1.8          {d4},     [r1], r4
        vext.8          d5,  d4,  d5,  #1
        vtrn.32         d4,  d5
        vmull.u8        q9,  d6,  d0
        vmlal.u8        q9,  d4,  d2
        vld1.8          {d6},     [r5], r4
        vadd.i16        d16, d16, d17
        vadd.i16        d17, d18, d19
        vrshrn.u16      d16, q8,  #6
        subs            r3,  r3,  #2
        pld             [r1]
  .ifc \type,avg
        vld1.32         {d20[0]}, [lr,:32], r2
        vld1.32         {d20[1]}, [lr,:32], r2
        vrhadd.u8       d16, d16, d20
  .endif
        vext.8          d7,  d6,  d7,  #1
        vtrn.32         d6,  d7
        vst1.32         {d16[0]}, [r0,:32], r2
        vst1.32         {d16[1]}, [r0,:32], r2
        bgt             1b

        pop             {r4-r7, pc}

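        @ x == 0 or y == 0: two-tap 1D filter
        @ (vertical at 3:, horizontal at 4:)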
2:      tst             r6,  r6
        add             r12, r12, r6
        vdup.8          d0,  r4
        vdup.8          d1,  r12
        vtrn.32         d0,  d1

        beq             4f

        vext.32         d1,  d0,  d1,  #1
        add             r5,  r1,  r2
        lsl             r4,  r2,  #1
        vld1.32         {d4[0]},  [r1], r4
        vld1.32         {d4[1]},  [r5], r4

3:      pld             [r5]
        vmull.u8        q8,  d4,  d0
        vld1.32         {d4[0]},  [r1], r4
        vmull.u8        q9,  d4,  d1
        vld1.32         {d4[1]},  [r5], r4
        vadd.i16        d16, d16, d17
        vadd.i16        d17, d18, d19
        vrshrn.u16      d16, q8,  #6
  .ifc \type,avg
        vld1.32         {d20[0]}, [lr,:32], r2
        vld1.32         {d20[1]}, [lr,:32], r2
        vrhadd.u8       d16, d16, d20
  .endif
        subs            r3,  r3,  #2
        pld             [r1]
        vst1.32         {d16[0]}, [r0,:32], r2
        vst1.32         {d16[1]}, [r0,:32], r2
        bgt             3b

        pop             {r4-r7, pc}

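        @ y == 0: horizontal-only filtering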
4:      vld1.8          {d4},     [r1], r2
        vld1.8          {d6},     [r1], r2
        vext.8          d5,  d4,  d5,  #1
        vext.8          d7,  d6,  d7,  #1
        vtrn.32         d4,  d5
        vtrn.32         d6,  d7

5:      vmull.u8        q8,  d4,  d0
        vmull.u8        q9,  d6,  d0
        subs            r3,  r3,  #2
        vld1.8          {d4},     [r1], r2
        vext.8          d5,  d4,  d5,  #1
        vtrn.32         d4,  d5
        vadd.i16        d16, d16, d17
        vadd.i16        d17, d18, d19
        pld             [r1]
        vrshrn.u16      d16, q8,  #6
  .ifc \type,avg
        vld1.32         {d20[0]}, [lr,:32], r2
        vld1.32         {d20[1]}, [lr,:32], r2
        vrhadd.u8       d16, d16, d20
  .endif
        vld1.8          {d6},     [r1], r2
        vext.8          d7,  d6,  d7,  #1
        vtrn.32         d6,  d7
        pld             [r1]
        vst1.32         {d16[0]}, [r0,:32], r2
        vst1.32         {d16[1]}, [r0,:32], r2
        bgt             5b

        pop             {r4-r7, pc}
endfunc
.endm

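/* chroma_mc2(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */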
.macro  h264_chroma_mc2 type
function ff_\type\()_h264_chroma_mc2_neon, export=1
        push            {r4-r6, lr}
        ldr             r4,  [sp, #16]
        ldr             lr,  [sp, #20]
        pld             [r1]
        pld             [r1, r2]
        orrs            r5,  r4,  lr
        beq             2f

        mul             r5,  r4,  lr
        rsb             r6,  r5,  lr,  lsl #3
        rsb             r12, r5,  r4,  lsl #3
        sub             r4,  r5,  r4,  lsl #3
        sub             r4,  r4,  lr,  lsl #3
        add             r4,  r4,  #64
        vdup.8          d0,  r4
        vdup.8          d2,  r12
        vdup.8          d1,  r6
        vdup.8          d3,  r5
        vtrn.16         q0,  q1
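        @ d0 = {A,A,B,B,A,A,B,B}, d1 = {C,C,D,D,C,C,D,D}:
        @ weights interleaved to match the packed two-pixel
        @ layout built in the loop below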
1:
        vld1.32         {d4[0]},  [r1], r2
        vld1.32         {d4[1]},  [r1], r2
        vrev64.32       d5,  d4
        vld1.32         {d5[1]},  [r1]
        vext.8          q3,  q2,  q2,  #1
        vtrn.16         q2,  q3
        vmull.u8        q8,  d4,  d0
        vmlal.u8        q8,  d5,  d1
  .ifc \type,avg
        vld1.16         {d18[0]}, [r0,:16], r2
        vld1.16         {d18[1]}, [r0,:16]
        sub             r0,  r0,  r2
  .endif
        vtrn.32         d16, d17
        vadd.i16        d16, d16, d17
        vrshrn.u16      d16, q8,  #6
  .ifc \type,avg
        vrhadd.u8       d16, d16, d18
  .endif
        vst1.16         {d16[0]}, [r0,:16], r2
        vst1.16         {d16[1]}, [r0,:16], r2
        subs            r3,  r3,  #2
        bgt             1b
        pop             {r4-r6, pc}
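        @ x == 0 && y == 0: no filtering needed,
        @ plain copy for put, average with dst for avg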
2:
  .ifc \type,put
        ldrh_post       r5,  r1,  r2
        strh_post       r5,  r0,  r2
        ldrh_post       r6,  r1,  r2
        strh_post       r6,  r0,  r2
  .else
        vld1.16         {d16[0]}, [r1], r2
        vld1.16         {d16[1]}, [r1], r2
        vld1.16         {d18[0]}, [r0,:16], r2
        vld1.16         {d18[1]}, [r0,:16]
        sub             r0,  r0,  r2
        vrhadd.u8       d16, d16, d18
        vst1.16         {d16[0]}, [r0,:16], r2
        vst1.16         {d16[1]}, [r0,:16], r2
  .endif
        subs            r3,  r3,  #2
        bgt             2b
        pop             {r4-r6, pc}
endfunc
.endm

        h264_chroma_mc8 put
        h264_chroma_mc8 avg
        h264_chroma_mc4 put
        h264_chroma_mc4 avg
        h264_chroma_mc2 put
        h264_chroma_mc2 avg