/*
 * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

/* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
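/*
 * The functions below implement the 1/8-pel bilinear chroma interpolation
 * used by H.264 (and, with a different rounding rule, RV40).  As a rough C
 * sketch of the per-pixel operation being vectorised here (the names A..D
 * are explanatory only, not taken from this file):
 *
 *     A = (8 - x) * (8 - y);   B = x * (8 - y);
 *     C = (8 - x) * y;         D = x * y;
 *     dst[i] = (A * src[i] + B * src[i + 1] +
 *               C * src[i + stride] + D * src[i + stride + 1] + 32) >> 6;
 *
 * The rv40 variants replace the rounding constant 32 with a bias loaded from
 * the rv40bias table at the end of this file and truncate (vshrn) instead of
 * rounding (vrshrn) when shifting down; the avg variants additionally average
 * the result with the existing dst pixels using vrhadd.
 */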
.macro  h264_chroma_mc8 type, codec=h264
function ff_\type\()_\codec\()_chroma_mc8_neon, export=1
        push            {r4-r7, lr}
        ldrd            r4,  r5,  [sp, #20]
  .ifc \type,avg
        mov             lr,  r0
  .endif
        pld             [r1]
        pld             [r1, r2]

  .ifc \codec,rv40
        movrel          r6,  rv40bias
        lsr             r7,  r5,  #1
        add             r6,  r6,  r7,  lsl #3
        lsr             r7,  r4,  #1
        add             r6,  r6,  r7,  lsl #1
        vld1.16         {d22[],d23[]}, [r6,:16]
  .endif

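/*
 * Compute the four bilinear weights from x (r4) and y (r5); with the
 * formula sketched above, the register usage is:
 *   r7  = x * y              (weight D, also the zero test for the 1-D paths)
 *   r6  = (8 - x) * y        (weight C)
 *   r12 = x * (8 - y)        (weight B)
 *   r4  = (8 - x) * (8 - y)  (weight A)
 */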
A       muls            r7,  r4,  r5
T       mul             r7,  r4,  r5
T       cmp             r7,  #0
        rsb             r6,  r7,  r5,  lsl #3
        rsb             r12, r7,  r4,  lsl #3
        sub             r4,  r7,  r4,  lsl #3
        sub             r4,  r4,  r5,  lsl #3
        add             r4,  r4,  #64

        beq             2f

        add             r5,  r1,  r2

        vdup.8          d0,  r4
        lsl             r4,  r2,  #1
        vdup.8          d1,  r12
        vld1.8          {d4, d5}, [r1], r4
        vdup.8          d2,  r6
        vdup.8          d3,  r7

        vext.8          d5,  d4,  d5,  #1

1:
        vld1.64         {d6, d7}, [r5], r4
        pld             [r5]
        vmull.u8        q8,  d4,  d0
        vext.8          d7,  d6,  d7,  #1
        vmlal.u8        q8,  d5,  d1
        vld1.8          {d4, d5}, [r1], r4
        vmlal.u8        q8,  d6,  d2
        vext.8          d5,  d4,  d5,  #1
        vmlal.u8        q8,  d7,  d3
        vmull.u8        q9,  d6,  d0
        subs            r3,  r3,  #2
        vmlal.u8        q9,  d7,  d1
        vmlal.u8        q9,  d4,  d2
        vmlal.u8        q9,  d5,  d3
        pld             [r1]
  .ifc \codec,h264
        vrshrn.u16      d16, q8,  #6
        vrshrn.u16      d17, q9,  #6
  .else
        vadd.u16        q8,  q8,  q11
        vadd.u16        q9,  q9,  q11
        vshrn.u16       d16, q8,  #6
        vshrn.u16       d17, q9,  #6
  .endif
  .ifc \type,avg
        vld1.8          {d20}, [lr,:64], r2
        vld1.8          {d21}, [lr,:64], r2
        vrhadd.u8       q8,  q8,  q10
  .endif
        vst1.8          {d16}, [r0,:64], r2
        vst1.8          {d17}, [r0,:64], r2
        bgt             1b

        pop             {r4-r7, pc}

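/* x * y == 0: only one of the two filter directions (or neither) is active,
   so a two-tap filter is enough; after the add below, d1 holds the combined
   second-tap weight 8*x + 8*y. */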
2:      tst             r6,  r6
        add             r12, r12, r6
        vdup.8          d0,  r4
        vdup.8          d1,  r12

        beq             4f

        add             r5,  r1,  r2
        lsl             r4,  r2,  #1

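/* x == 0, y != 0: vertical-only filtering between the rows at r1 and r5. */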
3:
        vld1.8          {d4}, [r1], r4
        vld1.8          {d6}, [r5], r4

        pld             [r5]
        vmull.u8        q8,  d4,  d0
        vmlal.u8        q8,  d6,  d1
        vmull.u8        q9,  d6,  d0
        vmlal.u8        q9,  d4,  d1
  .ifc \codec,h264
        vrshrn.u16      d16, q8,  #6
        vrshrn.u16      d17, q9,  #6
  .else
        vadd.u16        q8,  q8,  q11
        vadd.u16        q9,  q9,  q11
        vshrn.u16       d16, q8,  #6
        vshrn.u16       d17, q9,  #6
  .endif
  .ifc \type,avg
        vld1.8          {d20}, [lr,:64], r2
        vld1.8          {d21}, [lr,:64], r2
        vrhadd.u8       q8,  q8,  q10
  .endif
        subs            r3,  r3,  #2
        pld             [r1]
        vst1.8          {d16}, [r0,:64], r2
        vst1.8          {d17}, [r0,:64], r2
        bgt             3b

        pop             {r4-r7, pc}

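/* y == 0: horizontal-only filtering within each row (effectively a plain
   copy when x is also zero). */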
4:      vld1.8          {d4, d5}, [r1], r2
        vld1.8          {d6, d7}, [r1], r2
        vext.8          d5,  d4,  d5,  #1
        vext.8          d7,  d6,  d7,  #1

        pld             [r1]
        subs            r3,  r3,  #2
        vmull.u8        q8,  d4,  d0
        vmlal.u8        q8,  d5,  d1
        vmull.u8        q9,  d6,  d0
        vmlal.u8        q9,  d7,  d1
        pld             [r1]
  .ifc \codec,h264
        vrshrn.u16      d16, q8,  #6
        vrshrn.u16      d17, q9,  #6
  .else
        vadd.u16        q8,  q8,  q11
        vadd.u16        q9,  q9,  q11
        vshrn.u16       d16, q8,  #6
        vshrn.u16       d17, q9,  #6
  .endif
  .ifc \type,avg
        vld1.8          {d20}, [lr,:64], r2
        vld1.8          {d21}, [lr,:64], r2
        vrhadd.u8       q8,  q8,  q10
  .endif
        vst1.8          {d16}, [r0,:64], r2
        vst1.8          {d17}, [r0,:64], r2
        bgt             4b

        pop             {r4-r7, pc}
endfunc
.endm

/* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
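/* Same bilinear filter as mc8, but for 4-pixel-wide blocks; two output rows
   are computed per iteration by packing them into one q register (see the
   vtrn setup below). */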
.macro  h264_chroma_mc4 type, codec=h264
function ff_\type\()_\codec\()_chroma_mc4_neon, export=1
        push            {r4-r7, lr}
        ldrd            r4,  r5,  [sp, #20]
  .ifc \type,avg
        mov             lr,  r0
  .endif
        pld             [r1]
        pld             [r1, r2]

  .ifc \codec,rv40
        movrel          r6,  rv40bias
        lsr             r7,  r5,  #1
        add             r6,  r6,  r7,  lsl #3
        lsr             r7,  r4,  #1
        add             r6,  r6,  r7,  lsl #1
        vld1.16         {d22[],d23[]}, [r6,:16]
  .endif

A       muls            r7,  r4,  r5
T       mul             r7,  r4,  r5
T       cmp             r7,  #0
        rsb             r6,  r7,  r5,  lsl #3
        rsb             r12, r7,  r4,  lsl #3
        sub             r4,  r7,  r4,  lsl #3
        sub             r4,  r4,  r5,  lsl #3
        add             r4,  r4,  #64

        beq             2f

        add             r5,  r1,  r2

        vdup.8          d0,  r4
        lsl             r4,  r2,  #1
        vdup.8          d1,  r12
        vld1.8          {d4},     [r1], r4
        vdup.8          d2,  r6
        vdup.8          d3,  r7

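/* Interleave weights and pixels with vtrn.32 so that the low half of each
   d register carries one filter tap (or row) and the high half the other;
   a single vmull/vmlal then applies two taps at once, and the halves are
   summed with vadd.i16 further down. */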
        vext.8          d5,  d4,  d5,  #1
        vtrn.32         d0,  d1
        vtrn.32         d2,  d3
        vtrn.32         d4,  d5

1:
        vld1.8          {d6},     [r5], r4
        pld             [r5]
        vext.8          d7,  d6,  d7,  #1
        vmull.u8        q8,  d4,  d0
        vtrn.32         d6,  d7

        vld1.8          {d4},     [r1], r4
        vmlal.u8        q8,  d6,  d2
        vext.8          d5,  d4,  d5,  #1
        vmull.u8        q9,  d6,  d0
        vtrn.32         d4,  d5
        vmlal.u8        q9,  d4,  d2

        vadd.i16        d16, d16, d17
        vadd.i16        d17, d18, d19
  .ifc \codec,h264
        vrshrn.u16      d16, q8,  #6
  .else
        vadd.u16        q8,  q8,  q11
        vshrn.u16       d16, q8,  #6
  .endif
        subs            r3,  r3,  #2
        pld             [r1]
  .ifc \type,avg
        vld1.32         {d20[0]}, [lr,:32], r2
        vld1.32         {d20[1]}, [lr,:32], r2
        vrhadd.u8       d16, d16, d20
  .endif
        vst1.32         {d16[0]}, [r0,:32], r2
        vst1.32         {d16[1]}, [r0,:32], r2
        bgt             1b

        pop             {r4-r7, pc}

2:      tst             r6,  r6
        add             r12, r12, r6
        vdup.8          d0,  r4
        vdup.8          d1,  r12
        vtrn.32         d0,  d1

        beq             4f

        vext.32         d1,  d0,  d1,  #1
        add             r5,  r1,  r2
        lsl             r4,  r2,  #1
        vld1.32         {d4[0]},  [r1], r4
3:
        vld1.32         {d4[1]},  [r5], r4

        pld             [r5]
        vmull.u8        q8,  d4,  d0
        vld1.32         {d4[0]},  [r1], r4
        vmull.u8        q9,  d4,  d1

        vadd.i16        d16, d16, d17
        vadd.i16        d17, d18, d19
  .ifc \codec,h264
        vrshrn.u16      d16, q8,  #6
  .else
        vadd.u16        q8,  q8,  q11
        vshrn.u16       d16, q8,  #6
  .endif
  .ifc \type,avg
        vld1.32         {d20[0]}, [lr,:32], r2
        vld1.32         {d20[1]}, [lr,:32], r2
        vrhadd.u8       d16, d16, d20
  .endif
        subs            r3,  r3,  #2
        pld             [r1]
        vst1.32         {d16[0]}, [r0,:32], r2
        vst1.32         {d16[1]}, [r0,:32], r2
        bgt             3b

        pop             {r4-r7, pc}

4:      vld1.8          {d4},     [r1], r2
        vld1.8          {d6},     [r1], r2
        vext.8          d5,  d4,  d5,  #1
        vext.8          d7,  d6,  d7,  #1
        vtrn.32         d4,  d5
        vtrn.32         d6,  d7

        vmull.u8        q8,  d4,  d0
        vmull.u8        q9,  d6,  d0
        subs            r3,  r3,  #2

        vadd.i16        d16, d16, d17
        vadd.i16        d17, d18, d19
        pld             [r1]
  .ifc \codec,h264
        vrshrn.u16      d16, q8,  #6
  .else
        vadd.u16        q8,  q8,  q11
        vshrn.u16       d16, q8,  #6
  .endif
  .ifc \type,avg
        vld1.32         {d20[0]}, [lr,:32], r2
        vld1.32         {d20[1]}, [lr,:32], r2
        vrhadd.u8       d16, d16, d20
  .endif
        pld             [r1]
        vst1.32         {d16[0]}, [r0,:32], r2
        vst1.32         {d16[1]}, [r0,:32], r2
        bgt             4b

        pop             {r4-r7, pc}
endfunc
.endm

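/* chroma_mc2(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y)
   2-pixel-wide variant (H.264 only): all four filter taps are packed into
   q0/q1 and two output rows are produced per iteration. */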
.macro  h264_chroma_mc2 type
function ff_\type\()_h264_chroma_mc2_neon, export=1
        push            {r4-r6, lr}
        ldr             r4,  [sp, #16]
        ldr             lr,  [sp, #20]
        pld             [r1]
        pld             [r1, r2]
        orrs            r5,  r4,  lr
        beq             2f

        mul             r5,  r4,  lr
        rsb             r6,  r5,  lr,  lsl #3
        rsb             r12, r5,  r4,  lsl #3
        sub             r4,  r5,  r4,  lsl #3
        sub             r4,  r4,  lr,  lsl #3
        add             r4,  r4,  #64
        vdup.8          d0,  r4
        vdup.8          d2,  r12
        vdup.8          d1,  r6
        vdup.8          d3,  r5
        vtrn.16         q0,  q1
1:
        vld1.32         {d4[0]},  [r1], r2
        vld1.32         {d4[1]},  [r1], r2
        vrev64.32       d5,  d4
        vld1.32         {d5[1]},  [r1]
        vext.8          q3,  q2,  q2,  #1
        vtrn.16         q2,  q3
        vmull.u8        q8,  d4,  d0
        vmlal.u8        q8,  d5,  d1
  .ifc \type,avg
        vld1.16         {d18[0]}, [r0,:16], r2
        vld1.16         {d18[1]}, [r0,:16]
        sub             r0,  r0,  r2
  .endif
        vtrn.32         d16, d17
        vadd.i16        d16, d16, d17
        vrshrn.u16      d16, q8,  #6
  .ifc \type,avg
        vrhadd.u8       d16, d16, d18
  .endif
        vst1.16         {d16[0]}, [r0,:16], r2
        vst1.16         {d16[1]}, [r0,:16], r2
        subs            r3,  r3,  #2
        bgt             1b
        pop             {r4-r6, pc}
2:
  .ifc \type,put
        ldrh_post       r5,  r1,  r2
        strh_post       r5,  r0,  r2
        ldrh_post       r6,  r1,  r2
        strh_post       r6,  r0,  r2
  .else
        vld1.16         {d16[0]}, [r1], r2
        vld1.16         {d16[1]}, [r1], r2
        vld1.16         {d18[0]}, [r0,:16], r2
        vld1.16         {d18[1]}, [r0,:16]
        sub             r0,  r0,  r2
        vrhadd.u8       d16, d16, d18
        vst1.16         {d16[0]}, [r0,:16], r2
        vst1.16         {d16[1]}, [r0,:16], r2
  .endif
        subs            r3,  r3,  #2
        bgt             2b
        pop             {r4-r6, pc}
endfunc
.endm

#if CONFIG_H264_DECODER
        h264_chroma_mc8 put
        h264_chroma_mc8 avg
        h264_chroma_mc4 put
        h264_chroma_mc4 avg
        h264_chroma_mc2 put
        h264_chroma_mc2 avg
#endif

#if CONFIG_RV40_DECODER
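/* Per-position rounding bias for the rv40 variants: 16 .short entries,
   indexed as rv40bias[(y >> 1) * 4 + (x >> 1)]; the mc8/mc4 prologues
   compute the byte offset (y >> 1) * 8 + (x >> 1) * 2 into r6. */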
const   rv40bias
        .short           0, 16, 32, 16
        .short          32, 28, 32, 28
        .short           0, 32, 16, 32
        .short          32, 28, 32, 28
endconst

        h264_chroma_mc8 put, rv40
        h264_chroma_mc8 avg, rv40
        h264_chroma_mc4 put, rv40
        h264_chroma_mc4 avg, rv40
#endif