/*
 * Copyright (c) 2016 Google Inc.
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/arm/asm.S"
@ Do an 8x8 transpose, using q registers for the subtransposes that don't
@ need to address the individual d registers.
@ r0,r1 == rq0, r2,r3 == rq1, etc
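@ (The usual approach for such a byte transpose is three vtrn passes - 32,
@ 16 and 8 bit - where the first two can operate on whole q registers and
@ only the last one needs the individual d registers.)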
.macro transpose_q_8x8 rq0, rq1, rq2, rq3, r0, r1, r2, r3, r4, r5, r6, r7
@ Do a 4x4 transpose, using q registers for the subtransposes that don't
@ need to address the individual d registers.
@ r0,r1 == rq0, r2,r3 == rq1
.macro transpose_q_4x4 rq0, rq1, r0, r1, r2, r3
@ The input to and output from this macro is in the registers d16-d31,
@ and d0-d7 are used as scratch registers.
@ p7 = d16 .. p3 = d20, p0 = d23, q0 = d24, q3 = d27, q7 = d31
@ Depending on the width of the loop filter, we either use d16-d19
@ and d28-d31 as temp registers, or d8-d15.
@ tmp1,tmp2 = tmpq1, tmp3,tmp4 = tmpq2, tmp5,tmp6 = tmpq3, tmp7,tmp8 = tmpq4
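@ As a rough C sketch (an illustration of the decision logic assembled below,
@ not the exact reference code), the masks computed first are:
@   fm  = max(|p3-p2|, |p2-p1|, |p1-p0|, |q1-q0|, |q2-q1|, |q3-q2|) <= I &&
@         |p0-q0| * 2 + (|p1-q1| >> 1) <= E
@   hev = max(|p1-p0|, |q1-q0|) > H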
.macro loop_filter wd, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmpq1, tmpq2, tmpq3, tmpq4
        vabd.u8         d4,  d20, d21  @ abs(p3 - p2)
        vabd.u8         d5,  d21, d22  @ abs(p2 - p1)
        vabd.u8         d6,  d22, d23  @ abs(p1 - p0)
        vabd.u8         d7,  d24, d25  @ abs(q0 - q1)
        vabd.u8         \tmp1, d25, d26 @ abs(q1 - q2)
        vabd.u8         \tmp2, d26, d27 @ abs(q2 - q3)
        vmax.u8         \tmp1, \tmp1, \tmp2
        vabdl.u8        q3,  d23, d24  @ abs(p0 - q0)
        vadd.u16        q3,  q3,  q3   @ abs(p0 - q0) * 2
        vabd.u8         d5,  d22, d25  @ abs(p1 - q1)
        vmax.u8         d4,  d4,  \tmp1 @ max(abs(p3 - p2), ..., abs(q2 - q3))
        vcle.u8         d4,  d4,  d2   @ max(abs()) <= I
        vaddw.u8        q3,  q3,  d5   @ abs(p0 - q0) * 2 + abs(p1 - q1) >> 1
        @ If no pixels need filtering, just exit as soon as possible
        vabd.u8         d6,  d20, d23  @ abs(p3 - p0)
        vabd.u8         d2,  d21, d23  @ abs(p2 - p0)
        vabd.u8         d1,  d22, d23  @ abs(p1 - p0)
        vabd.u8         \tmp1, d25, d24 @ abs(q1 - q0)
        vabd.u8         \tmp2, d26, d24 @ abs(q2 - q0)
        vabd.u8         \tmp3, d27, d24 @ abs(q3 - q0)
        vmax.u8         \tmp2, \tmp2, \tmp3
        vabd.u8         d7,  d16, d23  @ abs(p7 - p0)
        vabd.u8         d2,  d17, d23  @ abs(p6 - p0)
        vmax.u8         d6,  d6,  \tmp2
        vabd.u8         d1,  d18, d23  @ abs(p5 - p0)
        vcle.u8         d6,  d6,  d0   @ flat8in
        vabd.u8         d8,  d19, d23  @ abs(p4 - p0)
        vand            d6,  d6,  d4   @ flat8in && fm
        vabd.u8         d9,  d28, d24  @ abs(q4 - q0)
        vbic            d4,  d4,  d6   @ fm && !flat8in
        vabd.u8         d10, d29, d24  @ abs(q5 - q0)
        vabd.u8         d11, d30, d24  @ abs(q6 - q0)
        vabd.u8         d12, d31, d24  @ abs(q7 - q0)
        vmax.u8         d11, d11, d12
        @ The rest of the calculation of flat8out is interleaved below
        @ The rest of the calculation of flat8in is interleaved below
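        @ In rough C terms (a sketch of what the interleaved code below builds
        @ up, not the exact reference code):
        @   flat8in  = max(|p3-p0|, |p2-p0|, |p1-p0|,
        @                  |q1-q0|, |q2-q0|, |q3-q0|) <= flat_thresh
        @   flat8out = max(|p7-p0|, |p6-p0|, |p5-p0|, |p4-p0|,
        @                  |q4-q0|, |q5-q0|, |q6-q0|, |q7-q0|) <= flat_thresh
        @ with flat_thresh (held in d0 here) being 1 for 8 bit content.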
        @ Calculate the normal inner loop filter for 2 or 4 pixels
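        @ Hedged C sketch of this inner filter (illustrative only):
        @   f  = av_clip_int8(3 * (q0 - p0) + (hev ? av_clip_int8(p1 - q1) : 0))
        @   f1 = av_clip_int8(f + 4) >> 3,  f2 = av_clip_int8(f + 3) >> 3
        @   p0 += f2,  q0 -= f1  (saturated to 0..255)
        @   if (!hev) { f = (f1 + 1) >> 1;  p1 += f;  q1 -= f; }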
        vabd.u8         d5,  d22, d23  @ abs(p1 - p0)
        vabd.u8         d1,  d25, d24  @ abs(q1 - q0)
        vmax.u8         d6,  d6,  \tmp2
        vsubl.u8        \tmpq1, d22, d25 @ p1 - q1
        vmax.u8         d5,  d5,  d1   @ max(abs(p1 - p0), abs(q1 - q0))
        vsubl.u8        \tmpq2, d24, d23 @ q0 - p0
        vcle.u8         d6,  d6,  d0   @ flat8in
        vcle.u8         d5,  d5,  d3   @ !hev
        vand            d6,  d6,  d4   @ flat8in && fm
        vqmovn.s16      \tmp1, \tmpq1  @ av_clip_int8(p1 - q1)
        vcle.u8         d7,  d7,  d0   @ flat8out
        vbic            d4,  d4,  d6   @ fm && !flat8in
        vand            d5,  d5,  d4   @ !hev && fm && !flat8in
        vand            d7,  d7,  d6   @ flat8out && flat8in && fm
        vmul.s16        \tmpq2, \tmpq2, \tmpq3 @ 3 * (q0 - p0)
        vbic            \tmp1, \tmp1, d5 @ if (!hev) av_clip_int8 = 0
        vaddw.s8        \tmpq2, \tmpq2, \tmp1 @ 3 * (q0 - p0) [+ av_clip_int8(p1 - q1)]
        vqmovn.s16      \tmp1, \tmpq2  @ f
        vbic            d6,  d6,  d7   @ fm && flat8in && !flat8out
        vqadd.s8        \tmp3, \tmp1, d2 @ FFMIN(f + 4, 127)
        vqadd.s8        \tmp4, \tmp1, d3 @ FFMIN(f + 3, 127)
        vmovl.u8        q0,  d23       @ p0
        vshr.s8         \tmp3, \tmp3, #3 @ f1
        vshr.s8         \tmp4, \tmp4, #3 @ f2
        vmovl.u8        q1,  d24       @ q0
        vaddw.s8        q0,  q0,  \tmp4 @ p0 + f2
        vsubw.s8        q1,  q1,  \tmp3 @ q0 - f1
        vqmovun.s16     d0,  q0        @ out p0
        vqmovun.s16     d1,  q1        @ out q0
        vrshr.s8        \tmp3, \tmp3, #1 @ f = (f1 + 1) >> 1
        vbit            d23, d0,  d4   @ if (fm && !flat8in)
        vmovl.u8        q0,  d22       @ p1
        vmovl.u8        q1,  d25       @ q1
        vaddw.s8        q0,  q0,  \tmp3 @ p1 + f
        vsubw.s8        q1,  q1,  \tmp3 @ q1 - f
        vqmovun.s16     d0,  q0        @ out p1
        vqmovun.s16     d2,  q1        @ out q1
        vbit            d22, d0,  d5   @ if (!hev && fm && !flat8in)

        @ If no pixels need flat8in, jump to flat8out
        @ (or to a writeout of the inner 4 pixels, for wd=8)
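        @ Hedged C sketch of the wd=8 (flat8in) filter below, which is built
        @ up as a running sum (only the p side is spelled out here; the q side
        @ mirrors it):
        @   p2' = (3*p3 + 2*p2 + p1 + p0 + q0          + 4) >> 3
        @   p1' = (2*p3 + p2 + 2*p1 + p0 + q0 + q1     + 4) >> 3
        @   p0' = (p3 + p2 + p1 + 2*p0 + q0 + q1 + q2  + 4) >> 3
        @   q0' = (p2 + p1 + p0 + 2*q0 + q1 + q2 + q3  + 4) >> 3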
        vaddl.u8        \tmpq1, d20, d21
        vaddl.u8        \tmpq2, d22, d25
        vaddl.u8        \tmpq3, d20, d22
        vaddl.u8        \tmpq4, d23, d26
        vadd.u16        q0,  \tmpq1, \tmpq1
        vadd.u16        q0,  q0,  \tmpq3
        vsub.s16        \tmpq2, \tmpq2, \tmpq1
        vsub.s16        \tmpq4, \tmpq4, \tmpq3
        vrshrn.u16      d2,  q0,  #3   @ out p2
        vadd.u16        q0,  q0,  \tmpq2
        vaddl.u8        \tmpq1, d20, d23
        vaddl.u8        \tmpq2, d24, d27
        vrshrn.u16      d3,  q0,  #3   @ out p1
        vadd.u16        q0,  q0,  \tmpq4
        vsub.s16        \tmpq2, \tmpq2, \tmpq1
        vaddl.u8        \tmpq3, d21, d24
        vaddl.u8        \tmpq4, d25, d27
        vrshrn.u16      d4,  q0,  #3   @ out p0
        vadd.u16        q0,  q0,  \tmpq2
        vsub.s16        \tmpq4, \tmpq4, \tmpq3
        vaddl.u8        \tmpq1, d22, d25
        vaddl.u8        \tmpq2, d26, d27
        vrshrn.u16      d5,  q0,  #3   @ out q0
        vadd.u16        q0,  q0,  \tmpq4
        vsub.s16        \tmpq2, \tmpq2, \tmpq1
        vrshrn.u16      \tmp5, q0, #3  @ out q1
        vadd.u16        q0,  q0,  \tmpq2
        @ The output here is written back into the input registers. This doesn't
        @ matter for the flat8out part below, since we only update those pixels
        @ which won't be touched below.
        vrshrn.u16      \tmp6, q0, #3  @ out q2
        @ If no pixels needed flat8in nor flat8out, jump to a
        @ writeout of the inner 4 pixels
        @ If no pixels need flat8out, jump to a writeout of the inner 6 pixels

        @ This writes all outputs into d2-d17 (skipping d7 and d16).
        @ If this part is skipped, the output is read from d21-d26 (which is the input
        @ to this section).
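        @ Hedged C sketch of the wd=16 (flat8out) filter below, again a
        @ running sum (each subsequent output drops one p7 term and pulls in
        @ the next q pixel):
        @   p6' = (7*p7 + 2*p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0      + 8) >> 4
        @   p5' = (6*p7 + p6 + 2*p5 + p4 + p3 + p2 + p1 + p0 + q0 + q1 + 8) >> 4
        @   ...
        @   q6' = (p0 + q0 + q1 + q2 + q3 + q4 + q5 + 2*q6 + 7*q7      + 8) >> 4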
        vshll.u8        q0,  d16, #3   @ 8 * d16
        vsubw.u8        q0,  q0,  d16  @ 7 * d16
        vaddl.u8        q4,  d17, d18
        vaddl.u8        q5,  d19, d20
        vaddl.u8        q4,  d16, d17
        vaddl.u8        q6,  d21, d22
        vaddl.u8        q5,  d18, d25
        vaddl.u8        q7,  d23, d24
        vaddl.u8        q6,  d16, d18
        vaddl.u8        q7,  d19, d26
        vrshrn.u16      d2,  q0,  #4
        vaddl.u8        q4,  d16, d19
        vaddl.u8        q5,  d20, d27
        vrshrn.u16      d3,  q0,  #4
        vaddl.u8        q6,  d16, d20
        vaddl.u8        q7,  d21, d28
        vrshrn.u16      d4,  q0,  #4
        vaddl.u8        q4,  d16, d21
        vaddl.u8        q5,  d22, d29
        vrshrn.u16      d5,  q0,  #4
        vaddl.u8        q6,  d16, d22
        vaddl.u8        q7,  d23, d30
        vrshrn.u16      d6,  q0,  #4
        vaddl.u8        q5,  d16, d23
        vaddl.u8        q6,  d24, d31
        vrshrn.u16      d8,  q0,  #4
        vaddl.u8        q6,  d17, d24
        vaddl.u8        q7,  d25, d31
        vrshrn.u16      d9,  q0,  #4
        vaddl.u8        q6,  d26, d31
        vrshrn.u16      d10, q0,  #4
        vaddl.u8        q7,  d18, d25
        vaddl.u8        q9,  d19, d26
        vaddl.u8        q7,  d27, d31
        vrshrn.u16      d11, q0,  #4
        vaddl.u8        q6,  d20, d27
        vaddl.u8        q9,  d28, d31
        vrshrn.u16      d12, q0,  #4
        vaddl.u8        q7,  d21, d28
        vaddl.u8        q10, d29, d31
        vrshrn.u16      d13, q0,  #4
        vsub.s16        q10, q10, q7
        vaddl.u8        q9,  d22, d29
        vaddl.u8        q11, d30, d31
        vrshrn.u16      d14, q0,  #4
        vsub.s16        q11, q11, q9
        vrshrn.u16      d15, q0,  #4
        vrshrn.u16      d17, q0,  #4
@ For wd <= 8, we use d16-d19 and d28-d31 for temp registers,
@ while we need those for inputs/outputs in wd=16 and use d8-d15
@ for temp registers there instead.
        loop_filter     4,  d16, d17, d18, d19, d28, d29, d30, d31, q8, q9, q14, q15
        loop_filter     8,  d16, d17, d18, d19, d28, d29, d30, d31, q8, q9, q14, q15
.macro loop_filter_16
        loop_filter     16, d8,  d9,  d10, d11, d12, d13, d14, d15, q4, q5, q6, q7
@ The public functions in this file have the following signature:
@ void loop_filter(uint8_t *dst, ptrdiff_t stride, int mb_lim, int lim, int hev_thr);
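@ Here mb_lim, lim and hev_thr are presumably the E, I and H thresholds used
@ in the loop_filter macro above. The _v_ variants filter across a horizontal
@ edge (rows above and below dst), the _h_ variants across a vertical edge
@ (columns left and right of dst); the trailing numbers give the filter width
@ and the number of pixels filtered along the edge.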
function ff_vp9_loop_filter_v_4_8_neon, export=1
        sub             r12, r0,  r1, lsl #2
        vld1.8          {d20}, [r12,:64], r1 @ p3
        vld1.8          {d24}, [r0, :64], r1 @ q0
        vld1.8          {d21}, [r12,:64], r1 @ p2
        vld1.8          {d25}, [r0, :64], r1 @ q1
        vld1.8          {d22}, [r12,:64], r1 @ p1
        vld1.8          {d26}, [r0, :64], r1 @ q2
        vld1.8          {d23}, [r12,:64], r1 @ p0
        vld1.8          {d27}, [r0, :64], r1 @ q3
        sub             r0,  r0,  r1, lsl #2
        sub             r12, r12, r1, lsl #1

        vst1.8          {d22}, [r12,:64], r1
        vst1.8          {d24}, [r0, :64], r1
        vst1.8          {d23}, [r12,:64], r1
        vst1.8          {d25}, [r0, :64], r1
function ff_vp9_loop_filter_h_4_8_neon, export=1
        add             r0,  r12, r1, lsl #2
        vld1.8          {d20}, [r12], r1
        vld1.8          {d24}, [r0],  r1
        vld1.8          {d21}, [r12], r1
        vld1.8          {d25}, [r0],  r1
        vld1.8          {d22}, [r12], r1
        vld1.8          {d26}, [r0],  r1
        vld1.8          {d23}, [r12], r1
        vld1.8          {d27}, [r0],  r1

        sub             r12, r12, r1, lsl #2
        sub             r0,  r0,  r1, lsl #2
        @ Move r0/r12 forward by 2 pixels; we don't need to rewrite the
        @ outermost 2 pixels since they aren't changed.
        @ Transpose the 8x8 pixels, taking advantage of q registers, to get
        @ one register per column.
        transpose_q_8x8 q10, q11, q12, q13, d20, d21, d22, d23, d24, d25, d26, d27

        @ We will only write the mid 4 pixels back; after the loop filter,
        @ these are in d22, d23, d24, d25 (q11, q12), ordered as rows
        @ (8x4 pixels). We need to transpose them to columns, done with a
        @ 4x4 transpose (which in practice is two 4x4 transposes of the two
        @ 4x4 halves of the 8x4 pixels; into 4x8 pixels).
        transpose_q_4x4 q11, q12, d22, d23, d24, d25
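        @ After this transpose, each 32 bit lane of d22-d25 holds one 4 pixel
        @ output row, so each store below writes 4 pixels of one row.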
        vst1.32         {d22[0]}, [r12], r1
        vst1.32         {d22[1]}, [r0],  r1
        vst1.32         {d23[0]}, [r12], r1
        vst1.32         {d23[1]}, [r0],  r1
        vst1.32         {d24[0]}, [r12], r1
        vst1.32         {d24[1]}, [r0],  r1
        vst1.32         {d25[0]}, [r12], r1
        vst1.32         {d25[1]}, [r0],  r1
function ff_vp9_loop_filter_v_8_8_neon, export=1
        sub             r12, r0,  r1, lsl #2
        vld1.8          {d20}, [r12,:64], r1 @ p3
        vld1.8          {d24}, [r0, :64], r1 @ q0
        vld1.8          {d21}, [r12,:64], r1 @ p2
        vld1.8          {d25}, [r0, :64], r1 @ q1
        vld1.8          {d22}, [r12,:64], r1 @ p1
        vld1.8          {d26}, [r0, :64], r1 @ q2
        vld1.8          {d23}, [r12,:64], r1 @ p0
        vld1.8          {d27}, [r0, :64], r1 @ q3
        sub             r12, r12, r1, lsl #2
        sub             r0,  r0,  r1, lsl #2

        vst1.8          {d21}, [r12,:64], r1
        vst1.8          {d24}, [r0, :64], r1
        vst1.8          {d22}, [r12,:64], r1
        vst1.8          {d25}, [r0, :64], r1
        vst1.8          {d23}, [r12,:64], r1
        vst1.8          {d26}, [r0, :64], r1

        sub             r12, r0,  r1, lsl #1
        vst1.8          {d22}, [r12,:64], r1
        vst1.8          {d24}, [r0, :64], r1
        vst1.8          {d23}, [r12,:64], r1
        vst1.8          {d25}, [r0, :64], r1
function ff_vp9_loop_filter_h_8_8_neon, export=1
        add             r0,  r12, r1, lsl #2
        vld1.8          {d20}, [r12], r1
        vld1.8          {d24}, [r0],  r1
        vld1.8          {d21}, [r12], r1
        vld1.8          {d25}, [r0],  r1
        vld1.8          {d22}, [r12], r1
        vld1.8          {d26}, [r0],  r1
        vld1.8          {d23}, [r12], r1
        vld1.8          {d27}, [r0],  r1

        sub             r12, r12, r1, lsl #2
        sub             r0,  r0,  r1, lsl #2

        transpose_q_8x8 q10, q11, q12, q13, d20, d21, d22, d23, d24, d25, d26, d27

        @ Even though only 6 pixels per row have been changed, we write the
        @ full 8 pixel registers.
        transpose_q_8x8 q10, q11, q12, q13, d20, d21, d22, d23, d24, d25, d26, d27

        vst1.8          {d20}, [r12], r1
        vst1.8          {d24}, [r0],  r1
        vst1.8          {d21}, [r12], r1
        vst1.8          {d25}, [r0],  r1
        vst1.8          {d22}, [r12], r1
        vst1.8          {d26}, [r0],  r1
        vst1.8          {d23}, [r12], r1
        vst1.8          {d27}, [r0],  r1

        @ If we didn't need to do the flat8in part, we use the same writeback
        @ as in loop_filter_h_4_8.
        transpose_q_4x4 q11, q12, d22, d23, d24, d25
        vst1.32         {d22[0]}, [r12], r1
        vst1.32         {d22[1]}, [r0],  r1
        vst1.32         {d23[0]}, [r12], r1
        vst1.32         {d23[1]}, [r0],  r1
        vst1.32         {d24[0]}, [r12], r1
        vst1.32         {d24[1]}, [r0],  r1
        vst1.32         {d25[0]}, [r12], r1
        vst1.32         {d25[1]}, [r0],  r1
function vp9_loop_filter_v_16_neon
        sub             r12, r0,  r1, lsl #3
        @ Read p7-p0 using r12 and q0-q7 using r0
        vld1.8          {d16}, [r12,:64], r1 @ p7
        vld1.8          {d24}, [r0, :64], r1 @ q0
        vld1.8          {d17}, [r12,:64], r1 @ p6
        vld1.8          {d25}, [r0, :64], r1 @ q1
        vld1.8          {d18}, [r12,:64], r1 @ p5
        vld1.8          {d26}, [r0, :64], r1 @ q2
        vld1.8          {d19}, [r12,:64], r1 @ p4
        vld1.8          {d27}, [r0, :64], r1 @ q3
        vld1.8          {d20}, [r12,:64], r1 @ p3
        vld1.8          {d28}, [r0, :64], r1 @ q4
        vld1.8          {d21}, [r12,:64], r1 @ p2
        vld1.8          {d29}, [r0, :64], r1 @ q5
        vld1.8          {d22}, [r12,:64], r1 @ p1
        vld1.8          {d30}, [r0, :64], r1 @ q6
        vld1.8          {d23}, [r12,:64], r1 @ p0
        vld1.8          {d31}, [r0, :64], r1 @ q7
        sub             r12, r12, r1, lsl #3
        sub             r0,  r0,  r1, lsl #3
        @ If we did the flat8out part, we get the output in
        @ d2-d17 (skipping d7 and d16). r12 points to r0 - 7 * stride,
        @ store d2-d9 there, and d10-d17 into r0.
        vst1.8          {d2},  [r12,:64], r1
        vst1.8          {d10}, [r0, :64], r1
        vst1.8          {d3},  [r12,:64], r1
        vst1.8          {d11}, [r0, :64], r1
        vst1.8          {d4},  [r12,:64], r1
        vst1.8          {d12}, [r0, :64], r1
        vst1.8          {d5},  [r12,:64], r1
        vst1.8          {d13}, [r0, :64], r1
        vst1.8          {d6},  [r12,:64], r1
        vst1.8          {d14}, [r0, :64], r1
        vst1.8          {d8},  [r12,:64], r1
        vst1.8          {d15}, [r0, :64], r1
        vst1.8          {d9},  [r12,:64], r1
        vst1.8          {d17}, [r0, :64], r1
        sub             r0,  r0,  r1, lsl #3
        add             r12, r12, r1, lsl #2
        @ If we didn't do the flat8out part, the output is left in the
        @ input registers (d21-d26).
        vst1.8          {d21}, [r12,:64], r1
        vst1.8          {d24}, [r0, :64], r1
        vst1.8          {d22}, [r12,:64], r1
        vst1.8          {d25}, [r0, :64], r1
        vst1.8          {d23}, [r12,:64], r1
        vst1.8          {d26}, [r0, :64], r1
        sub             r0,  r0,  r1, lsl #1
        sub             r12, r0,  r1, lsl #1
        vst1.8          {d22}, [r12,:64], r1
        vst1.8          {d24}, [r0, :64], r1
        vst1.8          {d23}, [r12,:64], r1
        vst1.8          {d25}, [r0, :64], r1
        sub             r0,  r0,  r1, lsl #1
function ff_vp9_loop_filter_v_16_8_neon, export=1
        bl              vp9_loop_filter_v_16_neon

function ff_vp9_loop_filter_v_16_16_neon, export=1
        // The filter clobbers r2 and r3, but we need to keep them for the second round
        bl              vp9_loop_filter_v_16_neon

        bl              vp9_loop_filter_v_16_neon
function vp9_loop_filter_h_16_neon
        vld1.8          {d16}, [r12,:64], r1
        vld1.8          {d24}, [r0, :64], r1
        vld1.8          {d17}, [r12,:64], r1
        vld1.8          {d25}, [r0, :64], r1
        vld1.8          {d18}, [r12,:64], r1
        vld1.8          {d26}, [r0, :64], r1
        vld1.8          {d19}, [r12,:64], r1
        vld1.8          {d27}, [r0, :64], r1
        vld1.8          {d20}, [r12,:64], r1
        vld1.8          {d28}, [r0, :64], r1
        vld1.8          {d21}, [r12,:64], r1
        vld1.8          {d29}, [r0, :64], r1
        vld1.8          {d22}, [r12,:64], r1
        vld1.8          {d30}, [r0, :64], r1
        vld1.8          {d23}, [r12,:64], r1
        vld1.8          {d31}, [r0, :64], r1
        sub             r0,  r0,  r1, lsl #3
        sub             r12, r12, r1, lsl #3
        @ The 16x8 pixels read above are in two 8x8 blocks; the left
        @ half in d16-d23, and the right half in d24-d31. Do two 8x8 transposes
        @ of this, to get one column per register. This could be done with two
        @ transpose_8x8 as below, but this takes advantage of the q registers.
        transpose16_4x4 q8,  q9,  q10, q11, q12, q13, q14, q15
        @ Transpose back; this is the same transpose as above, but
        @ we can't take advantage of q registers for the transpose, since
        @ not all of the d registers in the transpose are consecutive.
        transpose_8x8   d16, d2,  d3,  d4,  d5,  d6,  d8,  d9
        transpose_8x8   d10, d11, d12, d13, d14, d15, d17, d31

        vst1.8          {d16}, [r12,:64], r1
        vst1.8          {d10}, [r0, :64], r1
        vst1.8          {d2},  [r12,:64], r1
        vst1.8          {d11}, [r0, :64], r1
        vst1.8          {d3},  [r12,:64], r1
        vst1.8          {d12}, [r0, :64], r1
        vst1.8          {d4},  [r12,:64], r1
        vst1.8          {d13}, [r0, :64], r1
        vst1.8          {d5},  [r12,:64], r1
        vst1.8          {d14}, [r0, :64], r1
        vst1.8          {d6},  [r12,:64], r1
        vst1.8          {d15}, [r0, :64], r1
        vst1.8          {d8},  [r12,:64], r1
        vst1.8          {d17}, [r0, :64], r1
        vst1.8          {d9},  [r12,:64], r1
        vst1.8          {d31}, [r0, :64], r1
        sub             r0,  r0,  r1, lsl #3
        @ The same writeback as in loop_filter_h_8_8
        add             r0,  r12, r1, lsl #2
        transpose_q_8x8 q10, q11, q12, q13, d20, d21, d22, d23, d24, d25, d26, d27

        vst1.8          {d20}, [r12], r1
        vst1.8          {d24}, [r0],  r1
        vst1.8          {d21}, [r12], r1
        vst1.8          {d25}, [r0],  r1
        vst1.8          {d22}, [r12], r1
        vst1.8          {d26}, [r0],  r1
        vst1.8          {d23}, [r12], r1
        vst1.8          {d27}, [r0],  r1
        sub             r0,  r0,  r1, lsl #3

        @ The same writeback as in loop_filter_h_4_8
        add             r0,  r12, r1, lsl #2
        transpose_q_4x4 q11, q12, d22, d23, d24, d25
        vst1.32         {d22[0]}, [r12], r1
        vst1.32         {d22[1]}, [r0],  r1
        vst1.32         {d23[0]}, [r12], r1
        vst1.32         {d23[1]}, [r0],  r1
        vst1.32         {d24[0]}, [r12], r1
        vst1.32         {d24[1]}, [r0],  r1
        vst1.32         {d25[0]}, [r12], r1
        vst1.32         {d25[1]}, [r0],  r1
        sub             r0,  r0,  r1, lsl #3
function ff_vp9_loop_filter_h_16_8_neon, export=1
        bl              vp9_loop_filter_h_16_neon

function ff_vp9_loop_filter_h_16_16_neon, export=1
        // The filter clobbers r2 and r3, but we need to keep them for the second round
        bl              vp9_loop_filter_h_16_neon
        add             r0,  r0,  r1, lsl #3

        bl              vp9_loop_filter_h_16_neon