swscale/arm/yuv2rgb: simplify process_16px_* macro call
libswscale/arm/yuv2rgb_neon.S
/*
 * Copyright (c) 2015 Matthieu Bouron <matthieu.bouron stupeflix.com>
 * Copyright (c) 2015 Clément Bœsch <clement stupeflix.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

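@ Precompute the chroma contributions for 16 pixels (16-bit precision path).
@ Inputs: \half_u1/\half_u2 = U - 128 and \half_v1/\half_v2 = V - 128 (8 s16
@ values each), d1 = {v2r, u2g, v2g, u2b}. Each chroma sample is duplicated
@ horizontally (one 4:2:0 chroma pair covers two luma pixels), then:
@   q8/q9   = V * v2r             (red)
@   q10/q11 = U * u2g + V * v2g   (green)
@   q12/q13 = U * u2b             (blue)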
.macro compute_premult_16 half_u1, half_u2, half_v1, half_v2
    vmov                d2, \half_u1                                   @ copy left q14 to left q1
    vmov                d3, \half_u1                                   @ copy left q14 to right q1
    vmov                d4, \half_u2                                   @ copy right q14 to left q2
    vmov                d5, \half_u2                                   @ copy right q14 to right q2

    vmov                d6, \half_v1                                   @ copy left q15 to left q3
    vmov                d7, \half_v1                                   @ copy left q15 to right q3
    vmov                d8, \half_v2                                   @ copy right q15 to left q4
    vmov                d9, \half_v2                                   @ copy right q15 to right q4

    vzip.16             d2, d3                                         @ U1U1U2U2U3U3U4U4
    vzip.16             d4, d5                                         @ U5U5U6U6U7U7U8U8

    vzip.16             d6, d7                                         @ V1V1V2V2V3V3V4V4
    vzip.16             d8, d9                                         @ V5V5V6V6V7V7V8V8

    vmul.s16            q8,  q3, d1[0]                                 @  V * v2r             (left,  red)
    vmul.s16            q9,  q4, d1[0]                                 @  V * v2r             (right, red)
    vmul.s16            q10, q1, d1[1]                                 @  U * u2g
    vmul.s16            q11, q2, d1[1]                                 @  U * u2g
    vmla.s16            q10, q3, d1[2]                                 @  U * u2g + V * v2g   (left,  green)
    vmla.s16            q11, q4, d1[2]                                 @  U * u2g + V * v2g   (right, green)
    vmul.s16            q12, q1, d1[3]                                 @  U * u2b             (left,  blue)
    vmul.s16            q13, q2, d1[3]                                 @  U * u2b             (right, blue)
.endm

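@ Same as compute_premult_16 but for 8 pixels at 32-bit precision: the
@ widening multiplies (vmull/vmlal) keep the products in 32-bit q8..q13
@ instead of 16-bit registers.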
.macro compute_premult_32 half_u half_v
    vmov                d2, \half_u                                    @ copy left q14 to left q1
    vmov                d3, \half_u                                    @ copy left q14 to right q1
    vmov                d4, \half_v                                    @ copy left q15 to left q2
    vmov                d5, \half_v                                    @ copy left q15 to right q2

    vzip.16             d2, d3                                         @ U1U1U2U2U3U3U4U4
    vzip.16             d4, d5                                         @ V1V1V2V2V3V3V4V4

    vmull.s16           q8,  d4, d1[0]                                 @  V * v2r             (left,  red)
    vmull.s16           q9,  d5, d1[0]                                 @  V * v2r             (right, red)
    vmull.s16           q10, d2, d1[1]                                 @  U * u2g
    vmull.s16           q11, d3, d1[1]                                 @  U * u2g
    vmlal.s16           q10, d4, d1[2]                                 @  U * u2g + V * v2g   (left,  green)
    vmlal.s16           q11, d5, d1[2]                                 @  U * u2g + V * v2g   (right, green)
    vmull.s16           q12, d2, d1[3]                                 @  U * u2b             (left,  blue)
    vmull.s16           q13, d3, d1[3]                                 @  U * u2b             (right, blue)
.endm

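@ Add the luma term (q14/q15) to a precomputed chroma contribution and narrow
@ to 8 bits with rounding and saturation (>> 6), 16 pixels at a time.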
.macro compute_color_16 dst_comp1 dst_comp2 pre1 pre2
    vadd.s16            q1, q14, \pre1
    vadd.s16            q2, q15, \pre2
    vqrshrun.s16        \dst_comp1, q1, #6
    vqrshrun.s16        \dst_comp2, q2, #6
.endm

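@ 32-bit precision variant for 8 pixels: add the luma term (q1/q2) to the
@ chroma contribution, round and shift down by 13, then saturate to 8 bits.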
.macro compute_color_32 dst_comp pre1 pre2
    vadd.s32            q3, q1, \pre1
    vadd.s32            q4, q2, \pre2
    vqrshrun.s32        d10, q3, #13
    vqrshrun.s32        d11, q4, #13                                   @ q5 = ({q3,q4} + (1<<12)) >> 13
    vqmovn.u16          \dst_comp, q5                                  @ saturate 16bit -> 8bit
.endm

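@ Produce the R, G and B bytes for 16 pixels in the requested register layout
@ and set alpha to 255.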
.macro compute_rgba_16 r1 r2 g1 g2 b1 b2 a1 a2
    compute_color_16    \r1, \r2, q8,  q9
    compute_color_16    \g1, \g2, q10, q11
    compute_color_16    \b1, \b2, q12, q13
    vmov.u8             \a1, #255
    vmov.u8             \a2, #255
.endm

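@ Same as compute_rgba_16 but for 8 pixels (32-bit precision path).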
.macro compute_rgba_32 r g b a
    compute_color_32    \r, q8,  q9
    compute_color_32    \g, q10, q11
    compute_color_32    \b, q12, q13
    vmov.u8             \a, #255
.endm

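@ Convert 16 luma pixels (\y0/\y1) of one line at 16-bit precision:
@ q14/q15 = (srcY - y_offset) * y_coeff, then combine with the precomputed
@ chroma contributions and store 16 interleaved 4-byte pixels in the
@ component order selected by \ofmt.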
.macro compute_16px_16 dst y0 y1 ofmt
    vmovl.u8            q14, \y0                                       @ 8px of y
    vmovl.u8            q15, \y1                                       @ 8px of y

    vdup.16             q5, r9                                         @ q5  = y_offset
    vdup.16             q7, r10                                        @ q7  = y_coeff

    vsub.s16            q14, q5
    vsub.s16            q15, q5

    vmul.s16            q14, q7                                        @ q14 = (srcY - y_offset) * y_coeff (left)
    vmul.s16            q15, q7                                        @ q15 = (srcY - y_offset) * y_coeff (right)

.ifc \ofmt,argb
    compute_rgba_16     d7, d11, d8, d12, d9, d13, d6, d10
.endif

.ifc \ofmt,rgba
    compute_rgba_16     d6, d10, d7, d11, d8, d12, d9, d13
.endif

.ifc \ofmt,abgr
    compute_rgba_16     d9, d13, d8, d12, d7, d11, d6, d10
.endif

.ifc \ofmt,bgra
    compute_rgba_16     d8, d12, d7, d11, d6, d10, d9, d13
.endif
    vst4.8              {q3, q4}, [\dst,:128]!
    vst4.8              {q5, q6}, [\dst,:128]!
.endm

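@ Convert 8 luma pixels (\half_y) of one line at 32-bit precision:
@ q1/q2 = (srcY - y_offset) * y_coeff (widened), then combine with the
@ precomputed chroma contributions and store 8 interleaved 4-byte pixels
@ in the component order selected by \ofmt.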
.macro compute_8px_32 dst half_y ofmt
    vmovl.u8            q7, \half_y                                    @ 8px of Y
    vdup.16             q5, r9
    vsub.s16            q7, q5
    vmull.s16           q1, d14, d0                                    @ q1 = (srcY - y_offset) * y_coeff (left)
    vmull.s16           q2, d15, d0                                    @ q2 = (srcY - y_offset) * y_coeff (right)

.ifc \ofmt,argb
    compute_rgba_32     d13, d14, d15, d12
.endif

.ifc \ofmt,rgba
    compute_rgba_32     d12, d13, d14, d15
.endif

.ifc \ofmt,abgr
    compute_rgba_32     d15, d14, d13, d12
.endif

.ifc \ofmt,bgra
    compute_rgba_32     d14, d13, d12, d15
.endif

    vst4.8              {q6, q7}, [\dst,:128]!
.endm

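@ Convert 16 pixels of two consecutive luma lines (sharing one chroma line)
@ at 16-bit precision: premultiply the chroma in q14/q15, then load and
@ convert 16 luma bytes of each line into dst (r2) and dst2 (r11).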
.macro process_16px_16 ofmt
    compute_premult_16  d28, d29, d30, d31

    vld1.8              {q7}, [r4]!                                    @ first line of luma
    compute_16px_16     r2, d14, d15, \ofmt

    vld1.8              {q7}, [r12]!                                   @ second line of luma
    compute_16px_16     r11, d14, d15, \ofmt
.endm

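@ 32-bit precision counterpart: the 16 pixels are handled in two halves of 8.
@ The right halves of both luma lines are stashed in d28/d30 (free once the
@ left chroma halves have been premultiplied) while q7 is reloaded.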
.macro process_16px_32 ofmt
    compute_premult_32  d28, d30

    vld1.8              {q7}, [r4]!                                    @ first line of luma
    vmov                d28, d15                                       @ save right of the first line of luma for later use
    compute_8px_32      r2, d14, \ofmt

    vld1.8              {q7}, [r12]!                                   @ second line of luma
    vmov                d30, d15                                       @ save right of the second line of luma for later use
    compute_8px_32      r11, d14, \ofmt

    compute_premult_32  d29, d31
    compute_8px_32      r2,  d28, \ofmt
    compute_8px_32      r11, d30, \ofmt
.endm

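@ Prologue shared by all variants: save the callee-saved registers, fetch the
@ stack arguments (srcY, linesizeY, srcC, linesizeC, table, y_offset,
@ y_coeff), set up the second-row pointers (dst2, srcY2) and compute the
@ per-iteration paddings. Register arguments: r0 = width, r1 = height,
@ r2 = dst, r3 = linesize.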
.macro load_args
    push                {r4-r12, lr}
    vpush               {q4-q7}
    ldr                 r4, [sp, #104]                                 @ r4  = srcY
    ldr                 r5, [sp, #108]                                 @ r5  = linesizeY
    ldr                 r6, [sp, #112]                                 @ r6  = srcC
    ldr                 r7, [sp, #116]                                 @ r7  = linesizeC
    ldr                 r8, [sp, #120]                                 @ r8  = table
    ldr                 r9, [sp, #124]                                 @ r9  = y_offset
    ldr                 r10,[sp, #128]                                 @ r10 = y_coeff
    vdup.16             d0, r10                                        @ d0  = y_coeff
    vld1.16             {d1}, [r8]                                     @ d1  = *table
    add                 r11, r2, r3                                    @ r11 = dst + linesize (dst2)
    add                 r12, r4, r5                                    @ r12 = srcY + linesizeY (srcY2)
    lsl                 r3, r3, #1
    lsl                 r5, r5, #1
    lsl                 r8, r0, #2
    sub                 r3, r3, r8                                     @ r3 = linesize  * 2 - width * 4 (padding)
    sub                 r5, r5, r0                                     @ r5 = linesizeY * 2 - width     (paddingY)
    sub                 r7, r7, r0                                     @ r7 = linesizeC     - width     (paddingC)
.endm

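@ Emit ff_\ifmt\()_to_\ofmt\()_neon_\precision: the outer loop converts two
@ rows per iteration so that one chroma line is shared between them, and the
@ inner loop handles 16 pixels at a time. For nv21 the interleaved chroma
@ bytes are read as V,U instead of U,V when subtracting the 128 bias.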
.macro declare_func ifmt ofmt precision
function ff_\ifmt\()_to_\ofmt\()_neon_\precision\(), export=1
    load_args
1:
    mov                 r8, r0                                         @ r8 = width
2:
    pld [r6, #64*3]
    pld [r4, #64*3]
    pld [r12, #64*3]

    vld2.8              {d2, d3}, [r6]!                                @ q1: interleaved chroma line
    vmov.i8             d10, #128
.ifc \ifmt,nv12
    vsubl.u8            q14, d2, d10                                   @ q14 = U - 128
    vsubl.u8            q15, d3, d10                                   @ q15 = V - 128
.else
    vsubl.u8            q14, d3, d10                                   @ q14 = U - 128
    vsubl.u8            q15, d2, d10                                   @ q15 = V - 128
.endif

    process_16px_\precision \ofmt

    subs                r8, r8, #16                                    @ width -= 16
    bgt                 2b

    add                 r2, r2, r3                                     @ dst   += padding
    add                 r4, r4, r5                                     @ srcY  += paddingY
    add                 r11, r11, r3                                   @ dst2  += padding
    add                 r12, r12, r5                                   @ srcY2 += paddingY
    add                 r6, r6, r7                                     @ srcC  += paddingC

    subs                r1, r1, #2                                     @ height -= 2
    bgt                 1b

    vpop                {q4-q7}
    pop                 {r4-r12, lr}
    mov                 pc, lr
endfunc
.endm

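@ Instantiate the conversion function for all four supported RGBA orderings.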
.macro declare_rgb_funcs ifmt precision
    declare_func \ifmt, argb, \precision
    declare_func \ifmt, rgba, \precision
    declare_func \ifmt, abgr, \precision
    declare_func \ifmt, bgra, \precision
.endm

declare_rgb_funcs nv12, 16
declare_rgb_funcs nv21, 16
declare_rgb_funcs nv12, 32
declare_rgb_funcs nv21, 32