This creates proper position-independent code when accessing
data symbols if CONFIG_PIC is set.
References to external symbols should now use the movrelx macro.
Some additional code changes are required since this macro may
need a register to hold the GOT pointer.
Signed-off-by: Mans Rullgard <mans@mansr.com>
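
Example usage (illustrative only, not part of this patch; the function
and table names below are made up). Under CONFIG_PIC on ELF, the first
movrelx in a function loads the GOT pointer, into r12 by default or into
the register passed as the optional third argument, and the required
literal words are emitted automatically at endfunc:

    function ff_example_read_tab_neon, export=1
            @ hypothetical call site: fetch the first word of an external
            @ table; r12 is claimed for the GOT pointer since no third
            @ argument is given
            movrelx         r0,  X(ff_example_tab)
            ldr             r0,  [r0]
            bx              lr
    endfunc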
beq 4f
push {r4-r11,lr}
add r5, sp, #40
- movrel r4, X(ff_ac3_bin_to_band_tab)
- movrel lr, X(ff_ac3_band_start_tab)
+ movrelx r4, X(ff_ac3_bin_to_band_tab), r11
+ movrelx lr, X(ff_ac3_band_start_tab)
ldm r5, {r5-r7}
ldrb r4, [r4, r2]
add r1, r1, r2, lsl #1 @ psd + start
bl fft\n4\()_neon
mov r0, r4
pop {r4, lr}
- movrel r1, X(ff_cos_\n\()_fixed)
+ movrelx r1, X(ff_cos_\n\()_fixed)
mov r2, #\n4/2
b fft_pass_neon
endfunc
vswp d29, d30 @ q14{r12,i12,i14,r15} q15{r13,i13,i15,r14}
vadd.f32 q0, q12, q13 @ {t1,t2,t5,t6}
vadd.f32 q1, q14, q15 @ {t1a,t2a,t5a,t6a}
- movrel r2, X(ff_cos_16)
+ movrelx r2, X(ff_cos_16)
vsub.f32 q13, q12, q13 @ {t3,t4,t7,t8}
vrev64.32 d1, d1
vsub.f32 q15, q14, q15 @ {t3a,t4a,t7a,t8a}
bl fft\n4\()_neon
mov r0, r4
pop {r4, lr}
- movrel r1, X(ff_cos_\n)
+ movrelx r1, X(ff_cos_\n)
mov r2, #\n4/2
b fft_pass_neon
endfunc
vmov.i32 d3, #0
.Lhf_apply_noise_0:
push {r4,lr}
- movrel r4, X(ff_sbr_noise_table)
+ movrelx r4, X(ff_sbr_noise_table)
add r3, r3, #1
bfc r3, #9, #23
sub r12, r12, #1
eor lr, r12, #1<<31
vmov d3, r12, lr
.Lhf_apply_noise_1:
- movrel r4, X(ff_sbr_noise_table)
+ movrelx r4, X(ff_sbr_noise_table)
add r3, r3, #1
bfc r3, #9, #23
sub r12, r12, #1
vadd.s16 q1, q8, q12
vsub.s16 q8, q8, q12
vld1.64 {d28-d31}, [r2,:128]!
-function vp3_idct_core_neon
vmull.s16 q2, d18, xC1S7 // (ip[1] * C1) << 16
vmull.s16 q3, d19, xC1S7
vmull.s16 q4, d2, xC4S4 // ((ip[0] + ip[4]) * C4) << 16
function ff_decode_block_coeffs_armv6, export=1
push {r0,r1,r4-r11,lr}
- movrel lr, X(ff_vp56_norm_shift)
+ movrelx lr, X(ff_vp56_norm_shift)
ldrd r4, r5, [sp, #44] @ token_prob, qmul
cmp r3, #0
ldr r11, [r5]
mov r9, #8
it ge
addge r12, r12, #1
- movrel r4, X(ff_vp8_dct_cat_prob)
+ movrelx r4, X(ff_vp8_dct_cat_prob), r1
lsl r9, r9, r12
ldr r4, [r4, r12, lsl #2]
add r12, r9, #3
.endm
.macro function name, export=0
+ .set .Lpic_idx, 0
+ .set .Lpic_gp, 0
+ .if .Lpic_idx
+ .altmacro
+ put_pic %(.Lpic_idx - 1)
+ .noaltmacro
+ .endif
ELF .size \name, . - \name
.endfunc
.purgem endfunc
+.macro put_pic num
+ put_pic_\num
+.endm
+
+.macro do_def_pic num, val, label
+ .macro put_pic_\num
+ .if \num
+ .altmacro
+ put_pic %(\num - 1)
+ .noaltmacro
+ .endif
+\label: .word \val
+ .purgem put_pic_\num
+ .endm
+.endm
+
+.macro def_pic val, label
+ .altmacro
+ do_def_pic %.Lpic_idx, \val, \label
+ .noaltmacro
+ .set .Lpic_idx, .Lpic_idx + 1
+.endm
+
+.macro ldpic rd, val, indir=0
+ ldr \rd, .Lpicoff\@
+.Lpic\@:
+ .if \indir
+ ldr \rd, [pc, \rd]
+ .else
+ add \rd, pc, \rd
+ .endif
+ def_pic \val - (.Lpic\@ + (8 >> CONFIG_THUMB)), .Lpicoff\@
+.endm
+
-#if HAVE_ARMV6T2 && !CONFIG_PIC && !defined(__APPLE__)
+#if CONFIG_PIC
+ ldpic \rd, \val
+#elif HAVE_ARMV6T2 && !defined(__APPLE__)
movw \rd, #:lower16:\val
movt \rd, #:upper16:\val
#else
+.macro movrelx rd, val, gp
+#if CONFIG_PIC && defined(__ELF__)
+ .ifnb \gp
+ .if .Lpic_gp
+ .unreq gp
+ .endif
+ gp .req \gp
+ ldpic gp, _GLOBAL_OFFSET_TABLE_
+ .elseif !.Lpic_gp
+ gp .req r12
+ ldpic gp, _GLOBAL_OFFSET_TABLE_
+ .endif
+ .set .Lpic_gp, 1
+ ldr \rd, .Lpicoff\@
+ ldr \rd, [gp, \rd]
+ def_pic \val(GOT), .Lpicoff\@
+#elif CONFIG_PIC && defined(__APPLE__)
+ ldpic \rd, .Lpic\@, indir=1
+ .non_lazy_symbol_pointer
+.Lpic\@:
+ .indirect_symbol \val
+ .word 0
+ .text
+#else
+ movrel \rd, \val
+#endif
+.endm
+
.macro ldr_pre rt, rn, rm:vararg
A ldr \rt, [\rn, \rm]!
T add \rn, \rn, \rm