arm64: atomics: Use K constraint when toolchain appears to support it
The 'K' constraint is a documented AArch64 machine constraint supported
by GCC for matching integer constants that can be used with a 32-bit
logical instruction. Unfortunately, some released compilers erroneously
accept the immediate '4294967295' for this constraint, which is later
refused by GAS at assembly time. This had led us to avoid the use of
the 'K' constraint altogether.

Instead, detect whether the compiler is up to the job when building the
kernel and pass the 'K' constraint to our 32-bit atomic macros when it
appears to be supported.

Signed-off-by: Will Deacon <will@kernel.org>
Parent: 5aad6cdabb
Commit: 03adcbd996
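For context before the diff: 'K' accepts 32-bit logical (bitmask)
immediates. A pattern like 0xff00ff00 is encodable; all-ones 4294967295
(0xffffffff) is not, and that is exactly the value the buggy compilers
wave through. A minimal sketch of a valid use (not from the commit; the
file name is hypothetical):

/* k_demo.c - build with: aarch64-linux-gnu-gcc -O2 -S k_demo.c */
unsigned int mask_ok(unsigned int x)
{
	/* 0xff00ff00 is a valid 32-bit bitmask immediate, so a correct
	 * compiler emits "and w0, w0, #0xff00ff00" directly. */
	asm("and %w0, %w0, %1" : "+r" (x) : "K" (0xff00ff00u));
	return x;
}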
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
@@ -39,6 +39,12 @@ $(warning LSE atomics not supported by binutils)
   endif
 endif
 
+cc_has_k_constraint := $(call try-run,echo				\
+	'int main(void) {						\
+		asm volatile("and w0, w0, %w0" :: "K" (4294967295));	\
+		return 0;						\
+	}' | $(CC) -S -x c -o "$$TMP" -,,-DCONFIG_CC_HAS_K_CONSTRAINT=1)
+
 ifeq ($(CONFIG_ARM64), y)
 brokengasinst := $(call as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n,,-DCONFIG_BROKEN_GAS_INST=1)
 
@@ -63,7 +69,8 @@ ifeq ($(CONFIG_GENERIC_COMPAT_VDSO), y)
   endif
 endif
 
-KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr) $(brokengasinst) $(compat_vdso)
+KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr) $(brokengasinst)	\
+		   $(compat_vdso) $(cc_has_k_constraint)
 KBUILD_CFLAGS	+= -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS	+= $(call cc-disable-warning, psabi)
 KBUILD_AFLAGS	+= $(lseinstr) $(brokengasinst) $(compat_vdso)
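The try-run logic above reads backwards at first glance:
$(call try-run,cmd,opt-if-success,opt-if-fail) emits the second
argument when cmd succeeds and the third when it fails. A correct
compiler rejects 4294967295 for 'K' (typically with an "impossible
constraint" error, though the exact diagnostic varies by toolchain), so
it is the *failure* branch that defines CONFIG_CC_HAS_K_CONSTRAINT=1.
The same probe, runnable by hand:

/* k_probe.c - the test the Makefile pipes into $(CC).
 * A correct aarch64 compiler rejects this at -S time because all-ones
 * is not a valid 32-bit logical immediate; an affected compiler
 * accepts it and emits an instruction GAS would later refuse.
 *
 *   aarch64-linux-gnu-gcc -S -x c -o /dev/null k_probe.c
 *   # non-zero exit => compiler is trustworthy
 *   #               => -DCONFIG_CC_HAS_K_CONSTRAINT=1
 */
int main(void)
{
	asm volatile("and w0, w0, %w0" :: "K" (4294967295));
	return 0;
}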
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -10,6 +10,8 @@
 #ifndef __ASM_ATOMIC_LL_SC_H
 #define __ASM_ATOMIC_LL_SC_H
 
+#include <linux/stringify.h>
+
 #if IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && IS_ENABLED(CONFIG_AS_LSE)
 #define __LL_SC_FALLBACK(asm_ops)					\
 "	b	3f\n"							\
@@ -23,6 +25,10 @@ asm_ops "\n"							\
 #define __LL_SC_FALLBACK(asm_ops) asm_ops
 #endif
 
+#ifndef CONFIG_CC_HAS_K_CONSTRAINT
+#define K
+#endif
+
 /*
  * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
  * store exclusive to ensure that these are atomic.  We may loop
@@ -44,7 +50,7 @@ __ll_sc_atomic_##op(int i, atomic_t *v)				\
 "	stxr	%w1, %w0, %2\n"						\
 "	cbnz	%w1, 1b\n")						\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: #constraint "r" (i));						\
+	: __stringify(constraint) "r" (i));				\
 }
 
 #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
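The #constraint -> __stringify(constraint) switch pairs with the empty
'#define K' above: the preprocessor's '#' operator stringizes its
argument *without* macro expansion, so #constraint would bake a literal
"K" into the constraint string even when the constraint is disabled,
while __stringify inserts an expansion step first. A host-buildable
sketch of the two-step trick from <linux/stringify.h> (the DIRECT and
EXPANDED macro names are invented for illustration):

#include <stdio.h>

#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

#define K	/* mimics atomic_ll_sc.h when 'K' is untrusted */

#define DIRECT(constraint)	#constraint
#define EXPANDED(constraint)	__stringify(constraint)

int main(void)
{
	printf("#constraint             -> \"%s\"\n", DIRECT(K));   /* "K" */
	printf("__stringify(constraint) -> \"%s\"\n", EXPANDED(K)); /* ""  */
	return 0;
}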
@@ -63,7 +69,7 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v)	\
 "	cbnz	%w1, 1b\n"						\
 "	" #mb )								\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: #constraint "r" (i)						\
+	: __stringify(constraint) "r" (i)				\
 	: cl);								\
 									\
 	return result;							\
@@ -85,7 +91,7 @@ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v)	\
 "	cbnz	%w2, 1b\n"						\
 "	" #mb )								\
 	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
-	: #constraint "r" (i)						\
+	: __stringify(constraint) "r" (i)				\
 	: cl);								\
 									\
 	return result;							\
@@ -113,10 +119,15 @@ ATOMIC_OPS(sub, sub, J)
 	ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
 	ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)
 
-ATOMIC_OPS(and, and, )
+ATOMIC_OPS(and, and, K)
+ATOMIC_OPS(or, orr, K)
+ATOMIC_OPS(xor, eor, K)
+/*
+ * GAS converts the mysterious and undocumented BIC (immediate) alias to
+ * an AND (immediate) instruction with the immediate inverted. We don't
+ * have a constraint for this, so fall back to register.
+ */
 ATOMIC_OPS(andnot, bic, )
-ATOMIC_OPS(or, orr, )
-ATOMIC_OPS(xor, eor, )
 
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
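The BIC comment above, made concrete (a hypothetical demo based on
observed GAS behaviour; BIC with an immediate is not an architecturally
documented alias): the immediate that actually has to encode is the
*inverted* one, and no GCC constraint letter describes "values whose
inverse is a valid logical immediate", hence the register fallback for
the andnot ops.

/* bic_alias.c - aarch64 target only. */
unsigned int andnot_ff(unsigned int x)
{
	/* Written as BIC with an immediate, but GAS encodes it as
	 * "and %w0, %w0, #0xffffff00", i.e. the immediate inverted,
	 * so validity depends on ~0xff being a bitmask immediate. */
	asm("bic %w0, %w0, #0xff" : "+r" (x));
	return x;
}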
@@ -138,7 +149,7 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v)		\
 "	stxr	%w1, %0, %2\n"						\
 "	cbnz	%w1, 1b")						\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: #constraint "r" (i));						\
+	: __stringify(constraint) "r" (i));				\
 }
 
 #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
@@ -157,7 +168,7 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
 "	cbnz	%w1, 1b\n"						\
 "	" #mb )								\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: #constraint "r" (i)						\
+	: __stringify(constraint) "r" (i)				\
 	: cl);								\
 									\
 	return result;							\
@@ -179,7 +190,7 @@ __ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
 "	cbnz	%w2, 1b\n"						\
 "	" #mb )								\
 	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
-	: #constraint "r" (i)						\
+	: __stringify(constraint) "r" (i)				\
 	: cl);								\
 									\
 	return result;							\
@@ -208,9 +219,14 @@ ATOMIC64_OPS(sub, sub, J)
 	ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)
 
 ATOMIC64_OPS(and, and, L)
-ATOMIC64_OPS(andnot, bic, )
 ATOMIC64_OPS(or, orr, L)
 ATOMIC64_OPS(xor, eor, L)
+/*
+ * GAS converts the mysterious and undocumented BIC (immediate) alias to
+ * an AND (immediate) instruction with the immediate inverted. We don't
+ * have a constraint for this, so fall back to register.
+ */
+ATOMIC64_OPS(andnot, bic, )
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_FETCH_OP
@@ -269,7 +285,7 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr,	\
 	"2:")								\
 	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
 	  [v] "+Q" (*(u##sz *)ptr)					\
-	: [old] #constraint "r" (old), [new] "r" (new)			\
+	: [old] __stringify(constraint) "r" (old), [new] "r" (new)	\
 	: cl);								\
 									\
 	return oldval;							\
@@ -280,21 +296,21 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr,	\
  * handle the 'K' constraint for the value 4294967295 - thus we use no
  * constraint for 32 bit operations.
  */
-__CMPXCHG_CASE(w, b,     ,  8,        ,  ,  ,         , )
-__CMPXCHG_CASE(w, h,     , 16,        ,  ,  ,         , )
-__CMPXCHG_CASE(w,  ,     , 32,        ,  ,  ,         , )
+__CMPXCHG_CASE(w, b,     ,  8,        ,  ,  ,         , K)
+__CMPXCHG_CASE(w, h,     , 16,        ,  ,  ,         , K)
+__CMPXCHG_CASE(w,  ,     , 32,        ,  ,  ,         , K)
 __CMPXCHG_CASE( ,  ,     , 64,        ,  ,  ,         , L)
-__CMPXCHG_CASE(w, b, acq_,  8,        , a,  , "memory", )
-__CMPXCHG_CASE(w, h, acq_, 16,        , a,  , "memory", )
-__CMPXCHG_CASE(w,  , acq_, 32,        , a,  , "memory", )
+__CMPXCHG_CASE(w, b, acq_,  8,        , a,  , "memory", K)
+__CMPXCHG_CASE(w, h, acq_, 16,        , a,  , "memory", K)
+__CMPXCHG_CASE(w,  , acq_, 32,        , a,  , "memory", K)
 __CMPXCHG_CASE( ,  , acq_, 64,        , a,  , "memory", L)
-__CMPXCHG_CASE(w, b, rel_,  8,        ,  , l, "memory", )
-__CMPXCHG_CASE(w, h, rel_, 16,        ,  , l, "memory", )
-__CMPXCHG_CASE(w,  , rel_, 32,        ,  , l, "memory", )
+__CMPXCHG_CASE(w, b, rel_,  8,        ,  , l, "memory", K)
+__CMPXCHG_CASE(w, h, rel_, 16,        ,  , l, "memory", K)
+__CMPXCHG_CASE(w,  , rel_, 32,        ,  , l, "memory", K)
 __CMPXCHG_CASE( ,  , rel_, 64,        ,  , l, "memory", L)
-__CMPXCHG_CASE(w, b, mb_,  8, dmb ish,  , l, "memory", )
-__CMPXCHG_CASE(w, h, mb_, 16, dmb ish,  , l, "memory", )
-__CMPXCHG_CASE(w,  , mb_, 32, dmb ish,  , l, "memory", )
+__CMPXCHG_CASE(w, b, mb_,  8, dmb ish,  , l, "memory", K)
+__CMPXCHG_CASE(w, h, mb_, 16, dmb ish,  , l, "memory", K)
+__CMPXCHG_CASE(w,  , mb_, 32, dmb ish,  , l, "memory", K)
 __CMPXCHG_CASE( ,  , mb_, 64, dmb ish,  , l, "memory", L)
 
 #undef __CMPXCHG_CASE
@@ -332,5 +348,6 @@ __CMPXCHG_DBL(   ,        ,  ,         )
 __CMPXCHG_DBL(_mb, dmb ish, l, "memory")
 
 #undef __CMPXCHG_DBL
+#undef K
 
 #endif /* __ASM_ATOMIC_LL_SC_H */
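Finally, a sketch of what the change buys (a hypothetical user-space
analogue; the kernel's generated code will differ): once
CONFIG_CC_HAS_K_CONSTRAINT is set, 32-bit constant masks can be encoded
directly in the logical instruction rather than moved into a scratch
register first. The trailing '#undef K' above keeps the bare macro name
from leaking into other translation units.

/* payoff.c - check with: aarch64-linux-gnu-gcc -O2 -S payoff.c */
unsigned int set_flag(unsigned int v)
{
	/* With a trustworthy compiler, "K" lets the constant fold
	 * straight into the instruction: orr w0, w0, #16. With the
	 * register fallback, the compiler must first materialise 16
	 * in a scratch register. */
	asm("orr %w0, %w0, %1" : "+r" (v) : "K" (0x10u));
	return v;
}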