locking/atomic: arm: fix sync ops

The sync_*() ops on arch/arm are defined in terms of the regular bitops
with no special handling. This is not correct, as UP kernels elide
barriers for the fully-ordered operations, and so the required ordering
is lost when such UP kernels are run under a hypervisor on an SMP
system.

Fix this by defining sync ops with the required barriers.

Note: On 32-bit arm, the sync_*() ops are currently only used by Xen,
which requires ARMv7, but the semantics can be implemented for ARMv6+.
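
The pattern used throughout the fix is the relaxed operation bracketed
by barriers which survive UP builds: the double-underscore __smp_*()
barrier forms expand to real barrier instructions regardless of
CONFIG_SMP, unlike the smp_*() forms, which degrade to compiler
barriers on UP kernels. As an illustrative C sketch (sync_op() and
relaxed_op() are placeholder names, not kernel interfaces):

	static inline int sync_op(volatile unsigned long *p)
	{
		int old;

		__smp_mb__before_atomic();	/* never elided on UP */
		old = relaxed_op(p);		/* placeholder relaxed atomic */
		__smp_mb__after_atomic();	/* never elided on UP */

		return old;
	}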

Fixes: e54d2f6152 ("xen/arm: sync_bitops")
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-2-mark.rutland@arm.com
commit dda5f312bb (parent 497cc42bf5)
Author: Mark Rutland, 2023-06-05 08:00:58 +01:00 (committed by Peter Zijlstra)
6 changed files with 65 additions and 7 deletions

arch/arm/include/asm/assembler.h

@@ -394,6 +394,23 @@ ALT_UP_B(.L0_\@)
 #endif
 	.endm
 
+/*
+ * Raw SMP data memory barrier
+ */
+	.macro	__smp_dmb mode
+#if __LINUX_ARM_ARCH__ >= 7
+	.ifeqs "\mode","arm"
+	dmb	ish
+	.else
+	W(dmb)	ish
+	.endif
+#elif __LINUX_ARM_ARCH__ == 6
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#else
+	.error "Incompatible SMP platform"
+#endif
+	.endm
+
 #if defined(CONFIG_CPU_V7M)
 /*
  * setmode is used to assert to be in svc mode during boot. For v7-M
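
Unlike the existing smp_dmb macro, which compiles to nothing on
CONFIG_SMP=n kernels and is runtime-patched to a nop on UP hardware,
__smp_dmb above always emits the barrier. A rough C equivalent of the
ARMv7+ case, as a sketch only (raw_smp_dmb() is an illustrative name,
not a kernel interface):

	static inline void raw_smp_dmb(void)
	{
		/* Inner-shareable data memory barrier, always emitted. */
		asm volatile("dmb ish" : : : "memory");
	}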

arch/arm/include/asm/sync_bitops.h

@@ -14,14 +14,35 @@
  * ops which are SMP safe even on a UP kernel.
  */
 
+/*
+ * Unordered
+ */
+
 #define sync_set_bit(nr, p)		_set_bit(nr, p)
 #define sync_clear_bit(nr, p)		_clear_bit(nr, p)
 #define sync_change_bit(nr, p)		_change_bit(nr, p)
-#define sync_test_and_set_bit(nr, p)	_test_and_set_bit(nr, p)
-#define sync_test_and_clear_bit(nr, p)	_test_and_clear_bit(nr, p)
-#define sync_test_and_change_bit(nr, p)	_test_and_change_bit(nr, p)
 #define sync_test_bit(nr, addr)		test_bit(nr, addr)
-#define arch_sync_cmpxchg		arch_cmpxchg
+
+/*
+ * Fully ordered
+ */
+
+int _sync_test_and_set_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_set_bit(nr, p)	_sync_test_and_set_bit(nr, p)
+
+int _sync_test_and_clear_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_clear_bit(nr, p)	_sync_test_and_clear_bit(nr, p)
+
+int _sync_test_and_change_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_change_bit(nr, p)	_sync_test_and_change_bit(nr, p)
+
+#define arch_sync_cmpxchg(ptr, old, new)				\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	__smp_mb__before_atomic();					\
+	__ret = arch_cmpxchg_relaxed((ptr), (old), (new));		\
+	__smp_mb__after_atomic();					\
+	__ret;								\
+})
 
 #endif
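
These fully-ordered variants are what Xen guest code needs when even a
UP kernel shares memory with another domain. A hedged usage sketch
(claim_slot() and shared_flags are illustrative names, not Xen code):

	static unsigned long shared_flags;	/* word shared with another domain */

	/* Returns true if this caller won slot @nr. */
	static bool claim_slot(int nr)
	{
		/*
		 * Fully ordered even on a UP kernel: stores before the
		 * call are visible before the bit is set, and later
		 * accesses cannot be reordered before it.
		 */
		return !sync_test_and_set_bit(nr, &shared_flags);
	}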

arch/arm/lib/bitops.h

@@ -28,7 +28,7 @@ UNWIND(	.fnend		)
 ENDPROC(\name		)
 	.endm
 
-	.macro	testop, name, instr, store
+	.macro	__testop, name, instr, store, barrier
 ENTRY(	\name		)
 UNWIND(	.fnstart	)
 	ands	ip, r1, #3
@@ -38,7 +38,7 @@ UNWIND(	.fnstart	)
 	mov	r0, r0, lsr #5
 	add	r1, r1, r0, lsl #2	@ Get word offset
 	mov	r3, r2, lsl r3		@ create mask
-	smp_dmb
+	\barrier
 #if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
 	.arch_extension	mp
 	ALT_SMP(W(pldw)	[r1])
@@ -50,13 +50,21 @@ UNWIND(	.fnstart	)
 	strex	ip, r2, [r1]
 	cmp	ip, #0
 	bne	1b
-	smp_dmb
+	\barrier
 	cmp	r0, #0
 	movne	r0, #1
 2:	bx	lr
 UNWIND(	.fnend		)
 ENDPROC(\name		)
 	.endm
 
+	.macro	testop, name, instr, store
+	__testop \name, \instr, \store, smp_dmb
+	.endm
+
+	.macro	sync_testop, name, instr, store
+	__testop \name, \instr, \store, __smp_dmb
+	.endm
+
 #else
 	.macro	bitop, name, instr
 ENTRY(	\name		)
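
The refactoring keeps one ldrex/strex implementation and parameterizes
only the bracketing barrier: testop passes smp_dmb (elided on UP),
sync_testop passes __smp_dmb (always emitted). The C analogue of this
factoring is roughly (a sketch; every name below is illustrative):

	static inline int __testop(int nr, volatile unsigned long *p,
				   void (*barrier)(void))
	{
		int old;

		barrier();				/* \barrier, before */
		old = relaxed_test_and_op(nr, p);	/* ldrex/strex loop */
		barrier();				/* \barrier, after */

		return old;
	}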

arch/arm/lib/testchangebit.S

@@ -10,3 +10,7 @@
 	.text
 
 testop	_test_and_change_bit, eor, str
+
+#if __LINUX_ARM_ARCH__ >= 6
+sync_testop	_sync_test_and_change_bit, eor, str
+#endif

arch/arm/lib/testclearbit.S

@@ -10,3 +10,7 @@
 	.text
 
 testop	_test_and_clear_bit, bicne, strne
+
+#if __LINUX_ARM_ARCH__ >= 6
+sync_testop	_sync_test_and_clear_bit, bicne, strne
+#endif

arch/arm/lib/testsetbit.S

@@ -10,3 +10,7 @@
 	.text
 
 testop	_test_and_set_bit, orreq, streq
+
+#if __LINUX_ARM_ARCH__ >= 6
+sync_testop	_sync_test_and_set_bit, orreq, streq
+#endif