arm64: Add workaround for Arm Cortex-A77 erratum 1508412

On Cortex-A77 r0p0 and r1p0, a sequence of a non-cacheable or device load
and a store exclusive or PAR_EL1 read can cause a deadlock.

The workaround requires a DMB SY before and after a PAR_EL1 register
read. In addition, it's possible an interrupt (doing a device read) or
KVM guest exit could be taken between the DMB and PAR read, so we
also need a DMB before returning from interrupt and before returning to
a guest.

A deadlock is still possible with the workaround as KVM guests must also
have the workaround. IOW, a malicious guest can deadlock an affected
system.

This workaround also depends on a firmware counterpart to enable the h/w
to insert DMB SY after load and store exclusive instructions. See the
errata document SDEN-1152370 v10 [1] for more information.

[1] https://static.docs.arm.com/101992/0010/Arm_Cortex_A77_MP074_Software_Developer_Errata_Notice_v10.pdf

Signed-off-by: Rob Herring <robh@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Julien Thierry <julien.thierry.kdev@gmail.com>
Cc: kvmarm@lists.cs.columbia.edu
Link: https://lore.kernel.org/r/20201028182839.166037-2-robh@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>

@@ -90,6 +90,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A76      | #1463225        | ARM64_ERRATUM_1463225       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A77      | #1508412        | ARM64_ERRATUM_1508412       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1188873,1418040| ARM64_ERRATUM_1418040       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1349291        | N/A                         |

@@ -636,6 +636,26 @@ config ARM64_ERRATUM_1542419
 
	  If unsure, say Y.
 
+config ARM64_ERRATUM_1508412
+	bool "Cortex-A77: 1508412: workaround deadlock on sequence of NC/Device load and store exclusive or PAR read"
+	default y
+	help
+	  This option adds a workaround for Arm Cortex-A77 erratum 1508412.
+
+	  Affected Cortex-A77 cores (r0p0, r1p0) could deadlock on a sequence
+	  of a store-exclusive or read of PAR_EL1 and a load with device or
+	  non-cacheable memory attributes. The workaround depends on a firmware
+	  counterpart.
+
+	  KVM guests must also have the workaround implemented or they can
+	  deadlock the system.
+
+	  Work around the issue by inserting DMB SY barriers around PAR_EL1
+	  register reads and warning KVM users. The DMB barrier is sufficient
+	  to prevent a speculative PAR_EL1 read.
+
+	  If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
 	bool "Cavium erratum 22375, 24313"
 	default y

@@ -65,7 +65,8 @@
 #define ARM64_HAS_ARMv8_4_TTL 55
 #define ARM64_HAS_TLB_RANGE 56
 #define ARM64_MTE 57
+#define ARM64_WORKAROUND_1508412 58
 
-#define ARM64_NCAPS 58
+#define ARM64_NCAPS 59
 
 #endif /* __ASM_CPUCAPS_H */

@@ -1007,6 +1007,7 @@
 
 #include <linux/build_bug.h>
 #include <linux/types.h>
+#include <asm/alternative.h>
 
 #define __DEFINE_MRS_MSR_S_REGNUM			\
 	" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \
@@ -1095,6 +1096,14 @@
 	write_sysreg_s(__scs_new, sysreg);			\
 } while (0)
 
+#define read_sysreg_par() ({						\
+	u64 par;							\
+	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));	\
+	par = read_sysreg(par_el1);					\
+	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));	\
+	par;								\
+})
+
 #endif
 
 #endif /* __ASM_SYSREG_H */
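With the helper above in place, the remaining hunks convert PAR_EL1 readers
from the raw accessor to the wrapper. A sketch of the pattern at a call site
(the surrounding function is hypothetical; the macro is the one defined in
this hunk):

  #include <asm/sysreg.h>

  static u64 sample_par_read(void)	/* hypothetical caller */
  {
  	/* was: return read_sysreg(par_el1); */
  	return read_sysreg_par();	/* barriers patched in only on affected CPUs */
  }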

@@ -522,6 +522,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.matches = has_neoverse_n1_erratum_1542419,
 		.cpu_enable = cpu_enable_trap_ctr_access,
 	},
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1508412
+	{
+		/* we depend on the firmware portion for correctness */
+		.desc = "ARM erratum 1508412 (kernel portion)",
+		.capability = ARM64_WORKAROUND_1508412,
+		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
+				  0, 0,
+				  1, 0),
+	},
 #endif
 	{
 	}
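The capability installed here is consumed in two ways elsewhere in the patch:
alternatives patch in the barriers at boot, and C code can test the capability
at runtime. A minimal sketch of both idioms, assuming kernel context (the
function itself is hypothetical; both uses appear verbatim in later hunks):

  #include <linux/printk.h>
  #include <asm/alternative.h>
  #include <asm/cpufeature.h>

  static void erratum_1508412_example(void)
  {
  	/* Rewritten from nop to "dmb sy" at boot on affected CPUs */
  	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));

  	/* Runtime check, e.g. to warn about untrusted KVM guests */
  	if (cpus_have_final_cap(ARM64_WORKAROUND_1508412))
  		pr_warn("this CPU is affected by erratum 1508412\n");
  }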

@@ -365,6 +365,9 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
 	br	x30
 #endif
 	.else
+	/* Ensure any device/NC reads complete */
+	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
+
 	eret
 	.endif
 	sb

@@ -1719,7 +1719,8 @@ int kvm_arch_init(void *opaque)
 		return -ENODEV;
 	}
 
-	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE))
+	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
+	    cpus_have_final_cap(ARM64_WORKAROUND_1508412))
 		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
 			 "Only trusted guests should be used on this system.\n");

@@ -140,9 +140,9 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
 	 * We do need to save/restore PAR_EL1 though, as we haven't
 	 * saved the guest context yet, and we may return early...
 	 */
-	par = read_sysreg(par_el1);
+	par = read_sysreg_par();
 	if (!__kvm_at("s1e1r", far))
-		tmp = read_sysreg(par_el1);
+		tmp = read_sysreg_par();
 	else
 		tmp = SYS_PAR_EL1_F; /* back to the guest */
 	write_sysreg(par, par_el1);
@@ -421,7 +421,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
 	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
 	    handle_tx2_tvm(vcpu))
-		return true;
+		goto guest;
 
 	/*
 	 * We trap the first access to the FP/SIMD to save the host context
@@ -431,13 +431,13 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	 * Similarly for trapped SVE accesses.
 	 */
 	if (__hyp_handle_fpsimd(vcpu))
-		return true;
+		goto guest;
 
 	if (__hyp_handle_ptrauth(vcpu))
-		return true;
+		goto guest;
 
 	if (!__populate_fault_info(vcpu))
-		return true;
+		goto guest;
 
 	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
 		bool valid;
@@ -452,7 +452,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 		int ret = __vgic_v2_perform_cpuif_access(vcpu);
 
 		if (ret == 1)
-			return true;
+			goto guest;
 
 		/* Promote an illegal access to an SError.*/
 		if (ret == -1)
@@ -468,12 +468,17 @@
 		int ret = __vgic_v3_perform_cpuif_access(vcpu);
 
 		if (ret == 1)
-			return true;
+			goto guest;
 	}
 
 exit:
 	/* Return to the host kernel and handle the exit */
 	return false;
+
+guest:
+	/* Re-enter the guest */
+	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
+
+	return true;
 }
 
 static inline void __kvm_unexpected_el2_exception(void)

@@ -43,7 +43,8 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 	ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
 	ctxt_sys_reg(ctxt, AMAIR_EL1) = read_sysreg_el1(SYS_AMAIR);
 	ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL);
-	ctxt_sys_reg(ctxt, PAR_EL1) = read_sysreg(par_el1);
+	ctxt_sys_reg(ctxt, PAR_EL1) = read_sysreg_par();
 	ctxt_sys_reg(ctxt, TPIDR_EL1) = read_sysreg(tpidr_el1);
 
 	ctxt_sys_reg(ctxt, SP_EL1) = read_sysreg(sp_el1);

@@ -250,7 +250,7 @@ void __noreturn hyp_panic(void)
 {
 	u64 spsr = read_sysreg_el2(SYS_SPSR);
 	u64 elr = read_sysreg_el2(SYS_ELR);
-	u64 par = read_sysreg(par_el1);
+	u64 par = read_sysreg_par();
 	bool restore_host = true;
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_vcpu *vcpu;

@@ -215,7 +215,7 @@ void __noreturn hyp_panic(void)
 {
 	u64 spsr = read_sysreg_el2(SYS_SPSR);
 	u64 elr = read_sysreg_el2(SYS_ELR);
-	u64 par = read_sysreg(par_el1);
+	u64 par = read_sysreg_par();
 
 	__hyp_call_panic(spsr, elr, par);
 	unreachable();

@@ -95,7 +95,7 @@ static bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
 	case AMAIR_EL1: *val = read_sysreg_s(SYS_AMAIR_EL12); break;
 	case CNTKCTL_EL1: *val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
 	case ELR_EL1: *val = read_sysreg_s(SYS_ELR_EL12); break;
-	case PAR_EL1: *val = read_sysreg_s(SYS_PAR_EL1); break;
+	case PAR_EL1: *val = read_sysreg_par(); break;
 	case DACR32_EL2: *val = read_sysreg_s(SYS_DACR32_EL2); break;
 	case IFSR32_EL2: *val = read_sysreg_s(SYS_IFSR32_EL2); break;
 	case DBGVCR32_EL2: *val = read_sysreg_s(SYS_DBGVCR32_EL2); break;

@@ -262,7 +262,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
 	local_irq_save(flags);
 	asm volatile("at s1e1r, %0" :: "r" (addr));
 	isb();
-	par = read_sysreg(par_el1);
+	par = read_sysreg_par();
 	local_irq_restore(flags);
 
 	/*