/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020 - Google Inc
* Author: Andrew Scull <ascull@google.com>
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ptrauth.h>
.text
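
/*
 * __host_exit is entered from the host vectors with the host's x0 and x1
 * stashed on the hyp stack. It saves the full host GPR state into the
 * per-CPU host context, hands control to handle_trap, then restores the
 * host state on the way back out.
 */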
SYM_FUNC_START(__host_exit)
get_host_ctxt x0, x1
/* Store the host regs x2 and x3 */
stp x2, x3, [x0, #CPU_XREG_OFFSET(2)]
/* Retrieve the host regs x0-x1 from the stack */
ldp x2, x3, [sp], #16 // x0, x1
/* Store the host regs x0-x1 and x4-x17 */
stp x2, x3, [x0, #CPU_XREG_OFFSET(0)]
stp x4, x5, [x0, #CPU_XREG_OFFSET(4)]
stp x6, x7, [x0, #CPU_XREG_OFFSET(6)]
stp x8, x9, [x0, #CPU_XREG_OFFSET(8)]
stp x10, x11, [x0, #CPU_XREG_OFFSET(10)]
stp x12, x13, [x0, #CPU_XREG_OFFSET(12)]
stp x14, x15, [x0, #CPU_XREG_OFFSET(14)]
stp x16, x17, [x0, #CPU_XREG_OFFSET(16)]
/* Store the host regs x18-x29, lr */
save_callee_saved_regs x0
/* Save the host context pointer in x29 across the function call */
mov x29, x0
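/*
 * In protected mode, hyp signs its pointers with private ptrauth keys:
 * stash the host's keys and install hyp's own while running at EL2.
 */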
#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
alternative_if_not ARM64_HAS_ADDRESS_AUTH
b __skip_pauth_save
alternative_else_nop_endif
alternative_if ARM64_KVM_PROTECTED_MODE
/* Save kernel ptrauth keys. */
add x18, x29, #CPU_APIAKEYLO_EL1
ptrauth_save_state x18, x19, x20
/* Use hyp keys. */
adr_this_cpu x18, kvm_hyp_ctxt, x19
add x18, x18, #CPU_APIAKEYLO_EL1
ptrauth_restore_state x18, x19, x20
isb
alternative_else_nop_endif
__skip_pauth_save:
#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
bl handle_trap
__host_enter_restore_full:
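/* x29 holds the host context pointer, set above or by __host_enter */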
/* Restore kernel keys. */
#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
alternative_if_not ARM64_HAS_ADDRESS_AUTH
b __skip_pauth_restore
alternative_else_nop_endif
alternative_if ARM64_KVM_PROTECTED_MODE
add x18, x29, #CPU_APIAKEYLO_EL1
ptrauth_restore_state x18, x19, x20
alternative_else_nop_endif
__skip_pauth_restore:
#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
/* Restore host regs x0-x17 */
ldp x0, x1, [x29, #CPU_XREG_OFFSET(0)]
ldp x2, x3, [x29, #CPU_XREG_OFFSET(2)]
ldp x4, x5, [x29, #CPU_XREG_OFFSET(4)]
ldp x6, x7, [x29, #CPU_XREG_OFFSET(6)]
/* x0-x7 are used for panic arguments */
__host_enter_for_panic:
ldp x8, x9, [x29, #CPU_XREG_OFFSET(8)]
ldp x10, x11, [x29, #CPU_XREG_OFFSET(10)]
ldp x12, x13, [x29, #CPU_XREG_OFFSET(12)]
ldp x14, x15, [x29, #CPU_XREG_OFFSET(14)]
ldp x16, x17, [x29, #CPU_XREG_OFFSET(16)]
/* Restore host regs x18-x29, lr */
restore_callee_saved_regs x29
/* Do not touch any register after this! */
__host_enter_without_restoring:
eret
sb
SYM_FUNC_END(__host_exit)
/*
* void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
*/
SYM_FUNC_START(__host_enter)
mov x29, x0
b __host_enter_restore_full
SYM_FUNC_END(__host_enter)
/*
* void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
* u64 elr, u64 par);
*/
SYM_FUNC_START(__hyp_do_panic)
/* Prepare and exit to the host's panic function. */
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
PSR_MODE_EL1h)
msr spsr_el2, lr
adr_l lr, nvhe_hyp_panic_handler
hyp_kimg_va lr, x6
msr elr_el2, lr
mov x29, x0
#ifdef CONFIG_NVHE_EL2_DEBUG
/* Ensure host stage-2 is disabled */
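/* (debug aid: allows the host to inspect EL2-owned memory after the panic) */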
mrs x0, hcr_el2
bic x0, x0, #HCR_VM
msr hcr_el2, x0
isb
tlbi vmalls12e1
dsb nsh
#endif
/* Load the panic arguments into x0-x7 */
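/*
 * These map onto nvhe_hyp_panic_handler()'s parameters: x0 = ESR,
 * x1 = SPSR and x2 = ELR (both still live from our caller), x3 = the
 * physical address of ELR, x4 = PAR, x5 = the loaded vCPU pointer,
 * x6 = FAR and x7 = HPFAR.
 */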
mrs x0, esr_el2
mov x4, x3
mov x3, x2
hyp_pa x3, x6
get_vcpu_ptr x5, x6
mrs x6, far_el2
mrs x7, hpfar_el2
/* Enter the host, conditionally restoring the host context. */
cbz x29, __host_enter_without_restoring
b __host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)
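
/*
 * Host HVCs land here: hypercall numbers below HVC_STUB_HCALL_NR are
 * stub calls serviced from the idmap; anything else, and every HVC in
 * protected mode, takes the full __host_exit path.
 */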
SYM_FUNC_START(__host_hvc)
ldp x0, x1, [sp] // Don't fixup the stack yet
/* No stub for you, sonny Jim */
alternative_if ARM64_KVM_PROTECTED_MODE
b __host_exit
alternative_else_nop_endif
/* Check for a stub HVC call */
cmp x0, #HVC_STUB_HCALL_NR
b.hs __host_exit
add sp, sp, #16
/*
* Compute the idmap address of __kvm_handle_stub_hvc and
* jump there.
*
* Preserve x0-x4, which may contain stub parameters.
*/
adr_l x5, __kvm_handle_stub_hvc
hyp_pa x5, x6
br x5
SYM_FUNC_END(__host_hvc)
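
/*
 * Synchronous exceptions from the host: HVCs are routed to __host_hvc,
 * everything else (SMCs, traps, aborts) goes to __host_exit.
 */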
.macro host_el1_sync_vect
.align 7
.L__vect_start\@:
stp x0, x1, [sp, #-16]!
mrs x0, esr_el2
ubfx x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
cmp x0, #ESR_ELx_EC_HVC64
b.eq __host_hvc
b __host_exit
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
.error "host_el1_sync_vect larger than vector entry"
.endif
.endm
.macro invalid_host_el2_vect
.align 7
/*
* Test whether the SP has overflowed, without corrupting a GPR.
* nVHE hypervisor stacks are aligned so that the PAGE_SHIFT bit
* of SP should always be 1.
*/
add sp, sp, x0 // sp' = sp + x0
sub x0, sp, x0 // x0' = sp' - x0 = (sp + x0) - x0 = sp
tbz x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
/* If a guest is loaded, panic out of it. */
stp x0, x1, [sp, #-16]!
get_loaded_vcpu x0, x1
cbnz x0, __guest_exit_panic
add sp, sp, #16
/*
* The panic may not be clean if the exception is taken before the host
* context has been saved by __host_exit or after the hyp context has
* been partially clobbered by __host_enter.
*/
b hyp_panic
.L__hyp_sp_overflow\@:
/* Switch to the overflow stack */
adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
b hyp_panic_bad_stack
ASM_BUG()
.endm
.macro invalid_host_el1_vect
.align 7
mov x0, xzr /* restore_host = false */
mrs x1, spsr_el2
mrs x2, elr_el2
mrs x3, par_el1
b __hyp_do_panic
.endm
/*
* The host vector does not use an ESB instruction in order to avoid consuming
* SErrors that should only be consumed by the host. Guest entry is deferred by
* __guest_enter if there are any pending asynchronous exceptions so hyp will
* always return to the host without having consumed host SErrors.
*
* CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the
* host knows about the EL2 vectors already, and there is no point in hiding
* them.
*/
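
/* 16 vectors, 0x80 bytes apart; VBAR_EL2 requires 2KB (1 << 11) alignment */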
.align 11
SYM_CODE_START(__kvm_hyp_host_vector)
invalid_host_el2_vect // Synchronous EL2t
invalid_host_el2_vect // IRQ EL2t
invalid_host_el2_vect // FIQ EL2t
invalid_host_el2_vect // Error EL2t
invalid_host_el2_vect // Synchronous EL2h
invalid_host_el2_vect // IRQ EL2h
invalid_host_el2_vect // FIQ EL2h
invalid_host_el2_vect // Error EL2h
host_el1_sync_vect // Synchronous 64-bit EL1/EL0
invalid_host_el1_vect // IRQ 64-bit EL1/EL0
invalid_host_el1_vect // FIQ 64-bit EL1/EL0
invalid_host_el1_vect // Error 64-bit EL1/EL0
host_el1_sync_vect // Synchronous 32-bit EL1/EL0
invalid_host_el1_vect // IRQ 32-bit EL1/EL0
invalid_host_el1_vect // FIQ 32-bit EL1/EL0
invalid_host_el1_vect // Error 32-bit EL1/EL0
SYM_CODE_END(__kvm_hyp_host_vector)
/*
* Forward SMC with arguments in struct kvm_cpu_context, and
* store the result into the same struct. Assumes SMCCC 1.2 or older.
*
* x0: struct kvm_cpu_context*
*/
SYM_CODE_START(__kvm_hyp_host_forward_smc)
/*
* Use x18 to keep the pointer to the host context because
* x18 is callee-saved in SMCCC but not in AAPCS64.
*/
mov x18, x0
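/* SMCCC v1.2 allows parameters and results in x0-x17, so shuttle them all */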
ldp x0, x1, [x18, #CPU_XREG_OFFSET(0)]
ldp x2, x3, [x18, #CPU_XREG_OFFSET(2)]
ldp x4, x5, [x18, #CPU_XREG_OFFSET(4)]
ldp x6, x7, [x18, #CPU_XREG_OFFSET(6)]
ldp x8, x9, [x18, #CPU_XREG_OFFSET(8)]
ldp x10, x11, [x18, #CPU_XREG_OFFSET(10)]
ldp x12, x13, [x18, #CPU_XREG_OFFSET(12)]
ldp x14, x15, [x18, #CPU_XREG_OFFSET(14)]
ldp x16, x17, [x18, #CPU_XREG_OFFSET(16)]
smc #0
stp x0, x1, [x18, #CPU_XREG_OFFSET(0)]
stp x2, x3, [x18, #CPU_XREG_OFFSET(2)]
stp x4, x5, [x18, #CPU_XREG_OFFSET(4)]
stp x6, x7, [x18, #CPU_XREG_OFFSET(6)]
stp x8, x9, [x18, #CPU_XREG_OFFSET(8)]
stp x10, x11, [x18, #CPU_XREG_OFFSET(10)]
stp x12, x13, [x18, #CPU_XREG_OFFSET(12)]
stp x14, x15, [x18, #CPU_XREG_OFFSET(14)]
stp x16, x17, [x18, #CPU_XREG_OFFSET(16)]
ret
SYM_CODE_END(__kvm_hyp_host_forward_smc)
/*
 * kvm_host_psci_cpu_entry is called through a br instruction, which
 * requires a bti j landing pad: compilers (GCC and LLVM) emit bti c,
 * not bti j, at the start of external functions.
 */
SYM_CODE_START(kvm_host_psci_cpu_entry)
bti j
b __kvm_host_psci_cpu_entry
SYM_CODE_END(kvm_host_psci_cpu_entry)